In [1]:
from __future__ import division
import numpy as np
from mc_tools2 import DMarkov
In [2]:
# Taken from www.math.wustl.edu/~feres/Math450Lect04.pdf
# Build the 10-state transition matrix one row at a time, assigning
# each row's nonzero entries with a single fancy-indexed statement.
P = np.zeros((10, 10))
P[0, [0, 2]] = 1. / 2
P[1, [1, 6]] = [1. / 3, 2. / 3]
P[2, 0] = 1.
P[3, 4] = 1.
P[4, [3, 4, 8]] = 1. / 3
P[5, 5] = 1.
P[6, [6, 8]] = [1. / 4, 3. / 4]
P[7, [2, 3, 7, 9]] = 1. / 4
P[8, 1] = 1.
P[9, [1, 4, 9]] = 1. / 3
In [3]:
np.set_printoptions(precision=3)
In [4]:
print P
In [5]:
mc = DMarkov(P)
In [6]:
print mc
Let us perform a simulation with initial state 0:
In [7]:
X0 = mc.simulate(init=0)
In [8]:
# One histogram bin per state: edges 0, 1, ..., 10.
bins = list(range(11))
hist, bin_edges = np.histogram(X0, bins=bins)
# Empirical distribution: fraction of time the path spends in each state
# (true division is in effect via the __future__ import at the top).
x0 = hist/len(X0)
print(x0)
Let us write a function:
In [9]:
def empirical_dist(init):
    """Return the empirical state distribution of a simulated path.

    Parameters
    ----------
    init : int
        Initial state of the sample path.

    Returns
    -------
    ndarray
        Fraction of time the path spends in each of the 10 states.

    Notes
    -----
    Uses the notebook globals `mc` (the DMarkov chain) and `bins`
    (histogram edges 0..10). NOTE(review): no seed is set, so the
    result is different on every call.
    """
    X = mc.simulate(init=init)
    hist, _ = np.histogram(X, bins=bins)
    return hist / len(X)
The set of states [0, 2]
seems to be closed:
In [10]:
print empirical_dist(2)
The set of states [1, 6, 8]
seems to be closed:
In [11]:
print empirical_dist(1)
In [12]:
print empirical_dist(6)
In [13]:
print empirical_dist(8)
States 3 and 4 seem to communicate with each other and eventually get absorbed into [1, 6, 8]:
In [14]:
print empirical_dist(3)
In [15]:
print empirical_dist(4)
State 5 is an absorbing state:
In [16]:
print empirical_dist(5)
From 9, the path seems to be absorbed into [1, 6, 8]:
In [17]:
print empirical_dist(9)
From 7
, the path gets absorbed into either [0, 2]
or [1, 6, 8]
, depending on the realization:
In [18]:
print empirical_dist(7)
In [19]:
print empirical_dist(7)
In [20]:
# Average the empirical distribution over N independent paths from state 7.
N = 50
print(np.mean([empirical_dist(7) for _ in range(N)], axis=0))
As expected, the Markov chain mc
is reducible:
In [21]:
mc.is_irreducible
Out[21]:
Communication classes of mc
:
In [22]:
mc.num_comm_classes
Out[22]:
In [23]:
mc.comm_classes
Out[23]:
Recurrent classes of mc
:
In [24]:
mc.num_rec_classes
Out[24]:
In [25]:
mc.rec_classes
Out[25]:
Recurrent states of mc
:
In [26]:
# Flatten the recurrent classes into a single list of recurrent states.
rec_states = [i for rec_class in mc.rec_classes for i in rec_class]
print(rec_states)
Transient states of mc
:
In [27]:
# Every state not in a recurrent class is transient.
trans_states = [i for i in range(10) if i not in rec_states]
print(trans_states)
Canonical form of P
:
In [28]:
# Order states with recurrent ones first, then transient ones — the
# permutation that brings P to canonical form.
permutation = rec_states + trans_states
print(permutation)
In [29]:
print P[permutation, :][:, permutation]
Decompose P
into irreducible submatrices:
In [30]:
# Print the irreducible submatrix of P restricted to each recurrent class.
for i, rec_class in enumerate(mc.rec_classes):
    print('P{0} ='.format(i))
    print(P[rec_class, :][:, rec_class])
In [31]:
print mc.stationary_dists
The Markov chain mc
above has three stationary distributions,
one for each of the three recurrent classes:
In [32]:
# Compute the stationary distributions — one per recurrent class,
# per the surrounding narrative.
vecs = mc.compute_stationary()
print(vecs)
In [33]:
print mc.stationary_dists
In [34]:
print np.dot(vecs, P)
In [34]: