In [1]:
from __future__ import division
import numpy as np
from mc_tools import MarkovChain
In [2]:
# Taken from www.math.wustl.edu/~feres/Math450Lect04.pdf
# 10-state transition matrix; every row sums to 1.
P = np.zeros((10, 10))
P[0, 0] = P[0, 2] = 1./2
P[1, 1], P[1, 6] = 1./3, 2./3
P[2, 0] = 1
P[3, 4] = 1
P[4, 3] = P[4, 4] = P[4, 8] = 1./3
P[5, 5] = 1
P[6, 6], P[6, 8] = 1./4, 3./4
P[7, 2] = P[7, 3] = P[7, 7] = P[7, 9] = 1./4
P[8, 1] = 1
P[9, 1] = P[9, 4] = P[9, 9] = 1./3
In [3]:
np.set_printoptions(precision=3)
In [4]:
print P
In [5]:
mc = MarkovChain(P)
In [6]:
print mc
Let us perform a simulation with initial state 0:
In [7]:
X0 = mc.simulate(init=0)
In [8]:
# Empirical distribution of the path X0: fraction of time spent in each
# of the 10 states (bin edges 0..10 give one bin per state).
bins = list(range(11))
hist, bin_edges = np.histogram(X0, bins=bins)
x0 = hist / len(X0)
print(x0)
Let us write a function:
In [9]:
def empirical_dist(init):
    """Simulate the chain `mc` from state `init` and return the fraction
    of time the path spends in each state (relies on the module-level
    `mc` and `bins`)."""
    path = mc.simulate(init=init)
    counts, edges = np.histogram(path, bins=bins)
    return counts / len(path)
The set of states [0, 2]
seems to be closed:
In [10]:
print empirical_dist(2)
The set of states [1, 6, 8]
seems to be closed:
In [11]:
print empirical_dist(1)
In [12]:
print empirical_dist(6)
In [13]:
print empirical_dist(8)
3
and 4
seem to communicate with each other and eventually get absorbed into [1, 6, 8]
:
In [14]:
print empirical_dist(3)
In [15]:
print empirical_dist(4)
5
is an absorbing state:
In [16]:
print empirical_dist(5)
From 9
, the path seems to be absorbed into [1, 6, 8]
:
In [17]:
print empirical_dist(9)
From 7
, the path gets absorbed into either [0, 2]
or [1, 6, 8]
, depending on the realization:
In [18]:
print empirical_dist(7)
In [19]:
print empirical_dist(7)
In [20]:
# Average the empirical distribution from state 7 over N independent runs.
N = 50
print(np.mean([empirical_dist(7) for i in range(N)], axis=0))
As expected, the Markov chain mc
is reducible:
In [21]:
mc.is_irreducible
Out[21]:
Communication classes of mc
:
In [22]:
mc.num_communication_classes
Out[22]:
In [23]:
mc.communication_classes
Out[23]:
Recurrent classes of mc
:
In [24]:
mc.num_recurrent_classes
Out[24]:
In [25]:
mc.recurrent_classes
Out[25]:
Recurrent states of mc
:
In [26]:
# Stack all recurrent classes into one flat array of recurrent states.
recurrent_states = np.concatenate(mc.recurrent_classes)
print(recurrent_states)
Transient states of mc
:
In [27]:
# Transient states = all states not in any recurrent class.
transient_states = np.setdiff1d(np.arange(mc.n), recurrent_states)
print(transient_states)
Canonical form of P
:
In [28]:
# Ordering recurrent states first yields the canonical form of P.
permutation = np.concatenate((recurrent_states, transient_states))
print(permutation)
In [29]:
print P[permutation, :][:, permutation]
Decompose P
into irreducible submatrices:
In [30]:
# Print the irreducible submatrix of P restricted to each recurrent class.
for i, recurrent_class in enumerate(mc.recurrent_classes):
    print('P{0} ='.format(i))
    print(P[recurrent_class, :][:, recurrent_class])
The Markov chain mc
above has three stationary distributions,
one for each of the three recurrent classes:
In [31]:
# One stationary distribution per recurrent class (three for this chain).
vecs = mc.stationary_distributions
print(vecs)
In [32]:
print np.dot(vecs, P)
In [33]:
# 11-state transition matrix: from each state, move uniformly at random
# to one of its listed successors (so every row sums to 1).
Q = np.zeros((11, 11))
successors = {
    0: [1, 5],
    1: [2, 10],
    2: [7],
    3: [4],
    4: [5],
    5: [2, 6],
    6: [7, 8],
    7: [4],
    8: [0],
    9: [4],
    10: [3, 8, 9],
}
for state, targets in successors.items():
    Q[state, targets] = 1. / len(targets)
In [34]:
print Q
In [35]:
mc2 = MarkovChain(Q)
This Markov chain mc2
is irreducible:
In [36]:
mc2.is_irreducible
Out[36]:
Whether it is aperiodic:
In [37]:
mc2.is_aperiodic
Out[37]:
The period of mc2
:
In [38]:
mc2.period
Out[38]:
Cyclic classes of mc2
:
In [39]:
# Cyclic classes of the irreducible, periodic chain mc2.
cyclic_classes = mc2.cyclic_classes
print(cyclic_classes)
Cyclic normal form of Q
:
In [40]:
# Reorder states by cyclic class for the cyclic normal form.
# NB: this overwrites the `permutation` used in the mc example above.
permutation = np.concatenate(cyclic_classes)
print(permutation)
In [41]:
np.set_printoptions(precision=2)
In [42]:
print Q[permutation, :][:, permutation]
Powers of Q
:
In [43]:
# Cyclic-normal-form matrix, with each row renormalized to sum to 1
# (presumably to keep rounding error from accumulating in the matrix
# powers below -- NOTE(review): confirm intent).
R = Q[np.ix_(permutation, permutation)]
R = R / R.sum(axis=1, keepdims=True)
In [44]:
print R
In [45]:
print np.linalg.matrix_power(R, 5)
In [46]:
# Print R^9 through R^13 by repeated multiplication, starting from R^8.
S = np.linalg.matrix_power(R, 8)
for k in range(5):
    S = S.dot(R)
    print('R^{0} ='.format(9 + k))
    print(S, '\n')
So far, periodicity is left undefined for reducible Markov chains:
In [47]:
mc3 = MarkovChain([[1, 0], [0, 1]])
In [48]:
mc3.is_irreducible
Out[48]:
In [49]:
mc3.is_aperiodic
In [50]:
mc3.period
In [51]:
mc3.cyclic_classes
In [ ]: