In [3]:
# Markov chain exercises. First we need a way to draw from a discrete
# distribution, which QuantEcon provides via DiscreteRV.
using QuantEcon
using Statistics  # mean lives in the Statistics stdlib on Julia >= 1.0
# A quick test: draw five times from the distribution (0.1, 0.9)
psi = [0.1, 0.9]
d = DiscreteRV(psi)
rand(d, 5)
Out[3]:
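In [ ]:
# Added sanity check (a minimal sketch, not from the original notebook):
# draws from d should land on state 1 with probability psi[1] = 0.1,
# so with many samples the empirical frequency should be close to that.
draws = rand(d, 10_000)
println(mean(draws .== 1))  # should be roughly 0.1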
In [10]:
function mc_sample_path(P; init=1, sample_size=1000)
    # P is our stochastic matrix; init is the initial state and
    # sample_size is the number of time steps the chain should run.
    X = Vector{Int64}(undef, sample_size)  # allocate the array of states
    X[1] = init  # our initial state

    # Convert each row of P into a distribution we can draw from
    n = size(P, 1)
    P_dist = [DiscreteRV(vec(P[i, :])) for i in 1:n]

    # Generate the sample path: the next state is drawn from the
    # distribution attached to the current state
    for t in 1:(sample_size - 1)
        X[t + 1] = rand(P_dist[X[t]])
    end
    return X
end
# I still want to work through the row-slicing step on a small example,
# since it's mostly the matrix operations I don't get at the moment
# (see the sketch below). At least nothing here has to be vectorised,
# so differential equations and Markov chains work out fine: we can
# write plain loops and simulations efficiently, which is Julia's main
# advantage over Python.
Out[10]:
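In [ ]:
# Added small-scale walkthrough (a sketch, not from the original) of the
# matrix step inside mc_sample_path: P[i, :] pulls out row i of P, i.e.
# the distribution over next states conditional on being in state i,
# and DiscreteRV lets us draw from it.
P_demo = [0.4 0.6; 0.2 0.8]
row1 = vec(P_demo[1, :])   # distribution of X_{t+1} given X_t = 1
d1 = DiscreteRV(row1)
println(rand(d1, 5))       # five draws of the next state, starting from state 1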
In [14]:
# Now we try it out on a simple 2x2 stochastic matrix
P = [0.4 0.6; 0.2 0.8]
X = mc_sample_path(P, sample_size = 10);
println("done")
println(X)
In [16]:
# Now simulate for a long time and compute the fraction of time in state 1
Y = mc_sample_path(P, sample_size = 100000)
println(mean(Y .== 1))
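In [ ]:
# Added sanity check (my addition, using the standard closed form):
# for a two-state chain P = [1-a a; b 1-b] the stationary distribution
# is pi = (b/(a+b), a/(a+b)). Here a = 0.6 and b = 0.2, so the long-run
# fraction of time spent in state 1 should converge to 0.25.
a, b = 0.6, 0.2
println(b / (a + b))  # 0.25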
In [17]:
# We can also use QuantEcon directly to handle our Markov chain. It
# presumably does roughly the same thing as our function, but as a
# library routine it will be better tested and optimised, which is cool.
mc = MarkovChain(P)
X = simulate(mc, 100000)
mean(X .== 1)
Out[17]:
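In [ ]:
# Added cross-check (a sketch, assuming the installed QuantEcon version
# exports stationary_distributions): the first component of the
# stationary distribution should match the simulated frequency above,
# i.e. about 0.25.
psi_star = stationary_distributions(mc)[1]
println(psi_star)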