In [1]:
maxage = 5 # Maximum asset age
repcost = 75 # Replacement cost
mancost = 10 # Maintenance cost
beta = 0.9 # Discount factor
m = 3 # Number of actions; 1: keep, 2: service, 3: replace
Out[1]:
In [2]:
S = Matrix{Int}(undef, maxage^2, 2)
for i in 1:maxage
    for j in 0:maxage-1
        S[(i-1) * maxage + (j+1), :] = [i, j]
    end
end
S
Out[2]:
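As a quick check on the construction above, S should contain one row per (age, servicings) pair:

size(S) == (maxage^2, 2)   # one row for each age 1:maxage and servicings 0:maxage-1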
In [3]:
n = size(S, 1) # Number of states
Out[3]:
In [4]:
# We need a routine to get the index of an age-serv pair
function getindex_0(age, serv, S)
    for i in 1:size(S, 1)
        if S[i, 1] == age && S[i, 2] == serv
            return i
        end
    end
end
Out[4]:
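Because S is filled row by row, the same index can also be obtained in closed form. A minimal sketch, where the helper name getindex_closed is introduced here only for illustration; the linear search above remains the reference:

# Closed-form index, assuming the (age, serv) ordering used to build S
getindex_closed(age, serv) = (age - 1) * maxage + serv + 1
getindex_closed(1, 0) == getindex_0(1, 0, S)   # should be true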
In [5]:
# Profit function of the age and the number of servicings of the asset
function p(age, serv)
    return (1 - (age - serv)/5) * (50 - 2.5 * age - 2.5 * age^2)
end
Out[5]:
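For example, a one-year-old asset that has never been serviced yields p(1, 0) = (1 - 1/5) * (50 - 2.5 - 2.5) = 36.0, while a brand-new asset yields p(0, 0) = 50.0:

p(1, 0), p(0, 0)   # (36.0, 50.0)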
In [6]:
# Reward array
R = Array{Float64}(undef, n, m)
for i in 1:n
    R[i, 1] = p(S[i, 1], S[i, 2])             # keep
    R[i, 2] = p(S[i, 1], S[i, 2]+1) - mancost # service
    R[i, 3] = p(0, 0) - repcost               # replace
end
# Infeasible actions: keeping or servicing is not allowed at the maximum age
for j in n-maxage+1:n
    R[j, 1] = -Inf
    R[j, 2] = -Inf
end
R
Out[6]:
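A quick sanity check: every state should retain at least one feasible (finite-reward) action, since replacement is always allowed:

all(any(isfinite.(R), dims=2))   # should be true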
In [7]:
# (Degenerate) transition probability array
Q = zeros(n, m, n)
for i in 1:n
    Q[i, 1, getindex_0(min(S[i, 1]+1, maxage), S[i, 2], S)] = 1                  # keep: age +1
    Q[i, 2, getindex_0(min(S[i, 1]+1, maxage), min(S[i, 2]+1, maxage-1), S)] = 1 # service: age +1, servicings +1
    Q[i, 3, getindex_0(1, 0, S)] = 1                                             # replace: back to (1, 0)
end
Q
Out[7]:
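Since every action sends the asset to exactly one successor state, each (state, action) slice of Q should sum to one; a quick check:

all(sum(Q, dims=3) .== 1)   # should be true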
In [8]:
using QuantEcon
In [9]:
# Create a DiscreteDP
ddp = DiscreteDP(R, Q, beta)
Out[9]:
In [10]:
# Solve the dynamic optimization problem (by policy iteration)
res = solve(ddp, PFI)
Out[10]:
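Policy iteration is not the only solver exposed by QuantEcon.jl; value iteration (VFI) and modified policy iteration (MPFI) are also available. A sketch comparing the resulting policy, which should normally coincide with the PFI one for this problem:

# Alternative solver: value iteration
res_vfi = solve(ddp, VFI)
res_vfi.sigma == res.sigma   # policies should agree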
In [11]:
# Number of iterations
res.num_iter
Out[11]:
In [12]:
# Optimal policy
res.sigma
Out[12]:
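The solution object also carries the optimal value function, one value per state, which can be inspected alongside the policy:

# Optimal values under the optimal policy
res.v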
In [13]:
# Optimal actions for reachable states (the number of servicings cannot exceed age - 1)
for i in 1:n
    if S[i, 1] > S[i, 2]
        println(S[i, :], " ", res.sigma[i])
    end
end
In [14]:
# Transition matrix of the Markov chain induced by the optimal policy
res.mc.p
Out[14]:
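Since res.mc is the Markov chain induced by the optimal policy, its long-run behavior can be summarized by the stationary distribution (a quick sketch using QuantEcon's stationary_distributions):

# Long-run distribution over states under the optimal policy
stationary_distributions(res.mc)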
In [15]:
# Controlled Markov chain, with the asset age as the state value
mc = MarkovChain(res.mc.p, S[:, 1])
Out[15]:
In [16]:
initial_state_index = getindex_0(1, 0, S)
nyrs = 12
spath_1 = simulate(mc, nyrs+1, init=initial_state_index)
Out[16]:
In [17]:
# Controlled Markov chain, with the number of servicings as the state value
mc = MarkovChain(res.mc.p, S[:, 2])
Out[17]:
In [18]:
initial_state_index = getindex_0(1, 0, S)
nyrs = 12
spath_2 = simulate(mc, nyrs+1, init=initial_state_index)
Out[18]:
In [19]:
using Plots
In [20]:
plotlyjs()
Out[20]:
In [26]:
y = spath_1
plot(0:nyrs, y)
title!("Optimal State Path: Age of Asset")
xaxis!("Year")
yaxis!("Age of Asset", 1:0.5:4)
Out[26]:
In [27]:
y = spath_2
plot(0:nyrs, y)
title!("Optimal State Path: Number of Servicings")
xaxis!("Year")
yaxis!("Number of Servicings", 0:0.5:2)
Out[27]:
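The two series can also be overlaid in a single figure; a sketch using the same Plots.jl calls as above:

# Age and number of servicings along the optimal path in one figure
plot(0:nyrs, [spath_1 spath_2], label=["Age of Asset" "Number of Servicings"])
title!("Optimal State Paths")
xaxis!("Year")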