7.6.3 Asset Replacement with Maintenance


In [1]:
maxage = 5    # Maximum asset age
repcost = 75  # Replacement cost
mancost = 10  # Maintenance cost
beta = 0.9    # Discount factor
m = 3         # Number of actions; 1: keep, 2: service, 3: replace


Out[1]:
3

In [2]:
# State space: all (age, servicings) pairs, enumerated age-major
S = Matrix{Int}(maxage^2, 2)
for i in 1:maxage
    for j in 0:maxage-1
        S[(i-1) * maxage + (j+1), :] = [i, j]
    end
end
S


Out[2]:
25×2 Array{Int64,2}:
 1  0
 1  1
 1  2
 1  3
 1  4
 2  0
 2  1
 2  2
 2  3
 2  4
 3  0
 3  1
 3  2
 3  3
 3  4
 4  0
 4  1
 4  2
 4  3
 4  4
 5  0
 5  1
 5  2
 5  3
 5  4
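
The double loop enumerates the (age, servicings) pairs age-major. A minimal cross-check of the layout with a flattened comprehension (a sketch, not part of the original computation):

In [ ]:
# Each row of S should match the corresponding (age, serv) pair
state_pairs = [(i, j) for i in 1:maxage for j in 0:maxage-1]
@assert all(S[k, 1] == state_pairs[k][1] && S[k, 2] == state_pairs[k][2] for k in 1:maxage^2)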

In [3]:
n = size(S, 1)   # Number of states


Out[3]:
25

In [4]:
# A routine to get the state index of an (age, serv) pair
function getindex_0(age, serv, S)
    for i in 1:size(S, 1)
        if S[i, 1] == age && S[i, 2] == serv
            return i
        end
    end
end


Out[4]:
getindex_0 (generic function with 1 method)
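
Because the grid is age-major, the index can also be computed in closed form instead of by linear search. A sketch with a hypothetical helper getindex_fast (not used below):

In [ ]:
# Closed-form index on the age-major grid (hypothetical helper)
getindex_fast(age, serv) = (age - 1) * maxage + serv + 1
@assert all(getindex_fast(S[i, 1], S[i, 2]) == i for i in 1:n)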

In [5]:
# Profit as a function of the asset's age and its number of servicings (5 = maxage)
function p(age, serv)
    return (1 - (age - serv)/5) * (50 - 2.5 * age - 2.5 * age^2)
end


Out[5]:
p (generic function with 1 method)
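
A quick worked example: a new, unserviced asset has age 1 and serv 0, so p(1, 0) = (1 - 1/5)(50 - 2.5 - 2.5) = 0.8 × 45 = 36, which matches R[1, 1] below.

In [ ]:
# Spot-check the profit of a new, unserviced asset
p(1, 0)  # expected: 36.0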

In [6]:
# Reward array
R = Array{Float64}(n, m)
for i in 1:n
    R[i, 1] = p(S[i, 1], S[i, 2])
    R[i, 2] = p(S[i, 1], S[i, 2]+1) - mancost
    R[i, 3] = p(0, 0) - repcost
end

# Keep and service are infeasible at the maximum age; the asset must be replaced
for j in n-maxage+1:n
    R[j, 1] = -Inf
    R[j, 2] = -Inf
end

R


Out[6]:
25×3 Array{Float64,2}:
   36.0    35.0  -25.0
   45.0    44.0  -25.0
   54.0    53.0  -25.0
   63.0    62.0  -25.0
   72.0    71.0  -25.0
   21.0    18.0  -25.0
   28.0    25.0  -25.0
   35.0    32.0  -25.0
   42.0    39.0  -25.0
   49.0    46.0  -25.0
    8.0     2.0  -25.0
   12.0     6.0  -25.0
   16.0    10.0  -25.0
   20.0    14.0  -25.0
   24.0    18.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0
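
Replacing always scraps the current asset and earns p(0, 0) - repcost = 50 - 75 = -25 regardless of the state, so the third column should be constant; a sketch of that check:

In [ ]:
# The replacement reward is state-independent
@assert all(R[:, 3] .== p(0, 0) - repcost)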

In [7]:
# (Degenerate) transition probability array
Q = zeros(n, m, n)
for i in 1:n
    Q[i, 1, getindex_0(min(S[i, 1]+1, maxage), S[i, 2], S)] = 1
    Q[i, 2, getindex_0(min(S[i, 1]+1, maxage), min(S[i, 2]+1, maxage-1), S)] = 1
    Q[i, 3, getindex_0(1, 0, S)] = 1
end

Q


Out[7]:
25×3×25 Array{Float64,3}:
[:, :, 1] =
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0
 0.0  0.0  1.0

[:, :, 2] =
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0

[:, :, 3] =
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0

...

[:, :, 23] =
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  1.0  0.0
 1.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  1.0  0.0
 1.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0

[:, :, 24] =
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  1.0  0.0
 1.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  1.0  0.0
 1.0  0.0  0.0
 0.0  0.0  0.0

[:, :, 25] =
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  1.0  0.0
 1.0  1.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  0.0  0.0
 0.0  1.0  0.0
 1.0  1.0  0.0
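
Since every (state, action) pair leads to exactly one successor state, each conditional distribution Q[i, a, :] must sum to one; a minimal sanity check (a sketch):

In [ ]:
# Each row of the (degenerate) transition array is a valid distribution
@assert all(sum(Q[i, a, :]) == 1.0 for i in 1:n, a in 1:m)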

In [8]:
using QuantEcon

In [9]:
# Create a DiscreteDP
ddp = DiscreteDP(R, Q, beta)


Out[9]:
QuantEcon.DiscreteDP{Float64,3,2,Float64,Int64}([36.0 35.0 -25.0; 45.0 44.0 -25.0; … ; -Inf -Inf -25.0; -Inf -Inf -25.0], [0.0 0.0 1.0; 0.0 0.0 1.0; … ; 0.0 0.0 1.0; 0.0 0.0 1.0]

[0.0 0.0 0.0; 0.0 0.0 0.0; … ; 0.0 0.0 0.0; 0.0 0.0 0.0]

[0.0 0.0 0.0; 0.0 0.0 0.0; … ; 0.0 0.0 0.0; 0.0 0.0 0.0]

...

[0.0 0.0 0.0; 0.0 0.0 0.0; … ; 0.0 0.0 0.0; 0.0 0.0 0.0]

[0.0 0.0 0.0; 0.0 0.0 0.0; … ; 1.0 0.0 0.0; 0.0 0.0 0.0]

[0.0 0.0 0.0; 0.0 0.0 0.0; … ; 0.0 1.0 0.0; 1.0 1.0 0.0], 0.9, Nullable{Array{Int64,1}}(), Nullable{Array{Int64,1}}())
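
For reference, DiscreteDP looks for the value function v and policy satisfying the Bellman equation, in the notation of the arrays built above:

$$v(s) \;=\; \max_{a \in \{1, 2, 3\}} \Big\{ R(s, a) + \beta \sum_{s'} Q(s, a, s')\, v(s') \Big\}$$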

In [10]:
# Solve the dynamic optimization problem (by policy iteration)
res = solve(ddp, PFI)


Out[10]:
QuantEcon.DPSolveResult{QuantEcon.PFI,Float64}([151.89, 170.43, 188.97, 206.97, 216.97, 121.531, 129.878, 140.478, 151.078, 161.078  …  111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701], [151.89, 170.43, 188.97, 206.97, 216.97, 121.531, 129.878, 140.478, 151.078, 161.078  …  111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701, 111.701], 3, [2, 2, 2, 2, 1, 1, 2, 2, 2, 1  …  3, 3, 3, 3, 3, 3, 3, 3, 3, 3], Discrete Markov Chain
stochastic matrix of type Array{Float64,2}:
[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 1.0 0.0 … 0.0 0.0; 1.0 0.0 … 0.0 0.0])

In [11]:
# Number of iterations
res.num_iter


Out[11]:
3
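
Policy iteration converged in three steps. As a cross-check (a sketch, not executed here), value function iteration should reach the same optimal policy, typically in more iterations:

In [ ]:
# Cross-check: VFI should agree with PFI on the optimal policy
res_vfi = solve(ddp, VFI)
res_vfi.sigma == res.sigma  # expected: true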

In [12]:
# Optimal policy
res.sigma


Out[12]:
25-element Array{Int64,1}:
 2
 2
 2
 2
 1
 1
 2
 2
 2
 1
 3
 1
 1
 1
 1
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
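
The codes follow the convention from In [1] (1: keep, 2: service, 3: replace). An illustrative helper (not part of QuantEcon) to display the policy as labels:

In [ ]:
# Translate action indices into labels for readability
action_labels = ["keep", "service", "replace"]
[action_labels[a] for a in res.sigma]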

In [13]:
# Optimal actions for reachable states (servicings cannot exceed age - 1)
for i in 1:n
    if S[i, 1] > S[i, 2]
        println(S[i, :], ": ", res.sigma[i])
    end
end


[1, 0]: 2
[2, 0]: 1
[2, 1]: 2
[3, 0]: 3
[3, 1]: 1
[3, 2]: 1
[4, 0]: 3
[4, 1]: 3
[4, 2]: 3
[4, 3]: 3
[5, 0]: 3
[5, 1]: 3
[5, 2]: 3
[5, 3]: 3
[5, 4]: 3

In [14]:
res.mc.p


Out[14]:
25×25 Array{Float64,2}:
 0.0  0.0  0.0  0.0  0.0  0.0  1.0  0.0  …  0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  1.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  …  0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  …  0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     1.0  0.0  0.0  0.0  0.0  0.0  0.0
 0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  1.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  …  0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  …  0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
 1.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0     0.0  0.0  0.0  0.0  0.0  0.0  0.0
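
The long-run behavior of the controlled chain can also be read from its stationary distribution, which concentrates on the states visited by the optimal cycle; a sketch using QuantEcon's stationary_distributions:

In [ ]:
# Stationary distribution(s) of the controlled chain
stationary_distributions(res.mc)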

In [15]:
# Controlled Markov chain with the asset age (column 1 of S) as state values
mc = MarkovChain(res.mc.p, S[:,1])


Out[15]:
Discrete Markov Chain
stochastic matrix of type Array{Float64,2}:
[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 1.0 0.0 … 0.0 0.0; 1.0 0.0 … 0.0 0.0]

In [23]:
# Note: MarkovChain requires state values as an AbstractVector,
# so passing the full matrix S raises a TypeError:
mc = MarkovChain(res.mc.p, S)


TypeError: Type: in TV, expected TV<:(AbstractArray{T,1} where T), got Type{Array{Int64,2}}

Stacktrace:
 [1] QuantEcon.MarkovChain(::Array{Float64,2}, ::Array{Int64,2}) at C:\Users\直之\.julia\v0.6\QuantEcon\src\markov\mc_tools.jl:58

In [16]:
# Simulate the age path for 12 years, starting from a new asset (1, 0)
initial_state_index = getindex_0(1, 0, S)
nyrs = 12
spath_1 = simulate(mc, nyrs+1, init=initial_state_index)


Out[16]:
13-element Array{Int64,1}:
 1
 2
 3
 4
 1
 2
 3
 4
 1
 2
 3
 4
 1

In [17]:
# Controlled Markov chain with the number of servicings (column 2 of S) as state values
mc = MarkovChain(res.mc.p, S[:,2])


Out[17]:
Discrete Markov Chain
stochastic matrix of type Array{Float64,2}:
[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 1.0 0.0 … 0.0 0.0; 1.0 0.0 … 0.0 0.0]

In [18]:
# Simulate the servicing path over the same horizon
initial_state_index = getindex_0(1, 0, S)
nyrs = 12
spath_2 = simulate(mc, nyrs+1, init=initial_state_index)


Out[18]:
13-element Array{Int64,1}:
 0
 1
 2
 2
 0
 1
 2
 2
 0
 1
 2
 2
 0
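
The two simulated paths can be viewed together as (age, servicings) pairs, making the four-year cycle explicit: the asset is serviced in its first two years, kept in its third, and replaced at age 4. An illustrative combination:

In [ ]:
# Pair up the simulated age and servicing paths year by year
collect(zip(spath_1, spath_2))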

In [19]:
using Plots

In [20]:
plotlyjs()



Out[20]:
Plots.PlotlyJSBackend()

In [26]:
y = spath_1
plot(0:nyrs, y)
title!("Optimal State Path: Age of Asset")
xaxis!("Year")
yaxis!("Age of Asset", 1:0.5:4)


Out[26]:
[Plot: "Optimal State Path: Age of Asset", asset age vs. year; the age cycles 1, 2, 3, 4 over the 12-year horizon]
In [27]:
y = spath_2
plot(0:nyrs, y)
title!("Optimal State Path: Number of Servicings")
xaxis!("Year")
yaxis!("Number of Servicings", 0:0.5:2)


Out[27]:
[Plot: "Optimal State Path: Number of Servicings", servicings vs. year; the count cycles 0, 1, 2, 2 over the 12-year horizon]