DiscreteDP Example: Asset Replacement with Maintenance

Naoyuki Tsuchiya

Department of Economics, University of Tokyo

From Miranda and Fackler, Applied Computational Economics and Finance, 2002, Section 7.6.3

Julia translation of the Python version


In [1]:
maxage = 5    # Maximum asset age (asset must be replaced at this age)
repcost = 75  # Replacement cost
mancost = 10  # Maintenance cost (per servicing)
beta = 0.9    # Discount factor
m = 3         # Number of actions; 1: keep, 2: service, 3: replace ;

In [2]:
# State space: all (age, servicings) pairs with age in 1:maxage and
# servicings in 0:maxage-1, ordered with age outermost (servicings
# varying fastest), matching column-major `vec` order.
S = [(age, serv) for age in 1:maxage for serv in 0:maxage-1]
S   # State space


Out[2]:
25-element Array{Tuple{Int64,Int64},1}:
 (1, 0)
 (1, 1)
 (1, 2)
 (1, 3)
 (1, 4)
 (2, 0)
 (2, 1)
 (2, 2)
 (2, 3)
 (2, 4)
 (3, 0)
 (3, 1)
 (3, 2)
 (3, 3)
 (3, 4)
 (4, 0)
 (4, 1)
 (4, 2)
 (4, 3)
 (4, 4)
 (5, 0)
 (5, 1)
 (5, 2)
 (5, 3)
 (5, 4)

In [3]:
n = length(S)   # Number of states


Out[3]:
25

In [4]:
# We need a routine to get the index of an age-serv pair.
#
# Return the index in the state vector `S` of the state whose age is `age`
# and whose number of servicings is `serv`, or `nothing` if no such state
# exists. Searches `S` itself rather than relying on the global `n`, so it
# works for any state vector, not just the one built above.
function getindex_0(age, serv, S)
    for i in eachindex(S)
        if S[i][1] == age && S[i][2] == serv
            return i
        end
    end
    return nothing
end


Out[4]:
getindex_0 (generic function with 1 method)

In [5]:
# Profit from operating an asset of age `age` that has been
# serviced `serv` times (Miranda & Fackler, Sec. 7.6.3)
function p(age, serv)
    wear = 1 - (age - serv) / 5
    capacity = 50 - 2.5 * age - 2.5 * age^2
    return wear * capacity
end


Out[5]:
p (generic function with 1 method)

In [6]:
# Reward array: one row per state, one column per action
# (1: keep, 2: service, 3: replace)
R = Array{Float64}(n, m)
for idx in 1:n
    age, serv = S[idx]
    R[idx, 1] = p(age, serv)                 # keep as is
    R[idx, 2] = p(age, serv + 1) - mancost   # service, paying the cost
    R[idx, 3] = p(0, 0) - repcost            # replace with a new asset
end

# Keeping or servicing is infeasible in the last maxage states
# (the asset at its maximum age): mark those rewards -Inf
for idx in n-maxage+1:n
    R[idx, 1] = -Inf
    R[idx, 2] = -Inf
end
R


Out[6]:
25×3 Array{Float64,2}:
   36.0    35.0  -25.0
   45.0    44.0  -25.0
   54.0    53.0  -25.0
   63.0    62.0  -25.0
   72.0    71.0  -25.0
   21.0    18.0  -25.0
   28.0    25.0  -25.0
   35.0    32.0  -25.0
   42.0    39.0  -25.0
   49.0    46.0  -25.0
    8.0     2.0  -25.0
   12.0     6.0  -25.0
   16.0    10.0  -25.0
   20.0    14.0  -25.0
   24.0    18.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
    0.0   -10.0  -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0
 -Inf    -Inf    -25.0

In [7]:
# (Degenerate) transition probability array: each (state, action) pair
# moves to exactly one successor state with probability 1
Q = zeros(n, m, n)
for idx in 1:n
    age, serv = S[idx]
    # keep: age advances (capped at maxage), servicings unchanged
    Q[idx, 1, getindex_0(min(age + 1, maxage), serv, S)] = 1
    # service: age advances, servicings increase (capped at maxage-1)
    Q[idx, 2, getindex_0(min(age + 1, maxage), min(serv + 1, maxage - 1), S)] = 1
    # replace: back to a new, unserviced asset
    Q[idx, 3, getindex_0(1, 0, S)] = 1
end

In [8]:
using QuantEcon

In [9]:
# Create a DiscreteDP from the reward array R, transition array Q,
# and discount factor beta
ddp = DiscreteDP(R, Q, beta);

In [10]:
# Solve the dynamic optimization problem (by policy function iteration)
res = solve(ddp, PFI);

In [11]:
# Number of policy iteration steps until convergence
res.num_iter


Out[11]:
3

In [12]:
# Optimal policy: action index (1: keep, 2: service, 3: replace)
# for each of the n states
res.sigma


Out[12]:
25-element Array{Int64,1}:
 2
 2
 2
 2
 1
 1
 2
 2
 2
 1
 3
 1
 1
 1
 1
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3

In [13]:
# Optimal actions for reachable states: a state is reachable only if
# the asset's age exceeds its number of servicings
for (state, action) in zip(S, res.sigma)
    if state[1] > state[2]
        print(state, action)
    end
end


(1, 0)2(2, 0)1(2, 1)2(3, 0)3(3, 1)1(3, 2)1(4, 0)3(4, 1)3(4, 2)3(4, 3)3(5, 0)3(5, 1)3(5, 2)3(5, 3)3(5, 4)3

In [14]:
# Markov chain induced by the optimal policy, with states labeled
# by their (age, servicings) pairs
mc = MarkovChain(res.mc.p, S)


Out[14]:
Discrete Markov Chain
stochastic matrix of type Array{Float64,2}:
[0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 1.0 0.0 … 0.0 0.0; 1.0 0.0 … 0.0 0.0]

In [15]:
# Index of the initial state: a brand-new, unserviced asset (age 1, 0 servicings)
initial_state_value = getindex_0(1, 0, S)
nyrs = 12   # Simulation horizon in years
# nyrs+1 states: the initial state plus nyrs transitions
spath = simulate(mc, nyrs+1, init=initial_state_value)


Out[15]:
13-element Array{Tuple{Int64,Int64},1}:
 (1, 0)
 (2, 1)
 (3, 2)
 (4, 2)
 (1, 0)
 (2, 1)
 (3, 2)
 (4, 2)
 (1, 0)
 (2, 1)
 (3, 2)
 (4, 2)
 (1, 0)

In [16]:
using Plots

In [17]:
plotlyjs()


Plotly javascript loaded.

To load again call

init_notebook(true)

Out[17]:
Plots.PlotlyJSBackend()

In [18]:
# Split the simulated (age, servicings) path into two coordinate vectors.
# `spath` holds nyrs+1 states (the initial state plus nyrs transitions),
# so collect all nyrs+1 of them — otherwise the final state is dropped and
# the vectors do not match the 0:nyrs axis used for plotting below.
spath_1 = Vector{Int}(nyrs+1)
for i in 1:nyrs+1
    spath_1[i] = spath[i][1]   # asset age in year i-1
end

spath_2 = Vector{Int}(nyrs+1)
for j in 1:nyrs+1
    spath_2[j] = spath[j][2]   # number of servicings in year j-1
end

In [19]:
# Plot the asset's age along the simulated optimal path
y = spath_1
plot(0:nyrs, y)
title!("Optimal State Path: Age of Asset")
xaxis!("Year")
yaxis!("Age of Asset", 1:0.5:4)


Out[19]:

In [20]:
# Plot the number of servicings along the simulated optimal path
y = spath_2
plot(0:nyrs, y)
title!("Optimal State Path: Number of Servicings")
xaxis!("Year")
yaxis!("Number of Servicings", 0:0.5:2)


Out[20]: