In [4]:
# QuantEcon code for solving the optimal growth (consumption) problem
# I added some explanatory notes to the original code
%matplotlib inline
"""
Filename: optgrowth_v0.py
Authors: John Stachurski and Thomas Sargent
A first pass at solving the optimal growth problem via value function
iteration. A more general version is provided in optgrowth.py.
"""
from __future__ import division # Omit for Python 3.x
import matplotlib.pyplot as plt
import numpy as np
from numpy import log
from scipy.optimize import fminbound
from scipy import interp
# Primitives and grid
alpha = 0.65
beta = 0.95
grid_max = 2
grid_size = 150
grid = np.linspace(1e-6, grid_max, grid_size)
# Exact solution
ab = alpha * beta
c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta)
c2 = alpha / (1 - ab)
def v_star(k):
return c1 + c2 * log(k)
# this function carries out steps 2 and 3 of fitted value function iteration
def bellman_operator(w):
"""
The approximate Bellman operator, which computes and returns the updated
value function Tw on the grid points.
* w is a flat NumPy array with len(w) = len(grid)
The vector w represents the value of the input function on the grid
points.
"""
# === Apply linear interpolation to w === #
# interp performs linear interpolation (NumPy's routine, re-exported by SciPy)
# interp(x, grid, w) returns the interpolated values of w at the points x
# this is step 2: constructing the interpolant w_hat
Aw = lambda x: interp(x, grid, w)
# === set Tw[i] equal to max_c { log(c) + beta w(f(k_i) - c)} === #
Tw = np.empty(grid_size)
# k is the current state (the capital stock)
for i, k in enumerate(grid):
# to maximize, we minimize the objective multiplied by -1
objective = lambda c: - log(c) - beta * Aw(k**alpha - c)
# fminbound returns the minimizer of the objective over the given interval
c_star = fminbound(objective, 1e-6, k**alpha)
# this update step is analogous to the one in the shortest-path problem
Tw[i] = - objective(c_star)
return Tw
# === If file is run directly, not imported, produce figure === #
if __name__ == '__main__':
w = 5 * log(grid) - 25 # An initial condition -- fairly arbitrary
# in practice the functional form of the value function is not known in advance,
# so an arbitrary initial guess like this one needs many iterations to converge
n = 35
fig, ax = plt.subplots()
ax.set_ylim(-40, -20)
ax.set_xlim(np.min(grid), np.max(grid))
lb = 'initial condition'
ax.plot(grid, w, color=plt.cm.jet(0), lw=2, alpha=0.6, label=lb)
for i in range(n):
w = bellman_operator(w)
ax.plot(grid, w, color=plt.cm.jet(i / n), lw=2, alpha=0.6)
lb = 'true value function'
ax.plot(grid, v_star(grid), 'k-', lw=2, alpha=0.8, label=lb)
ax.legend(loc='upper left')
plt.show()
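Since the exact solution v_star is available here, a quick follow-up check is natural. The lines below are a sketch, not part of the original lecture code; they assume the cell above has just run, so `w` (the 35th iterate), `grid`, and `v_star` are still in scope.

# Sketch: compare the 35th iterate with the exact value function on the grid
max_error = np.max(np.abs(w - v_star(grid)))
print("sup-norm distance after 35 iterations: {:.4f}".format(max_error))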
In [ ]:
from __future__ import division # Omit for Python 3.x
import matplotlib.pyplot as plt
import numpy as np
from numpy import log
from scipy.optimize import fminbound
from scipy import interp
# Primitives and grid
alpha = 0.65
beta = 0.95
grid_max = 2
grid_size = 150
grid = np.linspace(1e-6, grid_max, grid_size)
# Exact solution
ab = alpha * beta
c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta)
c2 = alpha / (1 - ab)
def v_star(k):
return c1 + c2 * log(k)
def bellman_operator(w):
"""
The approximate Bellman operator, which computes and returns the updated
value function Tw on the grid points.
* w is a flat NumPy array with len(w) = len(grid)
The vector w represents the value of the input function on the grid
points.
"""
# === Apply linear interpolation to w === #
Aw = lambda x: interp(x, grid, w)
# === set Tw[i] equal to max_c { log(c) + beta w(f(k_i) - c)} === #
Tw = np.empty(grid_size)
for i, k in enumerate(grid):
objective = lambda c: - log(c) - beta * Aw(k**alpha - c)
c_star = fminbound(objective, 1e-6, k**alpha)
Tw[i] = - objective(c_star)
return Tw
# === If file is run directly, not imported, produce figure === #
if __name__ == '__main__':
w = grid # an arbitrary initial condition; here we cannot guess the form of the solution
n = 70
fig, ax = plt.subplots()
ax.set_ylim(-40, -20)
ax.set_xlim(np.min(grid), np.max(grid))
lb = 'initial condition'
ax.plot(grid, w, color=plt.cm.jet(0), lw=2, alpha=0.6, label=lb)
for i in range(n):
w = bellman_operator(w)
ax.plot(grid, w, color=plt.cm.jet(i / n), lw=2, alpha=0.6)
lb = 'true value function'
ax.plot(grid, v_star(grid), 'k-', lw=2, alpha=0.8, label=lb)
ax.legend(loc='upper left')
plt.show()
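The Bellman operator is a contraction of modulus beta in the sup norm, so successive iterates should approach each other at a geometric rate. The following diagnostic is a sketch of my own (reusing `bellman_operator` and `grid` from the cell above), not part of the original code.

# Sketch: track the sup-norm distance between successive iterates
w = grid                      # same initial condition as in the cell above
distances = []
for i in range(70):
    w_new = bellman_operator(w)
    distances.append(np.max(np.abs(w_new - w)))
    w = w_new
print(distances[:5], distances[-5:])   # early vs. late distances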
In [1]:
%matplotlib inline
In [21]:
# exercise 1
"""
Filename: optgrowth_v0.py
Authors: John Stachurski and Thomas Sargent
A first pass at solving the optimal growth problem via value function
iteration. A more general version is provided in optgrowth.py.
"""
from __future__ import division # Omit for Python 3.x
import matplotlib.pyplot as plt
import numpy as np
from numpy import log
from scipy.optimize import fminbound
from scipy import interp
# Primitives and grid
alpha = 0.65
beta = 0.95
grid_max = 2
grid_size = 150
grid = np.linspace(1e-6, grid_max, grid_size)
# Exact solution
ab = alpha * beta
c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta)
c2 = alpha / (1 - ab)
def v_star(k):
return c1 + c2 * log(k)
def bellman_operator(w):
# === Apply linear interpolation to w === #
Aw = lambda x: interp(x, grid, w)
# === set Tw[i] equal to max_c { log(c) + beta w(f(k_i) - c)} === #
Tw = np.empty(grid_size)
policy = np.empty(len(w))
box = {}
for i, k in enumerate(grid):
objective = lambda c: - log(c) - beta * Aw(k**alpha - c)
c_star = fminbound(objective, 1e-6, k**alpha)
policy[i] = c_star
Tw[i] = - objective(c_star)
box[0] = Tw
box[1] = policy
return box
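# (Added note, not in the original) bellman_operator now returns a dict:
# box[0] is the updated value function Tw and box[1] is the greedy policy.
# Calling it once per iteration, e.g.
#     result = bellman_operator(w)
#     w, p = result[0], result[1]
# avoids evaluating the operator twice, as the loops below do.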
# === If file is run directly, not imported, produce figure === #
if __name__ == '__main__':
w = 5 * log(grid) - 25
true_sigma = (1 - alpha * beta) * grid**alpha
plt.subplot(311)
for i in range(2):
p = bellman_operator(w)[1]
w = bellman_operator(w)[0]
plt.plot(grid, p, "b-", lw=2, alpha=0.8, label = "approximate policy function")
plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
plt.legend(loc = "upper left")
t = 5 * log(grid) - 25
plt.subplot(312)
for i in range(4):
p = bellman_operator(t)[1]
t = bellman_operator(t)[0]
plt.plot(grid, p, "b-", lw=2, alpha=0.8, label = "approximate policy function")
plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
plt.legend(loc = "upper left")
s = 5 * log(grid) - 25
plt.subplot(313)
for i in range(6):
p = bellman_operator(s)[1]
s = bellman_operator(s)[0]
plt.plot(grid, p, "b-", lw=2, alpha=0.8, label = "approximate policy function")
plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
plt.legend(loc = "upper left")
plt.show()
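To put numbers on how quickly the greedy policy approaches the true one, a small check can be added. This is a sketch of mine, assuming `bellman_operator`, `grid`, and `true_sigma` from the cell above are still in scope.

# Sketch: sup-norm error of the approximate policy after each Bellman iteration
w = 5 * log(grid) - 25
for i in range(6):
    result = bellman_operator(w)   # one call per iteration
    w, p = result[0], result[1]
    print("iteration {}: max policy error = {:.5f}".format(
        i + 1, np.max(np.abs(p - true_sigma))))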
In [23]:
# exercise 1 policy function estimation
alpha = 0.65
beta = 0.95
grid_max = 2
grid_size = 150
grid = np.linspace(1e-6, grid_max, grid_size)
# Exact solution
ab = alpha * beta
c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta)
c2 = alpha / (1 - ab)
def bellman_operator(w):
Aw = lambda x: interp(x, grid, w)
Tw = np.empty(grid_size)
policy = np.empty(len(w))
box = {}
for i, k in enumerate(grid):
objective = lambda c: - log(c) - beta * Aw(k**alpha - c)
c_star = fminbound(objective, 1e-6, k**alpha)
policy[i] = c_star
Tw[i] = - objective(c_star)
box[0] = Tw
box[1] = policy
return box
if __name__ == '__main__':
w = 5 * log(grid) - 25
true_sigma = (1 - alpha * beta) * grid**alpha
plt.subplots()
for i in range(2):
p = bellman_operator(w)[1]
w = bellman_operator(w)[0]
plt.plot(grid, p, "b-", lw=2, alpha=0.8)
plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
plt.legend(loc = "upper left")
plt.show()
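The next three cells repeat this experiment with 4, 6, and 8 Bellman iterations; only the range changes. A small helper along the following lines would avoid the duplication. This is a sketch; the name `plot_policy_after` is mine and not part of the original code, and it assumes `bellman_operator`, `grid`, and `true_sigma` are already defined.

# Sketch of a helper that reproduces each of the cells below for a given n_iter
def plot_policy_after(n_iter):
    plt.subplots()
    w = 5 * log(grid) - 25
    for i in range(n_iter):
        result = bellman_operator(w)
        w, p = result[0], result[1]
        plt.plot(grid, p, "b-", lw=2, alpha=0.8)
    plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
    plt.legend(loc="upper left")
    plt.show()

# e.g. plot_policy_after(4) reproduces the next cell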
In [24]:
w = 5 * log(grid) - 25
true_sigma = (1 - alpha * beta) * grid**alpha
plt.subplots()
for i in range(4):
p = bellman_operator(w)[1]
w = bellman_operator(w)[0]
plt.plot(grid, p, "b-", lw=2, alpha=0.8)
plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
plt.legend(loc = "upper left")
plt.show()
In [25]:
w = 5 * log(grid) - 25
true_sigma = (1 - alpha * beta) * grid**alpha
plt.subplots()
for i in range(6):
p = bellman_operator(w)[1]
w = bellman_operator(w)[0]
plt.plot(grid, p, "b-", lw=2, alpha=0.8)
plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
plt.legend(loc = "upper left")
plt.show()
In [26]:
w = 5 * log(grid) - 25
true_sigma = (1 - alpha * beta) * grid**alpha
plt.subplots()
for i in range(8):
p = bellman_operator(w)[1]
w = bellman_operator(w)[0]
plt.plot(grid, p, "b-", lw=2, alpha=0.8)
plt.plot(grid, true_sigma, 'k-', lw=2, alpha=0.8, label='true optimal policy')
plt.legend(loc = "upper left")
plt.show()
In [36]:
# exercise 2
alpha = 0.65
grid_max = 2
grid_size = 150
grid = np.linspace(1e-6, grid_max, grid_size)
t = 5 * log(grid) - 25
plt.subplots(figsize=(8, 5))
def bellman_operator(w):
# === Apply linear interpolation to w === #
Aw = lambda x: interp(x, grid, w)
# === set Tw[i] equal to max_c { log(c) + beta w(f(k_i) - c)} === #
Tw = np.empty(grid_size)
policy = np.empty(len(w))
box = {}
for i, k in enumerate(grid):
objective = lambda c: - log(c) - beta * Aw(k**alpha - c)
c_star = fminbound(objective, 1e-6, k**alpha)
policy[i] = c_star
Tw[i] = - objective(c_star)
box[0] = Tw
box[1] = policy
return box
for i in [0.9, 0.94, 0.98]:
beta = i
for j in range(35):
p = bellman_operator(t)[1]
t = bellman_operator(t)[0]
policy_function = lambda x : interp(x, grid, p)
k = np.empty(len(t))
k[0] = 0.1
for h in range(1, len(t)):
k[h] = k[h-1]**alpha - policy_function(k[h-1])
plt.plot(k, "o-", label=r'$\beta = {}$'.format(beta))
plt.legend(loc = "lower right")
plt.show()
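With log utility and f(k) = k**alpha, the true policy c = (1 - alpha*beta) * k**alpha implies k_{t+1} = alpha*beta * k_t**alpha, which converges to the steady state k* = (alpha*beta)**(1/(1 - alpha)). The simulated paths above (which use the approximate policy) should level off near these values. The check below is a sketch of my own, reusing `alpha` and the three beta values from the cell above.

# Sketch: analytical steady-state capital implied by the true policy, for each beta
for b in [0.9, 0.94, 0.98]:
    k_star = (alpha * b) ** (1 / (1 - alpha))
    print("beta = {}: analytical steady-state capital = {:.3f}".format(b, k_star))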
In [ ]: