quant-econ Solutions: The Kalman Filter


In [1]:
%matplotlib inline

In [2]:
import numpy as np
import matplotlib.pyplot as plt
from quantecon import Kalman
from scipy.stats import norm

Exercise 1
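
In this exercise the hidden state is the constant scalar $\theta$ (note $A = 1$ and $Q = 0$), observed each period with unit-variance Gaussian noise. Starting from the prior $N(8, 1)$, the cell below plots the first five predictive densities, which tighten around $\theta = 10$ as observations accumulate.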


In [3]:
# == Parameters == #

theta = 10
A, G, Q, R = 1, 1, 0, 1
x_hat_0, Sigma_0 = 8, 1

# == Initialize Kalman filter == #

kalman = Kalman(A, G, Q, R)
kalman.set_state(x_hat_0, Sigma_0)

# == Run == #

N = 5
fig, ax = plt.subplots(figsize=(10,8))
xgrid = np.linspace(theta - 5, theta + 2, 200)
for i in range(N):
    # Record the current predicted mean and variance, and plot the implied density
    m, v = kalman.current_x_hat, kalman.current_Sigma
    m, v = float(m), float(v)
    ax.plot(xgrid, norm.pdf(xgrid, loc=m, scale=np.sqrt(v)), label=r'$t=%d$' % i)
    # Generate the noisy signal
    y = theta + norm.rvs(size=1)
    # Update the Kalman filter
    kalman.update(y)

ax.set_title(r'First %d densities when $\theta = %.1f$' % (N, theta)) 
ax.legend(loc='upper left')


Out[3]:
<matplotlib.legend.Legend at 0x5119c50>
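
Since $A = G = 1$ and $Q = 0$, the predictive variance follows the scalar Riccati recursion $\Sigma_{t+1} = \Sigma_t R / (\Sigma_t + R)$; with $\Sigma_0 = R = 1$ this gives $\Sigma_t = 1/(t+1)$, so the densities above collapse onto $\theta$ at rate $1/t$. A minimal sketch verifying this closed form (independent of the Kalman class):

In [ ]:
# == Check the closed form for the predictive variance == #
# With A = G = 1 and Q = 0 the recursion is
#     Sigma_{t+1} = Sigma_t * R / (Sigma_t + R),
# so Sigma_0 = R = 1 implies Sigma_t = 1 / (t + 1)
Sigma_t = 1.0
for t in range(N):
    print("t = %d, Sigma_t = %.4f, 1/(t+1) = %.4f" % (t, Sigma_t, 1.0 / (t + 1)))
    Sigma_t = Sigma_t * 1.0 / (Sigma_t + 1.0)  # R = 1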

Exercise 2
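
With the same scalar setup, this cell tracks $z_t$, the probability mass that the date-$t$ predictive density places outside the interval $(\theta - \epsilon, \theta + \epsilon)$ with $\epsilon = 0.1$, over $T = 600$ periods. As the filter learns $\theta$, $z_t$ falls towards zero.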


In [4]:
from scipy.integrate import quad

epsilon = 0.1
kalman = Kalman(A, G, Q, R)
kalman.set_state(x_hat_0, Sigma_0)

T = 600
z = np.empty(T)
for t in range(T):
    # Record the current predicted mean and variance
    m, v = kalman.current_x_hat, kalman.current_Sigma
    m, v = float(m), float(v)
    # Compute the mass the predictive density puts outside (theta - eps, theta + eps)
    f = lambda x: norm.pdf(x, loc=m, scale=np.sqrt(v))
    integral, error = quad(f, theta - epsilon, theta + epsilon)
    z[t] = 1 - integral
    # Generate the noisy signal and update the Kalman filter
    kalman.update(theta + norm.rvs(size=1))

fig, ax = plt.subplots(figsize=(9, 7))
ax.set_ylim(0, 1)
ax.set_xlim(0, T)
ax.plot(range(T), z) 
ax.fill_between(range(T), np.zeros(T), z, color="blue", alpha=0.2)


Out[4]:
<matplotlib.collections.PolyCollection at 0x5787f90>
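
Since each predictive density is Gaussian, the quadrature above can be replaced by an exact expression using the normal CDF. A sketch of the equivalent closed-form computation for a single period, reusing the m and v left in scope by the final loop iteration:

In [ ]:
# == Closed-form alternative to quad == #
# The mass inside (theta - eps, theta + eps) is a difference of normal CDFs
s = np.sqrt(v)
z_exact = 1 - (norm.cdf(theta + epsilon, loc=m, scale=s)
               - norm.cdf(theta - epsilon, loc=m, scale=s))
print(z_exact)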

Exercise 3
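
Here the state is two-dimensional, with Gaussian shocks hitting both the transition and the observation equations. The cell below verifies that the eigenvalues of $A$ lie strictly inside the unit circle (so a stationary prediction error variance exists), prints that stationary variance, and then compares the squared error of the Kalman prediction $\hat{x}_t$ with that of the one-step forecast $A x_{t-1}$ computed with knowledge of the true lagged state.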


In [5]:
from __future__ import print_function  # Remove for Python 3.x
from numpy.random import multivariate_normal
from scipy.linalg import eigvals


# === Define A, Q, G, R === #
G = np.eye(2)
R = 0.5 * np.eye(2)
A = [[0.5, 0.4], 
     [0.6, 0.3]]
Q = 0.3 * np.eye(2)

# === Define the prior density === #
Sigma = [[0.9, 0.3], 
         [0.3, 0.9]]
Sigma = np.array(Sigma)
x_hat = np.array([8, 8])

# === Initialize the Kalman filter === #
kn = Kalman(A, G, Q, R)
kn.set_state(x_hat, Sigma)

# === Set the true initial value of the state === #
x = np.zeros(2)

# == Print eigenvalues of A == #
print("Eigenvalues of A:")
print(eigvals(A))

# == Print stationary Sigma == #
S, K = kn.stationary_values()
print("Stationary prediction error variance:")
print(S)

# === Generate the plot === #
T = 50
e1 = np.empty(T)
e2 = np.empty(T)
for t in range(T):
    # == Generate signal and update prediction == #
    y = multivariate_normal(mean=np.dot(G, x), cov=R)
    kn.update(y)
    # == Update state and record error == #
    Ax = np.dot(A, x)
    x = multivariate_normal(mean=Ax, cov=Q)
    e1[t] = np.sum((x - kn.current_x_hat)**2)
    e2[t] = np.sum((x - Ax)**2)

fig, ax = plt.subplots(figsize=(9,6))
ax.plot(range(T), e1, 'k-', lw=2, alpha=0.6, label='Kalman filter error') 
ax.plot(range(T), e2, 'g-', lw=2, alpha=0.6, label='conditional expectation error') 
ax.legend()


Eigenvalues of A:
[ 0.9+0.j -0.1+0.j]
Stationary prediction error variance:
[[ 0.40329109  0.10507181]
 [ 0.10507181  0.41061711]]
Out[5]:
<matplotlib.legend.Legend at 0x5712c90>
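
Both eigenvalues of $A$ lie strictly inside the unit circle, which is what guarantees convergence of the prediction error variance. As a cross-check, the stationary value can also be obtained by iterating the matrix Riccati equation $\Sigma_{t+1} = A(\Sigma_t - \Sigma_t G'(G \Sigma_t G' + R)^{-1} G \Sigma_t)A' + Q$ directly; a minimal sketch, assuming 100 iterations suffice for convergence:

In [ ]:
# == Iterate the Riccati equation and compare with S above == #
A_arr = np.array(A)
S_iter = np.array(Sigma)  # start from the prior variance
for _ in range(100):
    GS = np.dot(np.dot(G, S_iter), G.T) + R
    correction = np.dot(np.dot(S_iter, G.T),
                        np.linalg.solve(GS, np.dot(G, S_iter)))
    S_iter = np.dot(np.dot(A_arr, S_iter - correction), A_arr.T) + Q
print(S_iter)  # should be close to the stationary S printed above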
