Assignment 11

Lorenzo Biasi, Julius Vernie


In [1]:
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from pandas import DataFrame
from numpy.random import multivariate_normal
from numpy.linalg  import inv
%matplotlib inline

In [2]:
def Relu(z):
    return np.max((z, np.zeros(z.shape)), axis=0)

def dRelu(z):
    return (z > 0) * 1

p = 3
q = 10
T = 500

data = loadmat('Tut11_file1.mat')
locals().update(data)
data.keys()


Out[2]:
dict_keys(['__header__', '__version__', '__globals__', 'A', 'B', 'Gamma', 'Sigma', 'W', 'mu0'])

Exercise 1

1.1


In [3]:
z = np.zeros((p, T + 1))
x = np.zeros((q, T + 1))
np.random.seed(10)
z[:,[0]] = mu0 + multivariate_normal(np.zeros(p), Sigma).reshape(mu0.shape)

In [4]:
def transition_f(z):
    return A.dot(z) + W.dot(np.maximum(z, 0.))

def obs_g(x):
    return B.dot(x)

z = np.zeros((p, T))
x = np.zeros((q, T))
np.random.seed(10)
z[:, [0]] = mu0 + multivariate_normal(np.zeros(p), Sigma).reshape(mu0.shape)
x[:, [0]] = transition_f(z[:, [0]]) + multivariate_normal(np.zeros(q), Gamma)


for t in range(1, T):
    
    z[:, t] = transition_f(z[:, t - 1]) + multivariate_normal(np.zeros(p), Sigma)
    x[:, t] = obs_g(z[:, t]) + multivariate_normal(np.zeros(q), Gamma)
x[:, 0] = np.ma.masked

In [10]:
plt.plot(x.T[1:, :], ':');
plt.figure()
plt.plot(z.T[:100, 1], ':');


1.2


In [6]:
# Filter

mu = np.zeros(z.shape)
K = np.zeros((p, q, T))
V = np.zeros((p, p, T))
L = np.zeros((p, p, T))

L[..., 0] = Sigma
# mu[..., [0]] = mu0
K[...,0] = L[..., 0].dot(B.T.dot(inv(B.dot(L[..., 0].dot(B.T)) + Gamma)))
mu[..., [0]] = A.dot(mu0) + K[..., 0].dot(x[:, [0]] - B.dot(A.dot(mu0)))
V[..., 0] = (np.eye(p) - K[..., 0].dot(B)).dot(Sigma)
#L[..., 0] = A.dot(V[..., 0].dot(A.T)) + Sigma

for t in range(1, T):
    L[..., t] = A.dot(V[..., t - 1].dot(A.T)) + Sigma

    K[...,t] = L[..., t].dot(B.T.dot(inv(B.dot(L[..., t].dot(B.T)) + Gamma)))
    mu[..., [t]] = A.dot(mu[..., [t-1]]) + K[..., t].dot(x[:, [t]] - B.dot(A.dot(mu[..., [t-1]])))
    V[..., t] = (np.eye(p) - K[..., t].dot(B)).dot(L[..., t])
    #L[..., t] = A.dot(V[..., t].dot(A.T)) + Sigma

In [7]:
print ('Smoothed LSE with linear assumption:', np.sum((mu[:, :-1] - z[:, 1:]).T ** 2))


Smoothed LSE with linear assumption: 24.6371491124

Exercise 2

2.1


In [8]:
# Filter

mu = np.zeros(z.shape)
K = np.zeros((p, q, T))
V = np.zeros((p, p, T))
L = np.zeros((p, p, T))

L[..., 0] = Sigma
K[...,0] = L[..., 0].dot(B.T.dot(inv(B.dot(L[..., 0].dot(B.T)) + Gamma)))
mu[..., [0]] = A.dot(mu0) + W.dot(Relu(mu0)) + \
               K[..., 0].dot(x[:, [0]]  - B.dot(A.dot(mu0) + W.dot(Relu(mu0))))
V[..., 0] = (np.eye(p) - K[..., 0].dot(B)).dot(Sigma)
#L[..., 0] = (A + W.dot(dRelu(mu0))).dot(V[..., 0].dot((A + W.dot(dRelu(mu0))).T)) + Sigma

for t in range(1, T):
    L[..., t] = (A + W.dot(dRelu(mu[..., [t-1]]))).dot(V[..., t - 1].dot(A + W.dot(dRelu(mu[..., [t-1]])))) + Sigma
    K[...,t] = L[..., t].dot(B.T.dot(inv(B.dot(L[..., t].dot(B.T)) + Gamma)))
    mu[..., [t]] = A.dot(mu[..., [t-1]]) + W.dot(Relu(mu[..., [t-1]])) + \
                   K[..., t].dot(x[:, [t]] - B.dot(A.dot(mu[..., [t-1]]) + W.dot(Relu(mu[..., [t-1]]))))
    V[..., t] = (np.eye(p) - K[..., t].dot(B)).dot(L[..., t])

2.2

We can see that the LSE using the non linear assumption is greatly improved.


In [9]:
print ('Smoothed result with non linear assumption:', np.sum((mu[:, :-1] - z[:, 1:]).T ** 2))


Smoothed result with non linear assumption: 11.6535617763