In [10]:
# Notebook configuration. NOTE(review): %pylab is deprecated in favor of
# explicit imports -- it dumps numpy and matplotlib names into the global
# namespace, which is what the bare zeros/randn/plot/det calls in the cells
# below rely on.
%pylab inline
%load_ext autoreload
%autoreload 2
In [148]:
# Benchmark: compute diag(R.T @ P @ R) via einsum. The 'ij,jk,ki->i'
# signature produces only the diagonal, avoiding the full 500x500 product
# that the commented diag(...) variant would materialize.
R = randn(10, 500)
P = randn(10, 10)
#%timeit diag(R.T.dot(P).dot(R))
%timeit einsum('ij,jk,ki->i', R.T, P, R)
In [16]:
from collections import namedtuple
from scipy.stats import multivariate_normal as mvn
import ncegauss as ng
def train_model(Td, k, D, verbose=False):
    """Fit a D-dimensional Gaussian to Td samples by NCE and by maximum
    likelihood, and return both squared parameter errors vs. ground truth.

    Parameters
    ----------
    Td : int
        Number of data samples to draw from the true model.
    k : int
        Noise-to-data ratio passed to the NCE fit.
    D : int
        Dimensionality of the Gaussian.
    verbose : bool
        Forwarded to the NCE optimizer; also gates the debug print below.

    Returns
    -------
    TrainedModel
        namedtuple (model, err_nce, err_ml) with elementwise squared errors
        of the parameter vectors against the true one.
    """
    global mmu, mP
    TrainedModel = namedtuple('TrainedModel',
                              ['model', 'err_nce', 'err_ml'])
    # True model: zero mean, precision = identity plus a small symmetrized
    # Gaussian perturbation.
    # BUG FIX: the mean was hard-coded as zeros(5); use zeros(D) so the
    # function works for any dimensionality, matching its signature.
    mmu, mP = zeros(D), eye(D) + randn(D, D) / 10
    mP = (mP + mP.T) / 2
    # Log-normalizer of N(mmu, inv(mP)):
    #   c = log(det(P))/2 - (D/2) * log(2*pi).
    # BUG FIX: the /2 was missing on the det term; the other cells in this
    # notebook (e.g. c = log(det(P)) / 2 - log(2 * pi) for D = 2) use the
    # standard convention restored here.
    mc = log(det(mP)) / 2. - D * log(2. * pi) / 2.
    true_params = ng.params_to_vec(mmu, mP, mc)
    if verbose:
        # Gate the debug dump behind the existing verbose flag instead of
        # printing unconditionally.
        print(mmu, mP)
    # mvn is parameterized by the covariance, i.e. the inverse precision.
    X = mvn.rvs(mmu, inv(mP), Td).T
    model = ng.NceGauss()
    model.fit_nce(X, k, verbose=verbose, maxnumlinesearch=1000)
    model.fit_ml(X)
    err_nce = (ng.params_to_vec(*model.params_nce) - true_params) ** 2
    err_ml = (ng.params_to_vec(*model.params_ml) - true_params) ** 2
    return TrainedModel(model, err_nce, err_ml)
# (ng.loglik(X, model.params.mu, model.params.L),
# ng.loglik(X, model.params_ml.mu, model.params_ml.L))
#SZ = [100, 200, 500]
# KS = [5]
# lls = {}
# for k in KS:
# lls[k] = map(lambda x: train_model(200, k, 5, True), xrange(10))
# print(lls)
#n err_nce = lambda k: concatenate(map(lambda r: c_[r.err_nce], lls[k]), 1)
# err_ml = lambda k: concatenate(map(lambda r: c_[r.err_ml], lls[k]), 1)
# Train one 5-D model on 200 samples with noise ratio k=30; the bare `m`
# on the last line makes the TrainedModel tuple the cell's displayed output.
m = train_model(200, 30, 5, True)
m
Out[16]:
In [27]:
# Compare the NCE-estimated Cholesky factor of the precision against the
# true one (relative error first, then both raw estimates).
trueP = cholesky(mP)
relative_error = (m.model.params_nce.L - trueP) / trueP
print(relative_error)
print(m.model.params_nce.L)
print(m.model.params_ml.L)
In [144]:
# Sanity check: einsum('k,ik,jk->ijk', a, R, R) stacks the weighted outer
# products a[k] * outer(R[:, k], R[:, k]) along the last axis, so summing
# over that axis reproduces the explicit sum of outer products.
N = 4
D = 4
# BUG FIX: `a` and `R` were only defined in commented-out lines, so this
# cell failed on a fresh kernel; define them here.
a = rand(N)
R = rand(D, N)
o = einsum('k,ik,jk->ijk', a, R, R)
# Reference: sum_k a[k] * R[:, k] R[:, k]^T, built term by term.
r = sum(a[t] * outer(R[:, t], R[:, t]) for t in range(N))
# BUG FIX: the original checked `a.dot(o) == r`, which (a) contracts o over
# the wrong axis and (b) applies the weights a a second time -- o already
# folds them in. The correct reduction is a plain sum over k. Also use
# allclose instead of exact float ==.
allclose(o.sum(axis=-1), r)
Out[144]:
In [161]:
# Compare the sample mean of 1000 standard normals to its theoretical
# standard error 1/sqrt(1000). NOTE(review): only the last expression of a
# cell is displayed, so the mean(...) result on the first line is silently
# discarded -- wrap both in a tuple or print to see both.
mean(randn(1000))
1/sqrt(1000)
Out[161]:
In [ ]:
In [30]:
def err_nce(k):
    """Column-stack the per-run NCE squared parameter errors for ratio k."""
    return concatenate([c_[r.err_nce] for r in lls[k]], 1)

def err_ml(k):
    """Column-stack the per-run ML squared parameter errors for ratio k."""
    return concatenate([c_[r.err_ml] for r in lls[k]], 1)

# NOTE(review): `lls` is only built by a commented-out loop in an earlier
# cell, so this cell cannot run under Restart & Run All as saved.
# (Commented-out near-duplicate variants for k = 1 and k = 15 removed;
# change k below instead of duplicating the cell.)
# Box-plot the log squared errors for k = 5, grouped as: mean-parameter
# components, precision components, normalizer.
subplot(132)
k = 5
errs = c_[err_nce(k)[0:5, :].mean(0),
          err_nce(k)[5:-1, :].mean(0),
          err_nce(k)[-1, :]]
boxplot(log(errs))
Out[30]:
In [26]:
# Quick look at the random precision construction used as ground truth:
# identity plus a small Gaussian perturbation (not yet symmetrized here).
# D leaks in from a previous cell.
eye(D) + randn(D, D) / 10
Out[26]:
In [19]:
from scipy.stats import multivariate_normal as mvn
from scipy import optimize
from scipy.optimize import check_grad, approx_fprime
from minimize import minimize
import ncegauss as ng
# NOTE(review): this stub shadows the real train_model defined in an
# earlier cell -- remove it or rename one of the two.
def train_model(Td, k, D):
pass
# Ground-truth 2-D Gaussian: mean, covariance S, precision P = inv(S),
# Cholesky factor L of P, and log-normalizer c = log(det(P))/2 - log(2*pi)
# (the -(D/2) log 2pi term with D = 2).
mu = r_[0., 0.]
S = c_[[2., .2], [.2, 2.]]
P = np.linalg.inv(S)
L = cholesky(P)
Td = 200
k = 10
c = log(det(P)) / 2 - log(2 * pi)
x = ng.params_to_vec(mu, L, array([c]))
# NOTE(review): the data X and noise Y are sampled in 5 dimensions while
# the "true" parameters above are 2-D -- the two cannot be compared;
# confirm which dimensionality was intended.
X = mvn.rvs(randn(5)/100, eye(5), Td).T
Y = mvn.rvs(randn(5)/100, eye(5), k * Td).T
model = ng.NceGauss()
model.fit(X, Y, verbose=True)
model.fit_ml(X)
print('cg=%s' % ng.params_to_vec(*model.params))
print('ml=%s' % ng.params_to_vec(*model.params_ml))
# Cell output: data log-likelihood under the NCE and ML estimates.
(ng.loglik(X, model.params.mu, model.params.L),
ng.loglik(X, model.params_ml.mu, model.params_ml.L))
#ng.NceGauss.J(X, Y, *est[0])
# x = x + randn(6)/10
# print(approx_fprime(x, lambda u: ng.NceGauss.J(X, Y, *vec_to_params(2, u)), 1e-7))
# print(ng.NceGauss.dJ(X, Y, *vec_to_params(2, x)))
Out[19]:
In [24]:
# NOTE(review): `ls` is not defined anywhere in this notebook as saved, and
# mean(ls) is a scalar, so this plots a single point -- presumably plot(ls)
# or a running mean was intended; confirm against the deleted cell that
# produced `ls`.
plot(mean(ls))
Out[24]:
In [33]:
# NOTE(review): lmean/lmedian are not defined in this notebook as saved --
# this cell depends on state from deleted cells and will fail on a fresh
# kernel (Restart & Run All).
lmean[1] = 0
print(lmedian)
plot(lmedian)
Out[33]:
In [674]:
# NOTE(review): `est` comes from a deleted cell; also uses the bare
# params_to_vec rather than ng.params_to_vec as elsewhere -- confirm they
# are the same function.
params_to_vec(*est[0])
Out[674]:
In [659]:
# NOTE(review): syntactically incomplete call left over from an abandoned
# edit -- this cell cannot execute; delete it or finish the call.
ng.params_to_vec(
Out[659]:
In [665]:
# Compare estimated vs. true parameters. Since P = inv(S),
# -log(det(S)) == log(det(P)), so this c matches the log-normalizer
# convention log(det(P))/2 - log(2*pi) used above (D = 2).
c = -log(det(S)) / 2 - log(2 * pi)
# NOTE(review): `est` comes from a deleted cell -- fails on a fresh kernel.
emu, eL, ec = est[0]
# Reconstruct the precision from its Cholesky factor, and recompute the
# normalizer it implies (ecc) to compare against the fitted ec.
eP = dot(eL, eL.T)
ecc = log(det(eP)) / 2 - log(2 * pi)
print('cg=%s' % str((emu, eP, ec, ecc)))
print('ml=%s' % str((mean(X, 1), inv(cov(X)), -log(det(cov(X))) / 2 - log(2 * pi))))
print('true=%s' % str((mu, P, c)))
In [391]:
# Display the Cholesky factor of the empirical precision next to the true
# precision P for a visual comparison.
cholesky(inv(cov(X))), P
Out[391]:
In [762]:
# NOTE(review): the list literal looks like pasted output (a parameter
# vector) with no effect, and `nn` is not defined anywhere in this notebook
# as saved -- this cell is dead residue and should be deleted.
[ 1.23279859, -3.51804843, 0.42417953, 0.14220384, 0.03868466,
-3.28698105]
nn(P, c)
Out[762]:
In [995]:
# Visualize the objective on a 2-D slice of parameter space: vary the first
# two coordinates of theta over an N x N grid, hold the remaining four at
# their true values, and contour the evaluated objective.
N = 20
theta = params_to_vec(mu, P, c)
lspace = linspace(-30, 30, N)
[Xs, Ys] = meshgrid(lspace, lspace)
# Each grid row is a full parameter vector: (x, y) from the mesh plus the
# fixed tail theta[2:].
grid = c_[Xs.reshape(-1, 1), Ys.reshape(-1, 1)]
grid = c_[grid, repeat(theta[2:].reshape(1, 4), grid.shape[0], 0)]
Jeval = np.array([obj(grid[i, :]) for i in xrange(grid.shape[0])])
# FIX: pass the coordinate arrays to contour so the axes are labeled in
# parameter values. Previously only the Z array was supplied, which put
# grid *indices* on the axes -- the commented-out xticks/yticks relabeling
# hack that used to follow is no longer needed.
contour(Xs, Ys, Jeval.reshape(N, N), 60)
colorbar()
In [34]:
# Log-normalizer expressed via the covariance: since P = inv(S),
# -log(det(S))/2 equals log(det(P))/2, matching the c used earlier.
- log(det(S)) / 2 - log(2 * pi)
Out[34]: