In [10]:
%pylab inline
%load_ext autoreload
%autoreload 2


Populating the interactive namespace from numpy and matplotlib

In [148]:
R = randn(10, 500)
P = randn(10, 10)
#%timeit diag(R.T.dot(P).dot(R))
%timeit einsum('ij,jk,ki->i', R.T, P, R)


10000 loops, best of 3: 70.2 µs per loop

In [16]:
from collections import namedtuple
from scipy.stats import multivariate_normal as mvn

import ncegauss as ng

def train_model(Td, k, D, verbose=False):
    global mmu, mP
    TrainedModel = namedtuple('TrainedModel', 
                              ['model', 'err_nce', 'err_ml'])
    mmu, mP = zeros(5), eye(D) + randn(D, D) / 10
    mP = (mP + mP.T) / 2
    mc = log(det(mP)) - D * log(2. * pi) / 2.
    true_params = ng.params_to_vec(mmu, mP, mc)
    print(mmu, mP)

    # nmu, nP = zeros(D), eye(D)
    # nc = log(det(nP)) - D * log(2. * pi) / 2.
    X = mvn.rvs(mmu, inv(mP), Td).T
    # Y = mvn.rvs(nmu, inv(nP), k * Td).T
    
    model = ng.NceGauss()
    model.fit_nce(X, k, verbose=verbose, maxnumlinesearch=1000)
    model.fit_ml(X)
    err_nce = (ng.params_to_vec(*model.params_nce) - true_params) ** 2
    err_ml = (ng.params_to_vec(*model.params_ml) - true_params) ** 2
    return TrainedModel(model, err_nce, err_ml)

# (ng.loglik(X, model.params.mu, model.params.L),
#  ng.loglik(X, model.params_ml.mu, model.params_ml.L))

#SZ = [100, 200, 500]
# KS = [5]
# lls = {}
# for k in KS:
#     lls[k] = map(lambda x: train_model(200, k, 5, True), xrange(10))
#     print(lls)

#n err_nce = lambda k: concatenate(map(lambda r: c_[r.err_nce], lls[k]), 1)
# err_ml = lambda k: concatenate(map(lambda r: c_[r.err_ml], lls[k]), 1)

m = train_model(200, 30, 5, True)
m


(array([ 0.,  0.,  0.,  0.,  0.]), array([[ 0.9548735 , -0.11542336,  0.00932199, -0.01545173, -0.01850689],
       [-0.11542336,  0.99135207, -0.03446331, -0.12956951, -0.10889272],
       [ 0.00932199, -0.03446331,  0.94216088,  0.00563674, -0.0876573 ],
       [-0.01545173, -0.12956951,  0.00563674,  0.99916751, -0.03588556],
       [-0.01850689, -0.10889272, -0.0876573 , -0.03588556,  0.9957096 ]]))
Optimization terminated successfully.
         Current function value: 4.359903
         Iterations: 35
         Function evaluations: 46
         Gradient evaluations: 43
ncegauss.py:157: RuntimeWarning: overflow encountered in exp
  Jm = -np.sum(log(1 + np.exp(-hvv(X, Xzero))))
ncegauss.py:21: RuntimeWarning: overflow encountered in exp
  sigmoid = lambda u: 1.0 / (1.0 + np.exp(-u))
Out[16]:
TrainedModel(model=<ncegauss.NceGauss object at 0x3b98c90>, err_nce=array([  1.56647514e-02,   3.35973046e-03,   7.74691161e-03,
         3.07002790e-03,   9.94330435e-05,   3.80759548e-03,
         8.91176594e-05,   3.87879630e-03,   4.61595850e-03,
         1.10576201e-03,   2.31081074e-03,   6.02593319e-03,
         5.09703765e-03,   4.55101401e-04,   9.54370940e-03,
         6.00646629e-04,   5.77058330e-03,   7.51513528e-05,
         1.68771042e-03,   1.60904456e-03,   1.08884088e-03]), err_ml=array([  1.44212112e-02,   2.78391153e-03,   8.29586502e-03,
         2.52953653e-03,   2.13129382e-04,   1.08091896e-02,
         6.38015870e-05,   1.50951377e-02,   4.99160231e-03,
         2.00715133e-03,   3.21568562e-04,   2.80260145e-03,
         2.84951416e-03,   1.96915038e-06,   3.84385996e-02,
         1.00791826e-03,   8.40803206e-03,   9.75459536e-05,
         4.30910077e-04,   5.07014316e-03,   2.85547104e-03]))

In [27]:
trueP = cholesky(mP)
print((m.model.params_nce.L - trueP) / trueP)
print(m.model.params_nce.L)
print(m.model.params_ml.L)


[[  0.04032325          nan          nan          nan          nan]
 [  0.05709732  -0.06024799          nan          nan          nan]
 [ -7.14471433  -0.96410672   0.02083967          nan          nan]
 [ -4.93199384  -0.56241624 -12.67067463  -0.0899479           nan]
 [  1.27122045  -0.70706197   0.02378917  -1.10092024  -0.03034412]]
[[ 1.01657921  0.          0.          0.          0.        ]
 [-0.12486357  0.92907208  0.          0.          0.        ]
 [-0.05861886 -0.00121031  0.99023177  0.          0.        ]
 [ 0.06217516 -0.05817597 -0.01569637  0.9014756   0.        ]
 [-0.04301498 -0.03292836 -0.09632629  0.00519619  0.9555967 ]]
[[ 1.05884075 -0.12341095 -0.06132928  0.03748788 -0.05025461]
 [-0.12341095  0.8684898   0.01033793 -0.07618867 -0.0171974 ]
 [-0.06132928  0.01033793  0.96009321  0.00704    -0.07778077]
 [ 0.03748788 -0.07618867  0.00704     0.80310987 -0.01512719]
 [-0.05025461 -0.0171974  -0.07778077 -0.01512719  0.92450466]]
-c:2: RuntimeWarning: invalid value encountered in divide

In [144]:
N = 4
D = 4

# a = rand(N)
# R = rand(D, N)
# R = (R + R.T) / 2
# L = rand(D, D)
o = einsum('k,ik,jk->ijk', a, R, R)
#o[:, :, 1], outer(R[:, 1], R[:, 1])
r = reduce(lambda a,b:a+b, [a[t] * outer(R[:, t], R[:, t]) for t in xrange(4)])
a.dot(o) == r


Out[144]:
array([[False, False, False, False],
       [False, False, False, False],
       [False, False, False, False],
       [False, False, False, False]], dtype=bool)

In [161]:
mean(randn(1000))
1/sqrt(1000)


Out[161]:
0.031622776601683791

In [ ]:


In [30]:
err_nce = lambda k: concatenate(map(lambda r: c_[r.err_nce], lls[k]), 1)
err_ml = lambda k: concatenate(map(lambda r: c_[r.err_ml], lls[k]), 1)
# ks = [1, 5, 10, 20]
#plot(np.array(map(lambda k: err_nce(k)[:, -1].mean(), ks)))
# plot(np.array(map(lambda k: err_ml(k)[:, -1].mean(), ks)))
#err_nce(10)[1:, :]

# subplot(131)
# k = 1
# errs = c_[err_nce(k)[0:5, :].mean(0), 
#           err_nce(k)[5:-1, :].mean(0),
#           err_nce(k)[-1, :]]
# boxplot(log(errs))

subplot(132)
k = 5
errs = c_[err_nce(k)[0:5, :].mean(0), 
          err_nce(k)[5:-1, :].mean(0),
          err_nce(k)[-1, :]]
boxplot(log(errs))

# subplot(133)
# k = 15
# errs = c_[err_nce(k)[0:5, :].mean(0), 
#           err_nce(k)[5:-1, :].mean(0),
#           err_nce(k)[-1, :]]
# boxplot(log(errs))


Out[30]:
{'boxes': [<matplotlib.lines.Line2D at 0x42b5fd0>,
  <matplotlib.lines.Line2D at 0x42c2250>,
  <matplotlib.lines.Line2D at 0x42cc490>],
 'caps': [<matplotlib.lines.Line2D at 0x42b5350>,
  <matplotlib.lines.Line2D at 0x42b5990>,
  <matplotlib.lines.Line2D at 0x42bf590>,
  <matplotlib.lines.Line2D at 0x42bfbd0>,
  <matplotlib.lines.Line2D at 0x42c77d0>,
  <matplotlib.lines.Line2D at 0x42c7e10>],
 'fliers': [<matplotlib.lines.Line2D at 0x42b8c90>,
  <matplotlib.lines.Line2D at 0x42ba690>,
  <matplotlib.lines.Line2D at 0x42c2ed0>,
  <matplotlib.lines.Line2D at 0x42c48d0>,
  <matplotlib.lines.Line2D at 0x42ce150>,
  <matplotlib.lines.Line2D at 0x42ceb10>],
 'medians': [<matplotlib.lines.Line2D at 0x42b8650>,
  <matplotlib.lines.Line2D at 0x42c2890>,
  <matplotlib.lines.Line2D at 0x42ccad0>],
 'whiskers': [<matplotlib.lines.Line2D at 0x3b0a350>,
  <matplotlib.lines.Line2D at 0x42b1c50>,
  <matplotlib.lines.Line2D at 0x42ba8d0>,
  <matplotlib.lines.Line2D at 0x42baf10>,
  <matplotlib.lines.Line2D at 0x42c4b10>,
  <matplotlib.lines.Line2D at 0x42c7190>]}

In [26]:
eye(D) + randn(D, D) / 10


Out[26]:
array([[ 0.84879388, -0.09131163,  0.04450754,  0.00417208, -0.04504048],
       [ 0.09419122,  1.07488466, -0.02855406,  0.03980543,  0.08116362],
       [ 0.03111189, -0.05519355,  1.0649304 ,  0.13881242, -0.19987636],
       [ 0.01406765, -0.04480717, -0.10052911,  0.96825815,  0.04244867],
       [ 0.12036484,  0.09691442,  0.01470134, -0.01233752,  0.96330537]])

In [19]:
from scipy.stats import multivariate_normal as mvn
from scipy import optimize
from scipy.optimize import check_grad, approx_fprime
from minimize import minimize

import ncegauss as ng

def train_model(Td, k, D):
    pass

mu = r_[0., 0.]
S = c_[[2., .2], [.2, 2.]]
P = np.linalg.inv(S)
L = cholesky(P)

Td = 200
k = 10

c = log(det(P)) / 2 - log(2 * pi)
x = ng.params_to_vec(mu, L, array([c]))

X = mvn.rvs(randn(5)/100, eye(5), Td).T
Y = mvn.rvs(randn(5)/100, eye(5), k * Td).T

model = ng.NceGauss()
model.fit(X, Y, verbose=True)
model.fit_ml(X)
print('cg=%s' % ng.params_to_vec(*model.params))
print('ml=%s' % ng.params_to_vec(*model.params_ml))

(ng.loglik(X, model.params.mu, model.params.L),
 ng.loglik(X, model.params_ml.mu, model.params_ml.L))

#ng.NceGauss.J(X, Y, *est[0])
# x = x + randn(6)/10
# print(approx_fprime(x, lambda u: ng.NceGauss.J(X, Y, *vec_to_params(2, u)), 1e-7))
# print(ng.NceGauss.dJ(X, Y, *vec_to_params(2, x)))


Linesearch      1;  Value 9.737519e+00
Linesearch      2;  Value 7.295493e+00
Linesearch      3;  Value 6.089248e+00
Linesearch      4;  Value 4.623242e+00
Linesearch      5;  Value 3.887693e+00
Linesearch      6;  Value 3.650695e+00
Linesearch      7;  Value 3.611126e+00
Linesearch      8;  Value 3.604500e+00
Linesearch      9;  Value 3.600003e+00
Linesearch     10;  Value 3.599162e+00
Linesearch     11;  Value 3.598611e+00
Linesearch     12;  Value 3.597210e+00
Linesearch     13;  Value 3.595305e+00
Linesearch     14;  Value 3.590957e+00
Linesearch     15;  Value 3.582423e+00
Linesearch     16;  Value 3.538132e+00
Linesearch     17;  Value 3.502142e+00
Linesearch     18;  Value 3.429335e+00
Linesearch     19;  Value 3.402839e+00
Linesearch     20;  Value 3.334726e+00
Linesearch     21;  Value 3.322330e+00
Linesearch     22;  Value 3.309958e+00
Linesearch     23;  Value 3.307941e+00
Linesearch     24;  Value 3.305747e+00
Linesearch     25;  Value 3.304767e+00
Linesearch     26;  Value 3.304510e+00
Linesearch     27;  Value 3.304416e+00
Linesearch     28;  Value 3.304332e+00
Linesearch     29;  Value 3.304317e+00
Linesearch     30;  Value 3.304316e+00
Linesearch     31;  Value 3.304315e+00
Linesearch     32;  Value 3.304315e+00
Linesearch     33;  Value 3.304315e+00
Linesearch     34;  Value 3.304315e+00
Linesearch     35;  Value 3.304315e+00
Linesearch     36;  Value 3.304315e+00
Linesearch     37;  Value 3.304315e+00
Linesearch     38;  Value 3.304315e+00
Linesearch     39;  Value 3.304315e+00
Linesearch     40;  Value 3.304315e+00
Linesearch     41;  Value 3.304315e+00
Linesearch     42;  Value 3.304315e+00
Linesearch     43;  Value 3.304315e+00
Linesearch     44;  Value 3.304315e+00
Linesearch     45;  Value 3.304315e+00
Linesearch     46;  Value 3.304315e+00
Linesearch     47;  Value 3.304315e+00
Linesearch     48;  Value 3.304315e+00


cg=[-0.03819288  0.0782654   0.06205764  0.09255866 -0.05835311  1.00062845
 -0.03589783  0.99223793  0.06692149 -0.02010853  0.93872204 -0.01638138
  0.05403805  0.08316961  1.1356675   0.05494383  0.09366587  0.06490963
 -0.02724323  0.92783101 -4.59335869]
ml=[-0.04070686  0.03497893  0.06133572  0.07095133 -0.02638005  0.95464381
 -0.04811261  1.00729918  0.08777081  0.02469043  0.86509097 -0.00909611
  0.06136649  0.05289546  1.26022148  0.02666917  0.05879346  0.03653518
 -0.01746524  0.9234334  -4.62330033]
Out[19]:
(1423.6411173882302, 1428.9451387667209)

In [24]:
plot(mean(ls))


Out[24]:
[<matplotlib.lines.Line2D at 0x45a2590>]

In [33]:
lmean[1] = 0
print(lmedian)
plot(lmedian)


[109.17473188219398, 7.1514697241170211, 2.0057224809013405, 0.66469117044124459, 0.016460477706175425, 0.40200014289737851, -1.0304272311694547, 1.5654547091658344, -0.66696939937119737, -0.52089360435132903]
Out[33]:
[<matplotlib.lines.Line2D at 0x5440b10>]

In [674]:
params_to_vec(*est[0])


Out[674]:
array([-0.49788282, -0.19763682,  1.71817276,  0.9976156 ,  0.1884847 ,
        1.16343582, -0.46523771])

In [659]:
ng.params_to_vec(


Out[659]:
10

In [665]:
c = -log(det(S)) / 2 - log(2 * pi)

emu, eL, ec = est[0]
eP = dot(eL, eL.T)
ecc = log(det(eP)) / 2 - log(2 * pi)
print('cg=%s' % str((emu, eP, ec, ecc)))
print('ml=%s' % str((mean(X, 1), inv(cov(X)), -log(det(cov(X))) / 2 - log(2 * pi))))
print('true=%s' % str((mu, P, c)))


cg=(array([-0.6466257 , -5.54049736, -1.70855286]), array([[ 1.12425145, -0.2749735 , -0.23209465],
       [-0.2749735 ,  0.48785404, -0.20491839],
       [-0.23209465, -0.20491839,  1.13743657]]), -1.9760774497236133, -2.2504124268487997)
ml=(array([-0.51255609, -3.42275044, -1.0088618 ]), array([[ 0.95551785,  0.0607073 , -0.00865496],
       [ 0.0607073 ,  1.15329124,  0.09654691],
       [-0.00865496,  0.09654691,  1.23125724]]), -1.6903491180140005)
true=(array([ 0.,  0.]), array([[ 0.50505051, -0.05050505],
       [-0.05050505,  0.50505051]]), -2.5259990790425402)

In [391]:
cholesky(inv(cov(X))), P


Out[391]:
(array([[ 0.65819241, -0.        ],
       [-0.05777586,  0.70828777]]),
 array([[ 0.50505051, -0.05050505],
       [-0.05050505,  0.50505051]]))

In [762]:
[ 1.23279859, -3.51804843,  0.42417953,  0.14220384,  0.03868466,
      -3.28698105]
nn(P, c)


Out[762]:
(array([[ 0.51282051, -0.05128205],
       [-0.05128205,  0.20512821]]),
 -2.9765107089142235)

In [995]:
N = 20
theta = params_to_vec(mu, P, c)
lspace = linspace(-30, 30, N)
[Xs, Ys] = meshgrid(lspace, lspace)
grid = c_[Xs.reshape(-1, 1), Ys.reshape(-1, 1)]
grid = c_[grid, repeat(theta[2:].reshape(1, 4), grid.shape[0], 0)]
Jeval = np.array([obj(grid[x, :]) for x in xrange(grid.shape[0])])
contour(Jeval.reshape(N, N), 60)
colorbar()
locs, labels = xticks()
# xticks(locs, map(lambda s: "%.2f" % s, lspace[map(lambda x: min(N - 1, int(x)), locs)]))
# yticks(locs, map(lambda s: "%.2f" % s, lspace[map(lambda x: min(N - 1, int(x)), locs)]))



In [34]:
- log(det(S)) / 2 - log(2 * pi)


Out[34]:
-2.5259990790425402