In [1]:
from pylearn2.models.mlp import MLP, Linear, Tanh, CompositeLayer, Sigmoid
In [2]:
# Hyperparameters for the VAE encoder/decoder networks.
dimZ = 20    # latent dimensionality
dimX = 784   # input dimensionality (28x28 MNIST images, flattened)
bsize = 200  # minibatch size
# Encoder (recognition) network: x -> tanh hidden -> two linear heads.
# The CompositeLayer's 'mz' and 'vz' branches are presumably the latent
# mean and (log-)variance -- confirm against vpcn.VAE2.
# NOTE(review): the original hard-coded 784 and 20 here even though dimX
# and dimZ are defined above; using the constants so they actually
# control the architecture (values are identical, behavior unchanged).
IH_model = MLP(batch_size=bsize, nvis=dimX, layers=[
    Tanh(layer_name='IH', dim=400, irange=.01),
    CompositeLayer(layer_name='mz and vz',
                   layers=[
                       Linear(layer_name='mz', dim=dimZ, irange=.01),
                       Linear(layer_name='vz', dim=dimZ, irange=.01)
                   ]
                   )
]
)
# Decoder (generative) network: z -> tanh hidden -> sigmoid reconstruction.
HI_model = MLP(batch_size=bsize, nvis=dimZ, layers=[
    Tanh(layer_name='H_decoder', dim=400, irange=.01),
    Sigmoid(layer_name='x_hat', dim=dimX, irange=.01)
])
In [3]:
# Inspect the encoder's input space (rich display via the bare expression).
IH_model.get_input_space()
Out[3]:
In [4]:
# Inspect the encoder's output space -- a composite of the two Linear heads.
IH_model.get_output_space()
Out[4]:
In [5]:
# Build a symbolic Theano batch matching the encoder's input space and run
# a forward pass; the CompositeLayer makes fprop return a pair of outputs
# (the 'mz' and 'vz' branches).
X = IH_model.input_space.make_theano_batch()
Y1, Y2 = IH_model.fprop(X)
In [6]:
# Show the two symbolic output variables (Python 2 print statements).
print Y1
print Y2
In [7]:
# Confirm the batch size the MLP was configured with (200 from bsize above).
IH_model.batch_size
Out[7]:
In [8]:
from vpcn import VAE2
import numpy as np
import argparse
import time
import gzip, cPickle
f = gzip.open('../Variational-Autoencoder/mnist.pkl.gz', 'rb')
(x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
f.close()
data = x_train.astype('float32')
dimZ = 20
batch_size = 200
L = 1
learning_rate = .01
[N,dimX] = data.shape
encoder = VAE2(IH_model,HI_model,L,learning_rate)
encoder.initParams()
encoder.createGradientFunctions()
print "Initializing weights and biases"
lowerbound = np.array([])
testlowerbound = np.array([])
begin = time.time()
for j in xrange(150):
encoder.lowerbound = 0
print 'Iteration:', j
encoder.iterate(data)
end = time.time()
print("Iteration %d, lower bound = %.2f,"
" time = %.2fs"
% (j, encoder.lowerbound*batch_size/N, end - begin))
begin = end
if j % 5 == 0:
print "Calculating test lowerbound"
testlowerbound = np.append(testlowerbound,encoder.getLowerBound(x_test.astype('float32')))
In [ ]:
%matplotlib inline
from pylab import imshow, rcParams
import display_data
# Larger default figure so the weight tiles are legible.
rcParams['figure.figsize'] = 8, 8
# First parameter array of the encoder -- presumably the IH layer's weight
# matrix; confirm get_param_values() ordering against pylearn2.
W = IH_model.get_param_values()[0]
# Tile the weight columns into one image and save it as 'W.png'.
I = display_data.displayData(W.T, 'W.png')
imshow(I,cmap='gray')
In [ ]:
# NOTE(review): this cell largely duplicates the training-setup cell above,
# changing only batch_size (100) and learning_rate (1e-5); consider a
# parameterized setup function instead of copy-paste.
from vpcn import VAE2
import numpy as np
import argparse
import time
import gzip, cPickle

# Re-load MNIST.  FIX: context manager closes the file even if unpickling
# raises; the original left the handle open on failure.
with gzip.open('../Variational-Autoencoder/mnist.pkl.gz', 'rb') as f:
    (x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
data = x_train.astype('float32')

# New hyperparameters for this run.
dimZ = 20
batch_size = 100
L = 1
learning_rate = 0.00001
[N, dimX] = data.shape

# Fresh VAE harness built on the same encoder/decoder MLPs.
encoder = VAE2(IH_model, HI_model, L, learning_rate)
encoder.initParams()
encoder.createGradientFunctions()
In [ ]: