In [1]:
#!export PYTHONPATH=./:$PYTHONPATH
import librnn
from librnn.pylearn2.datasets.rotating_mnist import RotatingMNIST
In [2]:
dataset = RotatingMNIST('/home/eders/Copy/python/data/rotating-mnist.h5', 'train')
testset = RotatingMNIST('/home/eders/Copy/python/data/rotating-mnist.h5', 'test')
In [3]:
dataset.get_data_specs()
Out[3]:
In [4]:
for d, in dataset.iterator(100, 3, mode='sequential',
                           data_specs=dataset.get_data_specs()):
    print d.shape
In [5]:
from pylearn2.sandbox.rnn.models.rnn import RNN, GRU, Recurrent
from pylearn2.models.mlp import Sigmoid, CompositeLayer, Linear, MLP, Tanh, FlattenerLayer
from pylearn2.space import VectorSpace, CompositeSpace
from pylearn2.sandbox.rnn.space import SequenceDataSpace
In [10]:
dimZ = 20
dimX = 784
bsize = 100
dimH = 400
IH_model = RNN(
    batch_size=bsize,
    input_space=SequenceDataSpace(space=VectorSpace(dim=dimX)),
    layers=[
        GRU(layer_name='h_1', dim=dimH, irange=0.1, weight_noise=1),
        CompositeLayer(
            layer_name='comp',
            layers=[
                Linear(layer_name='m_z', dim=dimZ, irange=0.1),   # posterior mean
                Linear(layer_name='ls_z', dim=dimZ, irange=0.1),  # posterior log std
            ]
        )
    ]
)
HI_model = RNN(
    batch_size=bsize,
    input_space=SequenceDataSpace(space=VectorSpace(dim=dimZ)),
    layers=[
        Tanh(layer_name='H_decoder', dim=dimH, irange=.01),
        Sigmoid(layer_name='x_hat', dim=dimX, irange=.01)
    ]
)
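The two RNNs above form a sequential variational autoencoder: IH_model (the recognition model) maps an input sequence to per-timestep posterior parameters m_z and ls_z, and HI_model (the generative model) decodes latent sequences back to pixel space. As a minimal NumPy sketch of the reparameterization step that ties them together, assuming ls_z holds log standard deviations (illustrative names, not VPCN's actual code):
# Sketch only: z = m + sigma * eps with eps ~ N(0, I), so sampling stays
# differentiable with respect to the posterior parameters m and ls.
import numpy as np
m = np.zeros((50, bsize, dimZ), dtype='float32')   # posterior means, (time, batch, dimZ)
ls = np.zeros((50, bsize, dimZ), dtype='float32')  # posterior log std devs
eps = np.random.normal(0, 1, m.shape).astype('float32')
z = m + np.exp(ls) * eps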
In [ ]:
import vpcn_rnn; reload(vpcn_rnn)
from vpcn_rnn import VPCN
import numpy as np
import time
import gzip, cPickle
from theano import tensor as T
import top; reload(top)

L = 1
learning_rate = .001
momentum = .9
print "Initializing weights and biases"
encoder = VPCN(IH_model, HI_model, 'rmsprop', learning_rate, momentum)
encoder.initParams()
encoder.createGradientFunctions()

lowerbound = np.array([])
testlowerbound = np.array([])
begin = time.time()
for j in xrange(200):
    encoder.lowerbound = 0
    print 'Iteration:', j
    encoder.iterate(dataset)
    end = time.time()
    print("Iteration %d, lower bound = %.2f, time = %.2fs"
          % (j, encoder.lowerbound * bsize / dataset.get_num_examples(), end - begin))
    begin = end
    if j % 5 == 0:
        # Evaluate the lower bound on the held-out test set every 5 iterations
        testlowerbound = np.append(testlowerbound, encoder.getLowerBound(testset))
        print "### Test lowerbound: %f ###" % testlowerbound[-1]
        print ">>> learning rate: %f" % encoder.opt.lr.get_value()
In [27]:
%matplotlib inline
from pylab import imshow, plot
In [28]:
import theano

def decoder(HI_model):
    # Compile the generative network: latent sequence -> pixel sequence.
    Z = T.tensor3()
    return theano.function([Z], HI_model.fprop(Z))

def encoder(IH_model, dimZ, deterministic=True):
    # Compile the recognition network. Only the deterministic path
    # (return the posterior mean) is implemented for now.
    X = T.tensor3()
    m, ls = IH_model.fprop(X)
    if deterministic:
        Z = m
    else:
        raise NotImplementedError('use deterministic=True for now...')
    return theano.function([X], Z)

fdecoder = decoder(HI_model)
fencoder = encoder(IH_model, 20)
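The stochastic path above is left as NotImplementedError; if needed, it could be filled in with Theano's MRG random streams via the same reparameterization trick. A sketch under that assumption, not part of the original notebook:
from theano.sandbox.rng_mrg import MRG_RandomStreams

def stochastic_encoder(IH_model, seed=1234):
    # Sample Z = m + exp(ls) * eps, eps ~ N(0, I), so the sample remains
    # differentiable with respect to the posterior parameters.
    X = T.tensor3()
    m, ls = IH_model.fprop(X)
    srng = MRG_RandomStreams(seed)
    eps = srng.normal(size=m.shape)
    return theano.function([X], m + T.exp(ls) * eps)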
In [29]:
X = fdecoder(np.random.normal(0,1,(100,1,dimZ)).astype('float32'))
Z = fencoder(d)  # encode the last batch from the iterator; d is 3D (time, batch, features)
Xh = fdecoder(Z)
In [30]:
_ = plot(Z[:,0,:].T)
In [37]:
from pylab import subplot, title
subplot(121)
title('hat')
_ = imshow(-(Xh[:, 0, :] > .5).T, cmap='gray')
subplot(122)
title('original')
_ = imshow(-d[:, 0, :].T, cmap='gray')