I launched the notebook from the prompt with these THEANO_FLAGS:

THEANO_FLAGS='mode=FAST_RUN,floatX=float32,device=gpu0,lib.cnmem=0.80,optimizer_excluding=low_memory' jupyter notebook

In [1]:
%matplotlib inline

In [2]:
from collections import namedtuple

In [3]:
import matplotlib.pyplot as plt
import sklearn
from sklearn import datasets
import pandas as pd

In [4]:
import theano


Using gpu device 0: GeForce GTX 980 Ti (CNMeM is enabled with initial size: 80.0% of memory, cuDNN 5105)
/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/sandbox/cuda/__init__.py:600: UserWarning: Your cuDNN version is more recent than the one Theano officially supports. If you see any problems, try updating Theano or downgrading cuDNN to version 5.
  warnings.warn(warn)

In [5]:
from theano import function, config, sandbox, shared 
import theano.tensor as T

In [6]:
import numpy as np
import scipy
import time

In [7]:
print( theano.config.device )
print( theano.config.lib.cnmem)  # cf. http://deeplearning.net/software/theano/library/config.html
print( theano.config.print_active_device)  # Print the active device when the GPU device is initialized.


gpu0
0.8
True

In [8]:
import os, sys
os.getcwd()


Out[8]:
'/home/topolo/PropD/MLgrabbag'

In [9]:
%run gpu_test.py THEANO_FLAGS='mode=FAST_RUN,device=gpu,floatX=float32,lib.cnmem=0.65' # note lib.cnmem option for CnMem


[GpuElemwise{exp,no_inplace}(<CudaNdarrayType(float32, vector)>), HostFromGpu(GpuElemwise{exp,no_inplace}.0)]
Looping 1000 times took 0.221608 seconds
Result is [ 1.23178029  1.61879349  1.52278066 ...,  2.20771813  2.29967761
  1.62323296]
Used the gpu
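
For reference, gpu_test.py is (as far as I recall) essentially the "Testing Theano with GPU" script from the Theano docs; note that a THEANO_FLAGS=... string passed as an argument to %run only ends up in the script's sys.argv, so the flags that actually matter are the ones the notebook was launched with. A sketch of the script, under that assumption:

    # gpu_test.py (sketch, assuming the standard Theano docs GPU check)
    from theano import function, config, shared, sandbox
    import theano.tensor as T
    import numpy
    import time

    vlen = 10 * 30 * 768  # 10 x #cores x # threads per core
    iters = 1000

    rng = numpy.random.RandomState(22)
    x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
    f = function([], T.exp(x))
    print(f.maker.fgraph.toposort())
    t0 = time.time()
    for i in range(iters):
        r = f()
    t1 = time.time()
    print("Looping %d times took %f seconds" % (iters, t1 - t0))
    print("Result is %s" % (r,))
    if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
        print('Used the cpu')
    else:
        print('Used the gpu')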

In [9]:
print(theano.config.allow_gc)
print(theano.config.optimizer_excluding)


False


In [10]:
# cf. http://deeplearning.net/software/theano/faq.html
theano.config.allow_gc=False
print(theano.config.allow_gc)


False

In [11]:
"""A list of optimizer tags that we don’t want included in the default Mode. 
If multiple tags, separate them by ‘:’. 
Ex: to remove the elemwise inplace optimizer(slow for big graph), 
use the flags: optimizer_excluding:inplace_opt, where inplace_opt is the name of that optimization."""
theano.config.optimizer_excluding


Out[11]:
'low_memory'

cf. Theano memory/speed trade-off

"Could raise memory usage but speed up computation": try excluding the low_memory optimizations. Note, however, that this config parameter cannot be changed once Theano has initialized (see the exception below), so it has to be set up front, e.g. via the optimizer_excluding=low_memory entry in the THEANO_FLAGS used to launch the notebook above.


In [25]:
config.optimizer_excluding ="low_memory"


---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
<ipython-input-25-a54a5d9832d0> in <module>()
----> 1 config.optimizer_excluding ="low_memory"

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/configparser.pyc in __set__(self, cls, val)
    329         if not self.allow_override and hasattr(self, 'val'):
    330             raise Exception(
--> 331                 "Can't change the value of this config parameter "
    332                 "after initialization!")
    333         # print "SETTING PARAM", self.fullname,(cls), val

Exception: Can't change the value of this config parameter after initialization!

In [10]:
from six.moves import cPickle

Notes on scan: using theano.scan, and what scan is (a minimal example follows below)

    theano.scan(fn,
                sequences=None,
                outputs_info=None,
                non_sequences=None,
                mode=None, name=None)
  • fn - the function applied at every step; it should expect as inputs Theano variables representing the current slice of each input sequence, followed by the previous output values
    • the order of the sequence slices is the same as their order in the list sequences
    • the order of this function's outputs is the same as the order of outputs_info
  • sequences - list of Theano variables or dictionaries describing the sequences scan has to iterate over
  • outputs_info - list of Theano variables or dictionaries describing the initial state of the outputs computed recurrently
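
A minimal scan example (my own sketch, not from the notebook): a cumulative sum over a vector, illustrating fn / sequences / outputs_info:

    import theano
    import theano.tensor as T

    v = T.vector('v')
    # fn receives the current slice x_t of `sequences`, then the previous output acc_tm1
    # (seeded by outputs_info); scan stacks the per-step outputs into a vector
    cumsum, updates = theano.scan(fn=lambda x_t, acc_tm1: acc_tm1 + x_t,
                                  sequences=v,
                                  outputs_info=T.zeros_like(v[0]))
    f_cumsum = theano.function([v], cumsum, updates=updates)
    print(f_cumsum([1., 2., 3.]))  # expect [ 1.  3.  6.]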

In [ ]:

RNN (Recurrent Neural Networks), LSTM (Long Short-Term Memory)


In [11]:
import reberGrammar

In [10]:
train_data = reberGrammar.get_one_embedded_example() # pair (train-sequence, target-sequence)

In [12]:
train_data = reberGrammar.get_n_embedded_examples(1000)

In [12]:
print(type(train_data)); print(len(train_data)); print(type(train_data[0])); print(len(train_data[0])); 
print(len(train_data[1]))
#for arr in eg00[0]:
#    print(type(arr)); print(arr.shape)
print(type(train_data[0][0])); print(train_data[0][0].shape)
print(type(train_data[1][0])); print(train_data[1][0].shape)


<type 'tuple'>
2
<type 'list'>
15
15
<type 'numpy.ndarray'>
(7,)
<type 'numpy.ndarray'>
(7,)

In [14]:
# for example
print( train_data[0][0])
train_data[1]


[ 1.  0.  0.  0.  0.  0.  0.]
Out[14]:
[array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.])]

In [15]:
X_train_data = train_data[0]
y_train_data = train_data[1]

In [28]:
print(type(train_data)); print(len(train_data)); print(type(train_data[0])); 
print(len(train_data[0])); print( type( train_data[0][0] ) ) ; print( len(train_data[0][0])); 
print( type(train_data[0][0][0])) ; print( train_data[0][0][0].shape)


<type 'list'>
1000
<type 'tuple'>
2
<type 'list'>
14
<type 'numpy.ndarray'>
(7,)

In [17]:
pd.DataFrame(X_train_data).describe()


Out[17]:
          0          1          2          3          4          5          6
count  15.000000  15.000000  15.000000  15.000000  15.000000  15.000000  15.000000
mean    0.133333   0.133333   0.266667   0.133333   0.200000   0.066667   0.066667
std     0.351866   0.351866   0.457738   0.351866   0.414039   0.258199   0.258199
min     0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000
25%     0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000
50%     0.000000   0.000000   0.000000   0.000000   0.000000   0.000000   0.000000
75%     0.000000   0.000000   0.500000   0.000000   0.000000   0.000000   0.000000
max     1.000000   1.000000   1.000000   1.000000   1.000000   1.000000   1.000000

In [33]:
index = np.random.randint(0, len(train_data)); print(index);
i,o = train_data[index]; print(i[0]); print(o[0])


853
[ 1.  0.  0.  0.  0.  0.  0.]
[ 0.  1.  0.  0.  1.  0.  0.]

In [17]:
train_data = reberGrammar.get_n_embedded_examples(1000)


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-17-1b998b031bed> in <module>()
----> 1 train_data = reberGrammar.get_n_embedded_examples(1000)

NameError: name 'reberGrammar' is not defined

In [11]:
print( type(train_data)); print(len(train_data)); print(type( train_data[0]));print(len(train_data[0]));
print(type(train_data[0][0])); print(type(train_data[0][1])); print(len(train_data[0][0]));print(len(train_data[0][1]))
print(type(train_data[0][0][0])); print(train_data[0][0][0].shape)


<type 'list'>
1000
<type 'tuple'>
2
<type 'list'>
<type 'list'>
14
14
<type 'numpy.ndarray'>
(7,)

In [12]:
index=np.random.randint(0,len(train_data)); print(index)


908

In [13]:
i,o = train_data[index]
print(type(i));print(type(o));print(len(i));print(len(o))


<type 'list'>
<type 'list'>
14
14

Split up train_data into input training sequences $X$ and output (target) sequences $y$. Each sequence is a function of time $t=0,1,\dots,T-1$ (with $T$ varying from example to example), and there are $m=1000$ (input, output) pairs here.


In [30]:
X_train_datat, y_train_datat = zip(*train_data)

In [36]:
print(type(X_train_datat)); print(len(X_train_datat)); print(type(X_train_datat[0]));print(len(X_train_datat[0]))
X_train_datat[0]


<type 'tuple'>
1000
<type 'list'>
17
Out[36]:
[array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.])]

In [37]:
print(type(y_train_datat)); print(len(y_train_datat)); print(type(y_train_datat[0]));print(len(y_train_datat[0]))
y_train_datat[0]


<type 'tuple'>
1000
<type 'list'>
17
Out[37]:
[array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.])]

In [40]:
dtype=theano.config.floatX
#TODO: Use a more appropriate initialization method
def sample_weights(sizeX, sizeY):
    """Sample a (sizeX, sizeY) matrix uniformly from [-1, 1) and rescale it so that
    its largest singular value is 1 (a crude spectral normalization)."""
    values = np.ndarray([sizeX, sizeY], dtype=dtype)
    for dx in xrange(sizeX):
        vals = np.random.uniform(low=-1., high=1., size=(sizeY,))
        #vals_norm = np.sqrt((vals**2).sum())
        #vals = vals / vals_norm
        values[dx, :] = vals
    _, svs, _ = np.linalg.svd(values)
    # svs[0] is the largest singular value
    values = values / svs[0]
    return values

In [42]:
sample_weights(7,10).shape


Out[42]:
(7, 10)

Herta's LSTM
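
For orientation, the memory block built below follows the usual LSTM recurrence. This is my sketch of the textbook form; the exact wiring, including the cell-state peephole weights $W$ that the ThetabthetaW parameter grouping suggests, lives in LSTM_Herta, so treat this only as a reference:

$$
\begin{aligned}
g^{(t)} &= \tanh\big(\Theta_g x^{(t)} + \theta_g h^{(t-1)} + b_g\big) \\
i^{(t)} &= \sigma\big(\Theta_i x^{(t)} + \theta_i h^{(t-1)} + W_i c^{(t-1)} + b_i\big) \\
f^{(t)} &= \sigma\big(\Theta_f x^{(t)} + \theta_f h^{(t-1)} + W_f c^{(t-1)} + b_f\big) \\
c^{(t)} &= f^{(t)} \odot c^{(t-1)} + i^{(t)} \odot g^{(t)} \\
o^{(t)} &= \sigma\big(\Theta_o x^{(t)} + \theta_o h^{(t-1)} + W_o c^{(t)} + b_o\big) \\
h^{(t)} &= o^{(t)} \odot \tanh\big(c^{(t)}\big) \\
y^{(t)} &= \sigma\big(\Theta_y h^{(t)} + b_y\big)
\end{aligned}
$$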


In [15]:
sys.path.append( os.getcwd() + '/ML' )

Import my code from the ./ML folder.


In [15]:
from LSTM import Gates, Thetab, ThetabthetaW

In [16]:
from LSTM_Herta import *
#from LSTM_Herta import Gates, Psis, Thetab_right, Thetabtheta_right, ThetabthetaW, Feedforward_g_right, Feedforward_ifo_right, LSTM_Model_right, MemoryBlock_right

In [17]:
L_Herta = Gates(g=2,i=2,f=2,o=2)

n_hidden = n_i = n_c = n_o = n_f = 10
n_in = 7 # for embedded reber grammar
n_y = 7 # for embedded reber grammar; this is K in my notation

s_l_Herta = Gates(g=[n_in,n_c],i=[n_in,n_i],f=[n_in,n_f],o=[n_in,n_o])

activations_Herta = Psis(g=(T.tanh, T.tanh), i=(T.nnet.sigmoid, T.nnet.sigmoid),f=(T.nnet.sigmoid, T.nnet.sigmoid),
                         o=(T.nnet.sigmoid, T.nnet.sigmoid),h=(T.tanh,))

In [18]:
LSTM_model_Herta=LSTM_Model_right(L_Herta,s_l_Herta,n_hidden,n_y,activations_Herta,T.nnet.sigmoid )
lstm_step_fxn = LSTM_model_Herta.build_lstm_step()
MemBlck_Herta = MemoryBlock_right(n_hidden,LSTM_model_Herta)
MemBlck_Herta.build_scan_over_t()
MemBlck_Herta.build_J(0.1)


Total number of parameters: 17 
Out[18]:
Elemwise{add,no_inplace}.0
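
The reported 17 parameters presumably break down along the imported parameter groups: the block input $g$ gets $(\Theta, \theta, b)$ (3), each of the gates $i, f, o$ gets $(\Theta, \theta, W, b)$ (4 each, 12 total), and the output layer gets $(\Theta, b)$ (2), so $3 + 12 + 2 = 17$. (This is my reading of the Thetab_right / Thetabtheta_right / ThetabthetaW naming; I have not re-derived it from LSTM_Herta itself.)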

In [19]:
MemBlck_Herta.build_update()

In [20]:
%time results_Herta_prelim = MemBlck_Herta.train_rnn(train_data,2)  # theano.config.allow_gc =:  False
# CPU times: user 20.5 s, sys: 10.3 s, total: 30.8 s
# Wall time: 30.7 s


theano.config.allow_gc =:  False
CPU times: user 20.5 s, sys: 10.3 s, total: 30.8 s
Wall time: 30.7 s

In [20]:
%time results_Herta_prelim = MemBlck_Herta.train_rnn(train_data,2) # theano.config.allow_gc =:  False
# CPU times: user 20.4 s, sys: 4.79 s, total: 25.2 s
# Wall time: 25.2 s # NOTE EY: 20170224, I added the optimizer_excluding=low_memory flag


theano.config.allow_gc =:  False
CPU times: user 20.4 s, sys: 4.79 s, total: 25.2 s
Wall time: 25.2 s

In [22]:
%time results_Herta = MemBlck_Herta.train_rnn(train_data)  # theano.config.allow_gc =:  False
# CPU times: user 41min 31s, sys: 10min 9s, total: 51min 40s
# Wall time: 51min 39s


theano.config.allow_gc =:  False
CPU times: user 41min 31s, sys: 10min 9s, total: 51min 40s
Wall time: 51min 39s

In [21]:
%time results_Herta = MemBlck_Herta.train_rnn(train_data) # I don't think optimizer_excluding=low_memory helps # theano.config.allow_gc =:  False
#CPU times: user 42min 32s, sys: 10min 49s, total: 53min 22s
#Wall time: 53min 20s


theano.config.allow_gc =:  False
CPU times: user 42min 32s, sys: 10min 49s, total: 53min 22s
Wall time: 53min 20s

In [22]:
print(type(results_Herta)); print(len(results_Herta)); 
plt.plot(np.arange(250), results_Herta, 'b-')
plt.xlabel('epochs')
plt.ylabel('error')
#plt.ylim(0., 50)


<type 'numpy.ndarray'>
250
Out[22]:
<matplotlib.text.Text at 0x7f5d631c9350>

In [ ]:
%time results_full = MemBlck_Herta.train_model_full(train_data)

In [20]:
test_data = reberGrammar.get_n_embedded_examples(10)

In [30]:
# automatically:
# MemBlck_Herta.predict(test_data)

# manually:
# predictions = theano.function(inputs=[MemBlck_Herta.X], outputs=MemBlck_Herta.scan_res[0][-1])

def predict_on_lst_manual(test_data, verbose=True):
    """Run the trained memory block on each (input, target) pair and collect the predicted output sequences.
    NOTE: this recompiles the Theano function on every iteration; compiling it once, outside the loop,
    would be much faster."""
    predictions = []
    for i, o in test_data:
        predictions_func = theano.function(inputs=[MemBlck_Herta.X], outputs=MemBlck_Herta.scan_res[0][-1])
        predicted_y = predictions_func(i)

        if verbose:
            # compare the second-to-last target symbol with the corresponding prediction
            print o[-2]
            print predicted_y[-2]

            print np.argmax( o[-2] )
            print np.argmax( predicted_y[-2] )

        predictions.append( predicted_y )
    return predictions

predictions_test  = predict_on_lst_manual(test_data)


<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply <theano.gof.opt.EquilibriumOptimizer object at 0x7fbc969e2c90>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 2223, in apply
    sub_prof = gopt.apply(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer')

[... the same MergeOptimizer warning and traceback is printed again before each of the remaining test sequences; the repetitions are omitted here. The per-sequence output (target o[-2], then predicted_y[-2]) was:]

[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03654994  0.22352684  0.17996094  0.17996106  0.27605593  0.25267285
  0.27077496]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03652277  0.22360229  0.18104303  0.18104315  0.27559629  0.25401294
  0.2690331 ]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03623551  0.21539557  0.15901656  0.15901668  0.28735468  0.22797918
  0.31401905]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03653074  0.2218926   0.18212698  0.18212706  0.27314919  0.2502594
  0.27180815]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03678124  0.2233016   0.17556426  0.17556438  0.27802235  0.24592921
  0.27813935]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03789635  0.35366094  0.22607996  0.2260799   0.25579727  0.45357969
  0.1161061 ]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03562396  0.20534104  0.14943017  0.14943029  0.29428062  0.21335547
  0.34463325]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03593003  0.20925818  0.15188682  0.15188695  0.29261521  0.21754707
  0.33451572]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03678124  0.2233016   0.17556426  0.17556438  0.27802235  0.24592921
  0.27813935]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03415391  0.23968399  0.1267381   0.12673819  0.31862083  0.2563501
  0.32694301]

In [21]:
# j=0,1,...,m-1, m=10 test pts.
print(type(test_data));print(len(test_data));print(type(test_data[0]));print(len(test_data[0]));
print(type(test_data[0][0])); print(len(test_data[0][0]));
X_test_data,y_test_data=map( list, zip(*test_data) )
print(type(X_test_data));print(len(X_test_data));print(type(y_test_data));print(len(y_test_data))


<type 'list'>
10
<type 'tuple'>
2
<type 'list'>
14
<type 'list'>
10
<type 'list'>
10

In [ ]:


In [ ]:


In [47]:
print(type(X_test_data[0]));print(len(X_test_data[0]));
print(test_data[0][0] == X_test_data[0])


<type 'list'>
14
True

In [40]:
len(test_data[0][0])


Out[40]:
14

In [49]:
print(type(predictions_test));print(len(predictions_test));print(type(predictions_test[0]));print(len(predictions_test[0]))
print(predictions_test[0].shape)


<type 'list'>
10
<type 'numpy.ndarray'>
14
(14, 7)

In [53]:
np.argmax( predictions_test[0],axis=1 )


Out[53]:
array([1, 1, 1, 3, 1, 1, 3, 5, 5, 5, 5, 5, 4, 6])

In [60]:
print(type(y_test_data[0])); print(len(y_test_data[0]));
[ np.argmax(row_t) for row_t in y_test_data[0]]


<type 'list'>
14
Out[60]:
[1, 0, 1, 1, 1, 4, 2, 1, 1, 1, 4, 6, 1, 6]

In [57]:
np.equal( np.array( [np.argmax(row_t) for row_t in y_test_data[0]]), np.argmax(predictions_test[0],axis=1) ).astype(int)


Out[57]:
array([1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1])

In [58]:
y_test_data[0]


Out[58]:
[array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.])]

In [59]:
predictions_test[0]


Out[59]:
array([[ 0.12725599,  0.61258602,  0.10895271,  0.10895335,  0.57064903,
         0.228144  ,  0.05477276],
       [ 0.12897901,  0.47995096,  0.223637  ,  0.22363782,  0.39479122,
         0.17177676,  0.05590523],
       [ 0.11709789,  0.49009666,  0.21457456,  0.2145751 ,  0.44983098,
         0.21251591,  0.04870971],
       [ 0.10283783,  0.39010844,  0.40230978,  0.40231028,  0.20971726,
         0.20519017,  0.05218001],
       [ 0.07234708,  0.51123303,  0.33657774,  0.33657801,  0.19556043,
         0.42258799,  0.04219118],
       [ 0.05789826,  0.47211504,  0.25898573,  0.25898579,  0.32073686,
         0.45494437,  0.05063221],
       [ 0.05792973,  0.3779439 ,  0.45257682,  0.45257685,  0.12470939,
         0.38619873,  0.05874603],
       [ 0.04284814,  0.60218281,  0.24309468,  0.24309473,  0.17546253,
         0.68830228,  0.04707833],
       [ 0.03593669,  0.64953119,  0.17262556,  0.17262563,  0.20547776,
         0.76594102,  0.05280624],
       [ 0.03250235,  0.64148813,  0.1480808 ,  0.14808087,  0.2136755 ,
         0.77689433,  0.06402083],
       [ 0.03233429,  0.47218853,  0.13169146,  0.13169149,  0.33533978,
         0.61225629,  0.10998431],
       [ 0.03612649,  0.2625376 ,  0.19276516,  0.19276524,  0.27899075,
         0.32785827,  0.20313965],
       [ 0.03654994,  0.22352684,  0.17996094,  0.17996106,  0.27605593,
         0.25267285,  0.27077496],
       [ 0.03735466,  0.19758794,  0.19612394,  0.19612411,  0.23156635,
         0.20967364,  0.3144643 ]], dtype=float32)

In [62]:
(predictions_test[0] > 0.5).astype(float)


Out[62]:
array([[ 0.,  1.,  0.,  0.,  1.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  1.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  1.,  0.,  0.,  0.,  1.,  0.],
       [ 0.,  1.,  0.,  0.,  0.,  1.,  0.],
       [ 0.,  1.,  0.,  0.,  0.,  1.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  1.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.]])

In [65]:
np.equal( (predictions_test[0]>0.5).astype(float) , np.array(y_test_data[0]) ).astype(float)


Out[65]:
array([[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
       [ 0.,  1.,  1.,  1.,  1.,  1.,  1.],
       [ 1.,  0.,  1.,  1.,  0.,  1.,  1.],
       [ 1.,  0.,  1.,  1.,  1.,  0.,  1.],
       [ 1.,  1.,  1.,  1.,  1.,  0.,  1.],
       [ 1.,  1.,  1.,  1.,  0.,  0.,  1.],
       [ 1.,  1.,  0.,  0.,  1.,  1.,  1.],
       [ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
       [ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
       [ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
       [ 1.,  1.,  1.,  1.,  0.,  1.,  1.],
       [ 1.,  1.,  1.,  1.,  1.,  1.,  0.],
       [ 1.,  0.,  1.,  1.,  1.,  1.,  1.],
       [ 1.,  1.,  1.,  1.,  1.,  1.,  0.]])

In [67]:
testarr0=np.equal( (predictions_test[0]>0.5).astype(float) , np.array(y_test_data[0]) ).astype(float)
print(testarr0.size); print(np.sum(testarr0))


98
84.0

In [68]:
assert len(predictions_test) == len(y_test_data)
m = len(predictions_test)
TOTALELE = 0.
POSRES   = 0.
for j in range(m):
    testing_arr = np.equal( (predictions_test[j]>0.5).astype(float) , np.array( y_test_data[j]).astype(float) )
    TOTALELE += testing_arr.size
    POSRES += np.sum( testing_arr )

print POSRES, TOTALELE, POSRES/TOTALELE  # 942.0 1099.0 0.857142857143


942.0 1099.0 0.857142857143
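
Since the same thresholded-agreement accuracy gets computed again below for predictions_test2, it could be wrapped in a small helper; a sketch (the helper name is mine):

    def binary_accuracy(predictions, targets, threshold=0.5):
        """Count (time step, symbol) entries where the thresholded prediction agrees with the target."""
        total, correct = 0., 0.
        for pred, target in zip(predictions, targets):
            agree = np.equal((pred > threshold).astype(float), np.array(target).astype(float))
            total += agree.size
            correct += np.sum(agree)
        return correct, total, correct / total

    # e.g. binary_accuracy(predictions_test, y_test_data)  # -> (942.0, 1099.0, 0.857...)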

In [75]:
print(len(MemBlck_Herta.LSTM_model.params))
print([type(ele) for ele in MemBlck_Herta.LSTM_model.params])
print(type( MemBlck_Herta.LSTM_model.params[0].get_value()) )


17
[<class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>]
<type 'numpy.ndarray'>

In [78]:
f = open("LSTM_Herta_rebergrammar.save",'wb')
for param in MemBlck_Herta.LSTM_model.params:
    cPickle.dump( param.get_value(), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
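
To load these parameters back later, the counterpart would be something like this sketch (it assumes the values are unpickled in the same order they were dumped, and that each shared variable accepts the stored numpy array via set_value):

    f = open("LSTM_Herta_rebergrammar.save", 'rb')
    for param in MemBlck_Herta.LSTM_model.params:
        param.set_value(cPickle.load(f))  # restore each shared variable in dump order
    f.close()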

testing out the .train_model_full method


In [79]:
%time train_err_full = MemBlck_Herta.train_model_full(train_data)  # theano.config.allow_gc =:  False
# CPU times: user 41min 42s, sys: 10min 26s, total: 52min 9s
# Wall time: 52min 8s


theano.config.allow_gc =:  False
CPU times: user 41min 42s, sys: 10min 26s, total: 52min 9s
Wall time: 52min 8s

In [88]:
m = len( test_data )
#test_data[0][0]
# Convert each example from a list of per-time-step vectors into a single (T, 7) float32 array,
# so the compiled Theano function is fed proper matrices.
test_data2 = []
for j in range(m):
    test_data2.append( (np.array( test_data[j][0] ).astype(theano.config.floatX),
                        np.array( test_data[j][1]).astype(theano.config.floatX) )  )

In [89]:
predictions_test2  = predict_on_lst_manual(test_data2)


[... the same MergeOptimizer warning/traceback as in In [30] above is printed repeatedly; omitted. The per-sequence output (target, then prediction) was:]

[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03436736  0.22665322  0.16776943  0.16776943  0.27792847  0.2552793
  0.28169337]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03431728  0.22717163  0.16979101  0.16979101  0.27687672  0.25808966
  0.27789384]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03446481  0.21921226  0.1519177   0.1519177   0.28736371  0.23247316
  0.31759408]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03449107  0.22043866  0.16158645  0.16158645  0.28003576  0.23917593
  0.30183432]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03458566  0.22651395  0.16490869  0.16490869  0.27892867  0.25001329
  0.28713083]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03340504  0.32906768  0.19061083  0.19061083  0.27263764  0.40362844
  0.15755469]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03436713  0.20963696  0.14587112  0.14587112  0.29266161  0.21802917
  0.3412233 ]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.0344641   0.21368355  0.14697511  0.14697511  0.29182962  0.2226091
  0.33337036]
[ 0.  0.  0.  0.  1.  0.  0.]
[ 0.03458566  0.22651395  0.16490869  0.16490869  0.27892867  0.25001329
  0.28713083]
[ 0.  1.  0.  0.  0.  0.  0.]
[ 0.03248088  0.24626826  0.1243487   0.1243487   0.31888732  0.2595799
  0.32345057]

In [90]:
print( len(predictions_test2))


10

In [91]:
assert len(predictions_test2) == len(y_test_data)
m = len(predictions_test2)
TOTALELE = 0.
POSRES   = 0.
for j in range(m):
    testing_arr = np.equal( (predictions_test2[j]>0.5).astype(float) , np.array( y_test_data[j]).astype(float) )
    TOTALELE += testing_arr.size
    POSRES += np.sum( testing_arr )

print POSRES, TOTALELE, POSRES/TOTALELE  # 944.0 1099.0 0.858962693358


944.0 1099.0 0.858962693358
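
For reference, the same per-entry accuracy can be computed in one shot by flattening everything first. This is just a sketch of the metric used above (threshold the predictions at 0.5 and compare entry by entry against the one-hot targets), assuming predictions_test2 and y_test_data hold arrays of matching shapes:

    pred_all = np.concatenate([np.asarray(p).ravel() for p in predictions_test2])
    targ_all = np.concatenate([np.asarray(t).ravel() for t in y_test_data])
    hits = np.equal((pred_all > 0.5).astype(float), targ_all.astype(float))
    print np.sum(hits), hits.size, np.mean(hits)  # should reproduce 944.0 1099.0 0.8589...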

In [25]:
MemBlck_Herta.gradDescent_step( train_data[900][0], train_data[900][1] )


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-25-1b583f18ca6c> in <module>()
----> 1 MemBlck_Herta.gradDescent_step( train_data[900][0], train_data[900][1] )

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
    869                     node=self.fn.nodes[self.fn.position_of_error],
    870                     thunk=thunk,
--> 871                     storage_map=getattr(self.fn, 'storage_map', None))
    872             else:
    873                 # old-style linkers raise their own exceptions

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/link.pyc in raise_with_op(node, thunk, exc_info, storage_map)
    312         # extra long error message in that case.
    313         pass
--> 314     reraise(exc_type, exc_value, exc_trace)
    315 
    316 

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
    857         t0_fn = time.time()
    858         try:
--> 859             outputs = self.fn()
    860         except Exception:
    861             if hasattr(self.fn, 'position_of_error'):

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/scan_module/scan_op.pyc in rval(p, i, o, n, allow_gc)
    949         def rval(p=p, i=node_input_storage, o=node_output_storage, n=node,
    950                  allow_gc=allow_gc):
--> 951             r = p(n, [x[0] for x in i], o)
    952             for o in node.outputs:
    953                 compute_map[o][0] = True

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/scan_module/scan_op.pyc in <lambda>(node, args, outs)
    938                         args,
    939                         outs,
--> 940                         self, node)
    941         except (ImportError, theano.gof.cmodule.MissingGXX):
    942             p = self.execute

theano/scan_module/scan_perform.pyx in theano.scan_module.scan_perform.perform (/home/topolo/.theano/compiledir_Linux-4.2-fc23.x86_64-x86_64-with-fedora-23-Twenty_Three-x86_64-2.7.11-64/scan_perform/mod.cpp:4316)()

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/link.pyc in raise_with_op(node, thunk, exc_info, storage_map)
    312         # extra long error message in that case.
    313         pass
--> 314     reraise(exc_type, exc_value, exc_trace)
    315 
    316 

theano/scan_module/scan_perform.pyx in theano.scan_module.scan_perform.perform (/home/topolo/.theano/compiledir_Linux-4.2-fc23.x86_64-x86_64-with-fedora-23-Twenty_Three-x86_64-2.7.11-64/scan_perform/mod.cpp:4193)()

ValueError: dimension mismatch in args to gemv (10,7)x(10)->(10)
Apply node that caused the error: GpuGemv{no_inplace}(b1_copy[cuda], TensorConstant{1.0}, GpuDimShuffle{1,0}.0, GpuElemwise{Composite{(scalar_sigmoid((i0 + i1)) * tanh(i2))},no_inplace}.0, TensorConstant{1.0})
Toposort index: 31
Inputs types: [CudaNdarrayType(float32, vector), TensorType(float32, scalar), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), TensorType(float32, scalar)]
Inputs shapes: [(10,), (), (10, 7), (10,), ()]
Inputs strides: [(1,), (), (1, 10), (1,), ()]
Inputs values: ['not shown', array(1.0, dtype=float32), 'not shown', 'not shown', array(1.0, dtype=float32)]
Outputs clients: [[GpuElemwise{scalar_sigmoid,no_inplace}(GpuGemv{no_inplace}.0)]]

HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
Apply node that caused the error: forall_inplace,gpu,scan_fn}(Shape_i{0}.0, GpuSubtensor{int64:int64:int8}.0, GpuIncSubtensor{Set;:int64:}.0, GpuIncSubtensor{InplaceSet;:int64:}.0, Shape_i{0}.0, Theta1, b1, theta1, Theta1, b1, theta1, W1, Theta1, b1, theta1, W1, Theta1, b1, theta1, W1, Theta1, b1)
Toposort index: 152
Inputs types: [TensorType(int64, scalar), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), TensorType(int64, scalar), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector)]
Inputs shapes: [(), (17, 7), (18, 10), (18, 10), (), (7, 10), (10,), (10, 10), (7, 10), (10,), (10, 10), (10, 10), (7, 10), (10,), (10, 10), (10, 10), (7, 10), (10,), (10, 10), (10, 10), (7, 10), (10,)]
Inputs strides: [(), (7, 1), (10, 1), (10, 1), (), (10, 1), (1,), (10, 1), (10, 1), (1,), (10, 1), (10, 1), (10, 1), (1,), (10, 1), (10, 1), (10, 1), (1,), (10, 1), (10, 1), (10, 1), (1,)]
Inputs values: [array(17), 'not shown', 'not shown', 'not shown', array(17), 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown']
Outputs clients: [[GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.0, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1}), GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.0, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1})], [GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.1, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1}), GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.1, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1})], [GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.2, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1}), GpuElemwise{sub,no_inplace}(CudaNdarrayConstant{[[ 1.]]}, forall_inplace,gpu,scan_fn}.2), GpuElemwise{Composite{((i0 * log(i1)) + (i2 * log(i3)))}}[(0, 0)](GpuFromHost.0, forall_inplace,gpu,scan_fn}.2, GpuElemwise{sub,no_inplace}.0, GpuElemwise{sub,no_inplace}.0)]]

HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.

In [ ]:

From Christian Herta's Neural Networks page. Note that he sizes the matrices and vectors, and thus the numpy arrays, according to a right action on the module or vector space, whereas I use the usual left action on the module (i.e. the matrix multiplies from the left onto the vectors of the vector space it acts upon, which sit on the right).
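
A concrete way to see the difference: under the right action, a map from n_in = 7 inputs to n_c = 10 cells is a (7, 10) matrix acting as x.dot(W) on a length-7 (row) vector, while under the left action the same map is a (10, 7) matrix acting as W.dot(x) on a length-7 (column) vector. Mixing the two conventions gives exactly the kind of gemv shape mismatch seen above, (10,7) against a length-10 vector. A minimal numpy illustration (array names here are purely illustrative):

    x = np.ones(7, dtype='float32')                # one input symbol, length n_in = 7
    W_right = np.zeros((7, 10), dtype='float32')   # right-action convention: (n_in, n_c)
    W_left = W_right.T                             # left-action convention:  (n_c, n_in)

    print x.dot(W_right).shape   # (10,)  -- x acted on from the right
    print W_left.dot(x).shape    # (10,)  -- x acted on from the left
    # W_left.dot(x.dot(W_right)) fails: a (10, 7) matrix cannot act on a length-10 vector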


In [31]:
n_c = 10
n_in = 7
s_l_in = Gates(g=[n_in,n_c],i=[n_in,n_c],f=[n_in,n_c],o=[n_in,n_c])
print(s_l_in)


Gates(g=[7, 10], i=[7, 10], f=[7, 10], o=[7, 10])
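
Judging from the repr above, Gates is a namedtuple that holds, for each of the four LSTM gates (g, i, f, o), the list of layer sizes running from the input dimension to the number of memory cells. A stand-in definition for experimenting outside the module (an assumption about its definition, not the actual source) would be:

    from collections import namedtuple
    Gates = namedtuple("Gates", ["g", "i", "f", "o"])   # per-gate layer-size lists

    s_l_in = Gates(g=[7, 10], i=[7, 10], f=[7, 10], o=[7, 10])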

In [21]:
np.ndarray([10,7]).shape
np.ndarray([3,5],dtype='int32')[:,0]


Out[21]:
array([0, 0, 0], dtype=int32)

In [32]:
g1ThetabthetaW = ThetabthetaW( s_l_in.g, n_c,activation=T.tanh)

In [33]:
print( type(g1ThetabthetaW.Theta) , type(g1ThetabthetaW.b ), type(g1ThetabthetaW.theta), type(g1ThetabthetaW.W))


(<class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>, <class 'theano.sandbox.cuda.var.CudaNdarraySharedVariable'>)
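
All four members come back as CudaNdarraySharedVariable because the session runs on the GPU with floatX=float32, and the sandbox.cuda backend places any theano.shared built from a float32 numpy array on the GPU. A minimal sketch of creating such a shared parameter (not necessarily how ThetabthetaW builds them internally):

    rng = np.random.RandomState(0)
    Theta_init = rng.uniform(low=-1.0, high=1.0, size=(7, 10)).astype(theano.config.floatX)
    Theta = theano.shared(Theta_init, name="Theta")
    print type(Theta)   # CudaNdarraySharedVariable when float32 arrays live on the GPU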

LSTM_Right: LSTM with right action


In [13]:
sys.path.append( os.getcwd() + '/ML' )

In [14]:
from LSTM_Right import *

In [25]:
L = Gates(g=2,i=2,f=2,o=2)

n_hidden = n_i = n_c = n_o = n_f = 10
n_in = 7 # for embedded reber grammar
n_y = 7 # for embedded reber grammar; this is K in my notation

s_l = Gates(g=[n_in,n_c],i=[n_in,n_i],f=[n_in,n_f],o=[n_in,n_o])

activations = Psis(g=(T.tanh, T.tanh), i=(T.nnet.sigmoid, T.nnet.sigmoid),f=(T.nnet.sigmoid, T.nnet.sigmoid),
                         o=(T.nnet.sigmoid, T.nnet.sigmoid),h=(T.tanh,))

In [26]:
LSTM_model=LSTM_Model_right(L,s_l,n_hidden,n_y,activations,T.nnet.sigmoid )
lstm_step_fxn = LSTM_model.build_lstm_step()
MemBlck = MemoryBlock_right(n_hidden,LSTM_model)
MemBlck.build_scan_over_t()
MemBlck.build_J(0.1)


Total number of parameters: 17 
Total number of parameters: 17 
Out[26]:
Elemwise{add,no_inplace}.0
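
build_scan_over_t presumably wraps lstm_step_fxn in theano.scan so the hidden and cell states are threaded through the time steps of one sequence. A generic sketch of that pattern, with a toy recurrence standing in for the actual LSTM step (names here are illustrative only):

    n_in, n_hidden = 7, 10
    W_x = theano.shared(np.zeros((n_in, n_hidden), dtype=theano.config.floatX), name="W_x")
    W_h = theano.shared(np.zeros((n_hidden, n_hidden), dtype=theano.config.floatX), name="W_h")

    X = T.matrix("X")                                   # one sequence, shape (T, n_in)
    h0 = T.zeros((n_hidden,), dtype=theano.config.floatX)

    def step(x_t, h_tm1):                               # stands in for lstm_step_fxn
        return T.tanh(T.dot(x_t, W_x) + T.dot(h_tm1, W_h))

    h_seq, scan_updates = theano.scan(fn=step,
                                      sequences=[X],
                                      outputs_info=[h0])
    run_over_t = theano.function([X], h_seq[-1], updates=scan_updates)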

In [33]:
MemBlck.build_update()


Total number of parameters: 17 

In [36]:
%time results_prelim = MemBlck.train_model_full(train_data,2)


theano.config.allow_gc =:  False
CPU times: user 20.3 s, sys: 4.83 s, total: 25.2 s
Wall time: 25.1 s

In [35]:
MemBlck.LSTM_model.__get_state__();


Total number of parameters: 17 

In [37]:
%time results_prelim = MemBlck.train_model_full(train_data)


theano.config.allow_gc =:  False
CPU times: user 41min 57s, sys: 9min 16s, total: 51min 14s
Wall time: 51min 13s

In [43]:
predictions_full = MemBlck.predict_on_lst( test_data, verbose=False);


<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply <theano.gof.opt.EquilibriumOptimizer object at 0x7f5d3c4f7610>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 2223, in apply
    sub_prof = gopt.apply(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer')

[... the same MergeOptimizer traceback repeats for each remaining test sequence; omitted ...]


In [45]:
X_test_data,y_test_data=map( list, zip(*test_data) )

In [47]:
assert len(predictions_full) == len(y_test_data)
m = len(predictions_full)
TOTALELE = 0.
POSRES   = 0.
for j in range(m):
    testing_arr = np.equal( (predictions_full[j]>0.5).astype(float) , np.array( y_test_data[j]).astype(float) )
    TOTALELE += testing_arr.size
    POSRES += np.sum( testing_arr )

print POSRES, TOTALELE, POSRES/TOTALELE  # an earlier run gave 942.0 1099.0 0.857142857143


1069.0 1246.0 0.857945425361

In [ ]:

Trying out multiple "hidden" layers


In [15]:
L_Mult = Gates(g=3,i=2,f=2,o=2)

n_hidden = n_i = n_c = n_o = n_f = 10
n_in = 7 # for embedded reber grammar
n_y = 7 # for embedded reber grammar; this is K in my notation

sg_2 = 9 # s^{(g)}_2, i.e. \alpha = g, gate g, l = 2, layer 2's "size"

s_l_Mult = Gates(g=[n_in,sg_2, n_c],i=[n_in,n_i],f=[n_in,n_f],o=[n_in,n_o])

activations_Mult = Psis(g=(T.tanh, T.tanh), i=(T.nnet.sigmoid, T.nnet.sigmoid),f=(T.nnet.sigmoid, T.nnet.sigmoid),
                         o=(T.nnet.sigmoid, T.nnet.sigmoid),h=(T.tanh,))
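
Giving the g gate the size list [n_in, sg_2, n_c] = [7, 9, 10], together with the two activations listed for g in Psis, presumably means the gate's input is passed through a small two-layer feedforward stack before reaching the n_c memory cells. A generic sketch of such a per-gate stack under that assumption (parameter names are illustrative, not those used inside LSTM_Right):

    sizes = [7, 9, 10]                    # [n_in, sg_2, n_c] for gate g
    acts = (T.tanh, T.tanh)               # one activation per layer, as in Psis

    Thetas = [theano.shared(np.zeros((sizes[l], sizes[l + 1]), dtype=theano.config.floatX))
              for l in range(len(sizes) - 1)]
    bs = [theano.shared(np.zeros(sizes[l + 1], dtype=theano.config.floatX))
          for l in range(len(sizes) - 1)]

    def gate_stack(x):
        """Apply each (Theta, b, activation) layer in turn, right-action convention."""
        out = x
        for Theta, b, act in zip(Thetas, bs, acts):
            out = act(T.dot(out, Theta) + b)
        return out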

In [16]:
LSTM_model_Mult=LSTM_Model_right(L_Mult,s_l_Mult,n_hidden,n_y,activations_Mult,T.nnet.sigmoid )
lstm_step_fxn_Mult = LSTM_model_Mult.build_lstm_step()
MemBlck_Mult = MemoryBlock_right(n_hidden,LSTM_model_Mult)
MemBlck_Mult.build_scan_over_t()
MemBlck_Mult.build_J(0.1)


Total number of parameters: 19 
Total number of parameters: 19 
Out[16]:
Elemwise{add,no_inplace}.0

In [17]:
MemBlck_Mult.build_update()


Total number of parameters: 19 

In [18]:
%time results_prelim = MemBlck_Mult.train_model_full(train_data,2)  # WITHOUT optimizer_excluding="low_memory"
# CPU times: user 23 s, sys: 5.5 s, total: 28.5 s
# Wall time: 28.5 s


theano.config.allow_gc =:  False
CPU times: user 23 s, sys: 5.5 s, total: 28.5 s
Wall time: 28.5 s

In [19]:
%time results_prelim_Mult = MemBlck_Mult.train_model_full(train_data)  # theano.config.allow_gc =:  False
# CPU times: user 47min 34s, sys: 13min 13s, total: 1h 47s
# Wall time: 1h 46s


theano.config.allow_gc =:  False
CPU times: user 47min 34s, sys: 13min 13s, total: 1h 47s
Wall time: 1h 46s

In [20]:
print(type(results_prelim_Mult)); print(len(results_prelim_Mult)); 
plt.plot(np.arange(250), results_prelim_Mult, 'b-')
plt.xlabel('epochs')
plt.ylabel('error')


<type 'numpy.ndarray'>
250
Out[20]:
<matplotlib.text.Text at 0x7f02df6348d0>

In [22]:
test_data = reberGrammar.get_n_embedded_examples(10)

In [23]:
predictions_Mult = MemBlck_Mult.predict_on_lst( test_data, verbose=False)


<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply <theano.gof.opt.EquilibriumOptimizer object at 0x7f02df4fae90>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 2223, in apply
    sub_prof = gopt.apply(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer')

[... the same MergeOptimizer traceback repeats for each remaining test sequence; omitted ...]


In [25]:
X_test_data,y_test_data=map( list, zip(*test_data) )

In [26]:
assert len(predictions_Mult) == len(y_test_data)
m = len(predictions_Mult)
TOTALELE = 0.
POSRES   = 0.
for j in range(m):
    testing_arr = np.equal( (predictions_Mult[j]>0.5).astype(float) , np.array( y_test_data[j]).astype(float) )
    TOTALELE += testing_arr.size
    POSRES += np.sum( testing_arr )

print POSRES, TOTALELE, POSRES/TOTALELE # 1006.0 1253.0 0.802873104549


1006.0 1253.0 0.802873104549

More multiple hidden layers


In [27]:
L_Mult = Gates(g=4,i=3,f=2,o=2)

n_hidden = n_i = n_c = n_o = n_f = 10
n_in = 7 # for embedded reber grammar
n_y = 7 # for embedded reber grammar; this is K in my notation

sg_2 = 9 # s^{(g)}_2, i.e. \alpha = g, gate g, l = 2, layer 2's "size"

s_l_Mult = Gates(g=[n_in,8,9, n_c],i=[n_in,sg_2,n_i],f=[n_in,n_f],o=[n_in,n_o])

activations_Mult = Psis(g=(T.tanh, T.tanh), i=(T.nnet.sigmoid, T.nnet.sigmoid),f=(T.nnet.sigmoid, T.nnet.sigmoid),
                         o=(T.nnet.sigmoid, T.nnet.sigmoid),h=(T.tanh,))

In [31]:
LSTM_model_Mult=LSTM_Model_right(L_Mult,s_l_Mult,n_hidden,n_y,activations_Mult,T.nnet.sigmoid )
lstm_step_fxn_Mult = LSTM_model_Mult.build_lstm_step()
MemBlck_Mult = MemoryBlock_right(n_hidden,LSTM_model_Mult)
MemBlck_Mult.build_scan_over_t()
MemBlck_Mult.build_J(0.5)


Total number of parameters: 23 
Total number of parameters: 23 
Out[31]:
Elemwise{add,no_inplace}.0

In [32]:
MemBlck_Mult.build_update(alpha=0.05,beta=0.000001)


Total number of parameters: 23 
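
build_update is called with alpha=0.05 and beta=0.000001; assuming alpha is the gradient-descent learning rate and beta a regularization weight added to the cost (an assumption about this class, not read off its source), a self-contained toy of that kind of update step in Theano looks like:

    # one scalar parameter, quadratic cost plus an L2 penalty, plain gradient descent
    w = theano.shared(np.asarray(3.0, dtype=theano.config.floatX), name="w")
    alpha, beta = 0.05, 0.000001            # assumed roles: learning rate, L2 weight
    J_toy = (w - 1.0) ** 2 + beta * (w ** 2)
    gd_step = theano.function([], J_toy, updates=[(w, w - alpha * T.grad(J_toy, w))])
    for _ in range(200):
        gd_step()
    print w.get_value()                     # approaches 1.0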

In [33]:
%time results_prelim = MemBlck_Mult.train_model_full(train_data,2)  # WITHOUT optimizer_excluding="low_memory"


theano.config.allow_gc =:  False
CPU times: user 27.9 s, sys: 6.76 s, total: 34.7 s
Wall time: 34.7 s

In [34]:
%time results_prelim = MemBlck_Mult.train_model_full(train_data) # theano.config.allow_gc =:  False
#CPU times: user 59min 32s, sys: 16min 6s, total: 1h 15min 38s
#Wall time: 1h 15min 37s


theano.config.allow_gc =:  False
CPU times: user 59min 32s, sys: 16min 6s, total: 1h 15min 38s
Wall time: 1h 15min 37s

In [35]:
predictions_full = MemBlck_Mult.predict_on_lst( test_data, verbose=False)


<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply <theano.gof.opt.EquilibriumOptimizer object at 0x7f02fc26bdd0>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 2223, in apply
    sub_prof = gopt.apply(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer')

[... further identical MergeOptimizer tracebacks omitted ...]

<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply <theano.gof.opt.EquilibriumOptimizer object at 0x7f02dcfb11d0>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 2223, in apply
    sub_prof = gopt.apply(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer')

<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply <theano.gof.opt.EquilibriumOptimizer object at 0x7f02f5161cd0>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 2223, in apply
    sub_prof = gopt.apply(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer')

<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply <theano.gof.opt.EquilibriumOptimizer object at 0x7f02fc26b350>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 2223, in apply
    sub_prof = gopt.apply(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float32'}.0, AllocEmpty{dtype='float32'}.0, TensorType(float32, row), TensorType(float32, matrix), 'MergeOptimizer')


In [37]:
assert len(predictions_Mult) == len(y_test_data)
m = len(predictions_Mult)
TOTALELE = 0.
POSRES   = 0.
for j in range(m):
    testing_arr = np.equal( (predictions_Mult[j]>0.5).astype(float) , np.array( y_test_data[j]).astype(float) )
    TOTALELE += testing_arr.size
    POSRES += np.sum( testing_arr )

print POSRES, TOTALELE, POSRES/TOTALELE #  1006.0 1253.0 0.802873104549


 1006.0 1253.0 0.802873104549
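
The same element-wise accuracy can be computed in one shot with numpy; a sketch, assuming predictions_Mult and y_test_data as above, with each prediction array and its target sharing a shape:

In [ ]:
preds = np.concatenate([ (p > 0.5).astype(np.float32) for p in predictions_Mult ])
targs = np.concatenate([ np.asarray(t, dtype=np.float32) for t in y_test_data ])
print( np.mean(preds == targs) )  # should reproduce POSRES/TOTALELE, ~0.80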

In [47]:
print(len(predictions_Mult))
print(type(predictions_Mult[0]));print(predictions_Mult[0].shape)


10
<type 'numpy.ndarray'>
(16, 7)

In [ ]:


In [ ]:


In [ ]:

Draft work


In [48]:
g_t_Mul = Feedforward_g_right(3,[7,9,10],n_hidden)
i_t_Mul = Feedforward_ifo_right(3,[7,9,10],n_hidden,T.nnet.sigmoid,T.nnet.sigmoid)
s_L_Mul = [7,9,10][-1]
Thetaby_Mul = Thetab_right(1,[9,7],activation=T.nnet.sigmoid)

In [49]:
## unroll all the parameters
gThetas = [Weight.Theta for Weight in g_t_Mul.Thetabs]
gbs     = [Weight.b for Weight in g_t_Mul.Thetabs]
gthetas = []
for Weight in g_t_Mul.Thetabs:
    try:
        gthetas.append( Weight.theta )
    except AttributeError:
        print("on layer l=%d" % Weight.l)
params_Mul = gThetas + gbs + gthetas
Thetas_only_Mul = gThetas + gthetas

iThetas = [Weight.Theta for Weight in i_t_Mul.Thetabs]
ibs     = [Weight.b for Weight in i_t_Mul.Thetabs]
ithetas = []
for Weight in i_t_Mul.Thetabs:
    try:
        ithetas.append( Weight.theta )
    except AttributeError:
        print("on layer l=%d" % Weight.l)
params_Mul = params_Mul+iThetas + ibs + ithetas

Thetas_only_Mul = Thetas_only_Mul +iThetas + ithetas


on layer l=2
on layer l=2

In [50]:
print(params_Mul)


[Theta1, Theta2, b1, b2, theta1, Theta1, Theta2, b1, b2, theta1]

In [57]:
print(Thetas_only_Mul)
Thetas_only_Mul


[Theta1, Theta2, theta1, Theta1, Theta2, theta1]
Out[57]:
[Theta1, Theta2, theta1, Theta1, Theta2, theta1]

In [51]:
def lstm_step_Mul(X_t,h_tm1,c_tm1,*args_for_params):
    g_t= g_t_Mul.connect_through(X_t,h_tm1)
    i_t= i_t_Mul.connect_through(X_t,h_tm1,c_tm1)
    c_t = i_t*g_t
    h_t = activations_Mult.h[-1](c_t)
    Thetaby_Mul.al = h_t
    Thetaby_Mul.connect_through()
    y_t = Thetaby_Mul.alp1
    return [h_t,c_t,y_t]

In [52]:
X_Mul = T.matrix(dtype=theano.config.floatX)
c0_Mul = theano.shared(np.zeros(n_hidden).astype(theano.config.floatX))
h0_Mul = T.tanh( c0_Mul )

In [53]:
[h_vals_Mul,c_vals_Mul,y_vals_Mul],updates_from_scan_Mul=theano.scan(fn=lstm_step_Mul, 
                                                                    sequences=dict(input=X_Mul,taps=[0]),
                                                                    outputs_info=[h0_Mul,c0_Mul,None],
                                                                    non_sequences=params_Mul)

In [54]:
#draft work
#lstm_step_Mul(X_Mul,h0_Mul,c0_Mul, *params_Mul)


Out[54]:
[Elemwise{tanh,no_inplace}.0, Elemwise{mul,no_inplace}.0, sigmoid.0]

In [55]:
#draft work
#g_t= g_t_Mul.connect_through(X_Mul,h0_Mul)
#i_t= i_t_Mul.connect_through(X_Mul,h0_Mul,c0_Mul)
#c_t = i_t*g_t
#h_t = activations_Mult.h[-1](c_t)
#Thetaby_Mul.al = h_t

In [56]:
#draft work
#Thetaby_Mul.connect_through()

In [59]:
y_Mul=T.matrix(dtype=theano.config.floatX)
J_Mul = build_cost_functional(np.float32(0.1), y_vals_Mul,y_Mul,Thetas_only_Mul)

In [60]:
updateExp_Mul,gradDesc_step_Mul = build_gradDescent_step( J_Mul, params_Mul,X_Mul,y_Mul,0.01,0.0)

Indeed, trying the same construction with one more layer (L=4) raises a TypeError:


In [69]:
g_t_Mul = Feedforward_g_right(4,[7,8,9,10],n_hidden)


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-69-76c22a59fb53> in <module>()
----> 1 g_t_Mul = Feedforward_g_right(4,[7,8,9,10],n_hidden)

/home/topolo/PropD/MLgrabbag/ML/LSTM_Herta.py in __init__(self, L, s_l, H, activation_fxn, psi_Lm1, rng)
    517 
    518                         #initialize an instance of class Thetabtheta_right
--> 519                         Thetabl = Thetabtheta_right(l,(s_l[l-1],s_l[l]),al=inputlayer_al, activation=activation_fxn, rng=rng)
    520                         Thetabl.connect_through()
    521                         Thetabs_lst.append( Thetabl )

/home/topolo/PropD/MLgrabbag/ML/LSTM_Herta.py in __init__(self, s_ls, H, al, _h, Theta, b, theta, activation, l, rng)
    221 
    222 		"""
--> 223                 s_l, s_lp1 = s_ls
    224 
    225                 if rng is None:

TypeError: 'int' object is not iterable

In [70]:
range(2,4-1)


Out[70]:
[2]
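
So with L=3 the inner loop (apparently range(2, L-1)) never runs, while with L=4 it executes once, at l=2, and hits the failing call. Judging from the traceback alone (an assumption, not verified against LSTM_Herta.py), the culprit looks like positional-argument order: Thetabtheta_right.__init__ expects s_ls first, so passing the layer index l as the first positional argument binds s_ls to an int, and the unpacking s_l, s_lp1 = s_ls then raises. A minimal reproduction of that failure mode:

In [ ]:
l = 2
s_ls = l            # an int where a pair (s_l, s_lp1) was expected
try:
    s_l, s_lp1 = s_ls
except TypeError as e:
    print(e)        # 'int' object is not iterable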

In [ ]:


In [ ]:

"Gold" version of LSTM, implemented by Herta


In [22]:
dtype=theano.config.floatX
# squashing of the gates should result in values between 0 and 1
# therefore we use the logistic function
sigma = lambda x: 1 / (1 + T.exp(-x))


# for the other activation function we use the tanh
act = T.tanh

# sequences: x_t
# prior results: h_tm1, c_tm1
# non-sequences: W_xi, W_hi, W_ci, b_i, W_xf, W_hf, W_cf, b_f, W_xc, W_hc, b_c, W_xo, W_ho, W_co, b_o, W_hy, b_y
def one_lstm_step(x_t, h_tm1, c_tm1, W_xi, W_hi, W_ci, b_i, W_xf, W_hf, W_cf, b_f, W_xc, W_hc, b_c, W_xo, W_ho, W_co, b_o, W_hy, b_y):
    i_t = sigma(theano.dot(x_t, W_xi) + theano.dot(h_tm1, W_hi) + theano.dot(c_tm1, W_ci) + b_i)  # input gate
    f_t = sigma(theano.dot(x_t, W_xf) + theano.dot(h_tm1, W_hf) + theano.dot(c_tm1, W_cf) + b_f)  # forget gate
    c_t = f_t * c_tm1 + i_t * act(theano.dot(x_t, W_xc) + theano.dot(h_tm1, W_hc) + b_c)          # cell state
    o_t = sigma(theano.dot(x_t, W_xo) + theano.dot(h_tm1, W_ho) + theano.dot(c_t, W_co) + b_o)    # output gate
    h_t = o_t * act(c_t)
    y_t = sigma(theano.dot(h_t, W_hy) + b_y)
    return [h_t, c_t, y_t]

In [23]:
#TODO: Use a more appropriate initialization method
def sample_weights(sizeX, sizeY):
    values = np.ndarray([sizeX, sizeY], dtype=dtype)
    for dx in xrange(sizeX):
        vals = np.random.uniform(low=-1., high=1.,  size=(sizeY,))
        #vals_norm = np.sqrt((vals**2).sum())
        #vals = vals / vals_norm
        values[dx,:] = vals
    _,svs,_ = np.linalg.svd(values)
    #svs[0] is the largest singular value                      
    values = values / svs[0]
    return values
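
One common alternative for the TODO above (a sketch, not from the original tutorial): an orthogonal-style initialization taken from the SVD of a Gaussian matrix.

In [ ]:
def ortho_weights(sizeX, sizeY):
    # keep an orthonormal factor of the SVD of a random Gaussian matrix
    W = np.random.randn(sizeX, sizeY)
    u, _, vt = np.linalg.svd(W, full_matrices=False)
    Q = u if u.shape == (sizeX, sizeY) else vt
    return Q.astype(dtype)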

In [24]:
n_in = 7 # for embedded reber grammar
n_hidden = n_i = n_c = n_o = n_f = 10
n_y = 7 # for embedded reber grammar

# initialize weights
# i_t and o_t should be "open" or "closed"
# f_t should be "open" (don't forget at the beginning of training)
# we try to achieve this by appropriate initialization of the corresponding biases 

W_xi = theano.shared(sample_weights(n_in, n_i))  
W_hi = theano.shared(sample_weights(n_hidden, n_i))  
W_ci = theano.shared(sample_weights(n_c, n_i))  
b_i = theano.shared(np.cast[dtype](np.random.uniform(-0.5,.5,size = n_i)))
W_xf = theano.shared(sample_weights(n_in, n_f)) 
W_hf = theano.shared(sample_weights(n_hidden, n_f))
W_cf = theano.shared(sample_weights(n_c, n_f))
b_f = theano.shared(np.cast[dtype](np.random.uniform(0, 1.,size = n_f)))
W_xc = theano.shared(sample_weights(n_in, n_c))  
W_hc = theano.shared(sample_weights(n_hidden, n_c))
b_c = theano.shared(np.zeros(n_c, dtype=dtype))
W_xo = theano.shared(sample_weights(n_in, n_o))
W_ho = theano.shared(sample_weights(n_hidden, n_o))
W_co = theano.shared(sample_weights(n_c, n_o))
b_o = theano.shared(np.cast[dtype](np.random.uniform(-0.5,.5,size = n_o)))
W_hy = theano.shared(sample_weights(n_hidden, n_y))
b_y = theano.shared(np.zeros(n_y, dtype=dtype))

c0 = theano.shared(np.zeros(n_hidden, dtype=dtype))
h0 = T.tanh(c0)

params = [W_xi, W_hi, W_ci, b_i, W_xf, W_hf, W_cf, b_f, W_xc, W_hc, b_c, W_xo, W_ho, W_co, b_o, W_hy, b_y, c0]

In [28]:
#first dimension is time

#input 
v = T.matrix(dtype=dtype)

# target
target = T.matrix(dtype=dtype)

In [16]:
# hidden and outputs of the entire sequence
[h_vals, _, y_vals], _ = theano.scan(fn=one_lstm_step, 
                                  sequences = dict(input=v, taps=[0]), 
                                  outputs_info = [h0, c0, None ], # corresponds to return type of fn
                                  non_sequences = [W_xi, W_hi, W_ci, b_i, W_xf, W_hf, W_cf, b_f, W_xc, W_hc, b_c, W_xo, W_ho, W_co, b_o, W_hy, b_y] )

In [17]:
cost = -T.mean(target * T.log(y_vals)+ (1.- target) * T.log(1. - y_vals))
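
For reference, the same cost can be written with Theano's built-in helper (a sketch, not part of the original code); binary_crossentropy(output, target) is exactly -(target*log(output) + (1-target)*log(1-output)) element-wise:

In [ ]:
cost_bce = T.mean(T.nnet.binary_crossentropy(y_vals, target))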

In [18]:
# learning rate
lr = np.cast[dtype](.1)
learning_rate = theano.shared(lr)

In [19]:
gparams = []
for param in params:
  gparam = T.grad(cost, param)
  gparams.append(gparam)

updates=[]
for param, gparam in zip(params, gparams):
    updates.append((param, param - gparam * learning_rate))
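
The two loops above amount to plain SGD; the same update list can be built in a single comprehension (a sketch):

In [ ]:
updates_sgd = [ (p, p - learning_rate * T.grad(cost, p)) for p in params ]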

In [20]:
learn_rnn_fn = theano.function(inputs = [v, target],
                               outputs = cost,
                               updates = updates)

In [21]:
nb_epochs=250
train_errors = np.ndarray(nb_epochs)
def train_rnn(train_data):      
  for x in range(nb_epochs):
    error = 0.
    for j in range(len(train_data)):  
        index = np.random.randint(0, len(train_data))
        i, o = train_data[index]
        train_cost = learn_rnn_fn(i, o)
        error += train_cost
    train_errors[x] = error 
    
train_rnn(train_data)

In [22]:
plt.plot(np.arange(nb_epochs), train_errors, 'b-')
plt.xlabel('epochs')
plt.ylabel('error')
plt.ylim(0., 50)


Out[22]:
(0.0, 50)

In [23]:
predictions = theano.function(inputs = [v], outputs = y_vals)

test_data = reberGrammar.get_n_embedded_examples(10)

def print_out(test_data):
    for i,o in test_data:
        p = predictions(i)
        print o[-2] # target
        print p[-2] # prediction
        print 
print_out(test_data)


<<!! BUG IN FGRAPH.REPLACE OR A LISTENER !!>> <type 'exceptions.TypeError'> ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float64'}.0, AllocEmpty{dtype='float64'}.0, TensorType(float64, row), TensorType(float64, matrix), 'MergeOptimizer') MergeOptimizer
ERROR (theano.gof.opt): SeqOptimizer apply MergeOptimizer
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 230, in apply
    sub_prof = optimizer.optimize(fgraph)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 89, in optimize
    ret = self.apply(fgraph, *args, **kwargs)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/opt.py", line 817, in apply
    fgraph.replace_all_validate(pairs, 'MergeOptimizer')
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/toolbox.py", line 309, in replace_all_validate
    fgraph.replace(r, new_r, reason=reason, verbose=False)
  File "/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/fg.py", line 561, in replace
    str(reason))
TypeError: ('The type of the replacement must be compatible with the type of the original Variable.', AllocEmpty{dtype='float64'}.0, AllocEmpty{dtype='float64'}.0, TensorType(float64, row), TensorType(float64, matrix), 'MergeOptimizer')

[ 0.  1.  0.  0.  0.  0.  0.]
[  2.43233675e-06   9.97954670e-01   2.64429935e-04   2.69863801e-04
   1.88016371e-03   1.38660943e-03   5.84324931e-04]

[ 0.  0.  0.  0.  1.  0.  0.]
[  1.96756615e-07   1.35325788e-03   2.84242201e-04   3.04241353e-04
   9.96710433e-01   6.36753491e-05   1.95325091e-03]

[ 0.  0.  0.  0.  1.  0.  0.]
[  1.76485516e-07   2.35432124e-03   4.16199149e-04   4.49826060e-04
   9.96645648e-01   7.17373631e-05   1.36849868e-03]

[ 0.  1.  0.  0.  0.  0.  0.]
[  4.60176790e-06   9.98925203e-01   3.35967186e-04   3.36846540e-04
   3.46051278e-04   1.78814571e-03   7.52967155e-04]

[ 0.  0.  0.  0.  1.  0.  0.]
[  1.87858571e-07   1.62966322e-03   3.50517392e-04   3.77439644e-04
   9.96757444e-01   6.03820571e-05   1.67574070e-03]

[ 0.  0.  0.  0.  1.  0.  0.]
[  1.55065438e-07   4.00838690e-03   3.44138005e-04   3.73617135e-04
   9.96538249e-01   7.98475749e-05   1.17616860e-03]

[ 0.  0.  0.  0.  1.  0.  0.]
[  2.10236734e-07   1.40785547e-03   4.73173202e-04   5.14319271e-04
   9.96140950e-01   5.71095128e-05   1.63558240e-03]

[ 0.  1.  0.  0.  0.  0.  0.]
[  4.51384710e-06   9.98052202e-01   3.52941979e-04   3.59681719e-04
   4.81956011e-04   1.32865006e-03   7.38808002e-04]

[ 0.  0.  0.  0.  1.  0.  0.]
[  1.54871944e-07   4.21813918e-03   1.99854250e-04   2.15277597e-04
   9.96222372e-01   7.54026924e-05   1.47256315e-03]

[ 0.  0.  0.  0.  1.  0.  0.]
[  1.67411996e-07   2.84840073e-03   3.13914015e-04   3.38329435e-04
   9.96608028e-01   7.51464581e-05   1.41701609e-03]


In [24]:
def print_out(test_data):
    for i,o in test_data:
        p = predictions(i)
        print np.argmax( o[-2] ) # target
        print np.argmax( p[-2] ) # prediction
        print 
print_out(test_data)


1
1

4
4

4
4

1
1

4
4

4
4

4
4

1
1

4
4

4
4


In [15]:
test_data = reberGrammar.get_n_embedded_examples(10)

In [16]:
print(type(test_data));print( len( test_data));
print( type( test_data[0][1] )); print( len(test_data[0][1])); test_data[0][1]


<type 'list'>
10
<type 'list'>
14
Out[16]:
[array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.])]

In [27]:
b_i.get_value().shape


Out[27]:
(10,)

In [14]:
sys.path.append( os.getcwd() + '/ML' )

In [15]:
LSTM_model_Herta= LSTM_Model_right( L_Herta, s_l_Herta, n_hidden, n_y, activations_Herta, T.nnet.sigmoid)


Total number of parameters: 17 

In [18]:
len( LSTM_model_Herta.params )


Out[18]:
17

In [30]:
lstm_step_fxn = LSTM_model_Herta.build_lstm_step()

In [33]:
[h_vals_EY, _, y_vals_EY], _ = theano.scan(fn=lstm_step_fxn_EY, sequences=dict(input=v,taps=[0]), outputs_info=[h0,c0,None], 
               non_sequences = LSTM_model_Herta.params, 
               allow_gc=False) # comment this out to check if this flag helps or not

In [34]:
cost_EY = -T.mean(target * T.log(y_vals_EY) + (1.-target )* T.log(1.-y_vals_EY))

In [ ]:


In [37]:
gparams_EY = []
params_EY = LSTM_model_Herta.params
for param in params_EY:
  gparam = T.grad(cost_EY, param)
  gparams_EY.append(gparam)

updates_EY=[]
for param, gparam in zip(params_EY, gparams_EY):
    updates_EY.append((param, param - gparam * np.float32(0.1)))

In [41]:
print( len(train_data) )
learn_rnn_fn_EY = theano.function(inputs=[v,target],outputs=cost_EY,updates = updates_EY)


1000

In [42]:
nb_epochs=250
train_errors = np.ndarray(nb_epochs)
def train_rnn_EY(train_data):      
  for j in range(nb_epochs):
    error = 0.
    for t in range(len(train_data)):  
        index = np.random.randint(0, len(train_data))
        i, o = train_data[index]
        train_cost = learn_rnn_fn_EY(i, o)
        error += train_cost
    train_errors[j] = error

In [43]:
train_rnn_EY(train_data)


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-43-03a202ac023f> in <module>()
----> 1 train_rnn_EY(train_data)

<ipython-input-42-419b0e7d73b3> in train_rnn_EY(train_data)
      7         index = np.random.randint(0, len(train_data))
      8         i, o = train_data[index]
----> 9         train_cost = learn_rnn_fn_EY(i, o)
     10         error += train_cost
     11     train_errors[x] = error

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
    869                     node=self.fn.nodes[self.fn.position_of_error],
    870                     thunk=thunk,
--> 871                     storage_map=getattr(self.fn, 'storage_map', None))
    872             else:
    873                 # old-style linkers raise their own exceptions

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/link.pyc in raise_with_op(node, thunk, exc_info, storage_map)
    312         # extra long error message in that case.
    313         pass
--> 314     reraise(exc_type, exc_value, exc_trace)
    315 
    316 

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
    857         t0_fn = time.time()
    858         try:
--> 859             outputs = self.fn()
    860         except Exception:
    861             if hasattr(self.fn, 'position_of_error'):

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/scan_module/scan_op.pyc in rval(p, i, o, n, allow_gc)
    949         def rval(p=p, i=node_input_storage, o=node_output_storage, n=node,
    950                  allow_gc=allow_gc):
--> 951             r = p(n, [x[0] for x in i], o)
    952             for o in node.outputs:
    953                 compute_map[o][0] = True

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/scan_module/scan_op.pyc in <lambda>(node, args, outs)
    938                         args,
    939                         outs,
--> 940                         self, node)
    941         except (ImportError, theano.gof.cmodule.MissingGXX):
    942             p = self.execute

theano/scan_module/scan_perform.pyx in theano.scan_module.scan_perform.perform (/home/topolo/.theano/compiledir_Linux-4.2-fc23.x86_64-x86_64-with-fedora-23-Twenty_Three-x86_64-2.7.11-64/scan_perform/mod.cpp:4316)()

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/link.pyc in raise_with_op(node, thunk, exc_info, storage_map)
    312         # extra long error message in that case.
    313         pass
--> 314     reraise(exc_type, exc_value, exc_trace)
    315 
    316 

theano/scan_module/scan_perform.pyx in theano.scan_module.scan_perform.perform (/home/topolo/.theano/compiledir_Linux-4.2-fc23.x86_64-x86_64-with-fedora-23-Twenty_Three-x86_64-2.7.11-64/scan_perform/mod.cpp:4193)()

ValueError: dimension mismatch in args to gemv (10,7)x(10)->(10)
Apply node that caused the error: GpuGemv{no_inplace}(b1_copy[cuda], TensorConstant{1.0}, GpuDimShuffle{1,0}.0, GpuElemwise{Composite{(scalar_sigmoid((i0 + i1)) * tanh(i2))},no_inplace}.0, TensorConstant{1.0})
Toposort index: 31
Inputs types: [CudaNdarrayType(float32, vector), TensorType(float32, scalar), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), TensorType(float32, scalar)]
Inputs shapes: [(10,), (), (10, 7), (10,), ()]
Inputs strides: [(1,), (), (1, 10), (1,), ()]
Inputs values: ['not shown', array(1.0, dtype=float32), 'not shown', 'not shown', array(1.0, dtype=float32)]
Outputs clients: [[GpuElemwise{scalar_sigmoid,no_inplace}(GpuGemv{no_inplace}.0)]]

HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
Apply node that caused the error: forall_inplace,gpu,scan_fn}(Shape_i{0}.0, GpuSubtensor{int64:int64:int8}.0, GpuIncSubtensor{Set;:int64:}.0, GpuIncSubtensor{InplaceSet;:int64:}.0, Shape_i{0}.0, Theta1, b1, theta1, Theta1, b1, theta1, W1, Theta1, b1, theta1, W1, Theta1, b1, theta1, W1, Theta1, b1)
Toposort index: 115
Inputs types: [TensorType(int64, scalar), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), TensorType(int64, scalar), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, vector)]
Inputs shapes: [(), (21, 7), (22, 10), (22, 10), (), (7, 10), (10,), (10, 10), (7, 10), (10,), (10, 10), (10, 10), (7, 10), (10,), (10, 10), (10, 10), (7, 10), (10,), (10, 10), (10, 10), (7, 10), (10,)]
Inputs strides: [(), (7, 1), (10, 1), (10, 1), (), (10, 1), (1,), (10, 1), (10, 1), (1,), (10, 1), (10, 1), (10, 1), (1,), (10, 1), (10, 1), (10, 1), (1,), (10, 1), (10, 1), (10, 1), (1,)]
Inputs values: [array(21), 'not shown', 'not shown', 'not shown', array(21), 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown', 'not shown']
Outputs clients: [[GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.0, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1}), GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.0, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1})], [GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.1, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1}), GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.1, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1})], [GpuSubtensor{int64:int64:int64}(forall_inplace,gpu,scan_fn}.2, ScalarFromTensor.0, ScalarFromTensor.0, Constant{-1}), GpuElemwise{sub,no_inplace}(CudaNdarrayConstant{[[ 1.]]}, forall_inplace,gpu,scan_fn}.2), GpuElemwise{Composite{((i0 * log(i1)) + (i2 * log(i3)))}}[(0, 0)](GpuFromHost.0, forall_inplace,gpu,scan_fn}.2, GpuElemwise{sub,no_inplace}.0, GpuElemwise{sub,no_inplace}.0)]]

HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.

In [ ]:


In [11]:
import LSTM
from LSTM import Gates, Psis, LSTM_Model, MemoryBlock

In [16]:
L_Herta = Gates(g=2,i=2,f=2,o=2)
s_l_Herta = Gates(g=[n_in,n_c],i=[n_in,n_i],f=[n_in,n_f],o=[n_in,n_o])
activations_Herta = Psis(g=(T.tanh, T.tanh), i=(T.nnet.sigmoid, T.nnet.sigmoid),f=(T.nnet.sigmoid, T.nnet.sigmoid),
                         o=(T.nnet.sigmoid, T.nnet.sigmoid),h=(T.tanh,))

In [19]:
LSTM_model_Herta= LSTM_Model( L_Herta, s_l_Herta, activations_Herta, T.nnet.sigmoid, n_y)
lstm_step_fxn = LSTM_model_Herta.build_lstm_step()
MemBlck_Herta = MemoryBlock(LSTM_model_Herta)


Total number of parameters: 17 

In [31]:
theano.scan(fn=lstm_step_fxn, sequences=dict(input=v,taps=[0]),outputs_info =[h0,c0,None])


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-31-fe1731dc90bb> in <module>()
----> 1 theano.scan(fn=lstm_step_fxn, sequences=dict(input=v,taps=[0]),outputs_info =[h0,c0,None])

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/scan_module/scan.pyc in scan(fn, sequences, outputs_info, non_sequences, n_steps, truncate_gradient, go_backwards, mode, name, profile, allow_gc, strict)
    743     # and outputs that needs to be separated
    744 
--> 745     condition, outputs, updates = scan_utils.get_updates_and_outputs(fn(*args))
    746     if condition is not None:
    747         as_while = True

/home/topolo/PropD/MLgrabbag/ML/LSTM.py in lstm_step(X_t, h_tm1, c_tm1, *args_for_params)
    795                 def lstm_step(X_t, h_tm1, c_tm1, *args_for_params):
    796 
--> 797                         g_t = self._gates.g.connect_through(X_t, h_tm1)
    798                         i_t = self._gates.i.connect_through(X_t, h_tm1, c_tm1)
    799                         f_t = self._gates.f.connect_through(X_t, h_tm1, c_tm1)

/home/topolo/PropD/MLgrabbag/ML/LSTM.py in connect_through(self, X_t, h_tm1)
    347                 self.Thetabs[0].al = X_t
    348                 self.Thetabs[0].y  = h_tm1
--> 349                 self.Thetabs[0].connect_through()
    350 
    351                 for l in range(2,L): # l=2,3,...L-1, for each of the Theta operations between layers l

/home/topolo/PropD/MLgrabbag/ML/LSTM.py in connect_through(self)
    271 		lin_zlp1 = T.dot( self.Theta, 
    272                                                         self.al)+T.tile(self.b,
--> 273                                                                                         (1,self.al.shape[1].astype('int32'))) + T.dot( self.theta,
    274 																							self.y) # z^{(l+1)}
    275                 if self.psi is None:

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/tensor/basic.pyc in tile(x, reps, ndim)
   4931     shape = [1] * (ndim - x.ndim) + [x.shape[i] for i in xrange(x.ndim)]
   4932     alloc_shape = reps + shape
-> 4933     y = alloc(x, *alloc_shape)
   4934     shuffle_ind = numpy.arange(ndim * 2).reshape(2, ndim)
   4935     shuffle_ind = shuffle_ind.transpose().flatten()

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/tensor/basic.pyc in __call__(self, val, *shapes, **kwargs)
   2808 
   2809         """
-> 2810         ret = super(Alloc, self).__call__(val, *shapes, **kwargs)
   2811         try:
   2812             # It makes optimization difficult when useless allocs are thrown

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/gof/op.pyc in __call__(self, *inputs, **kwargs)
    609         """
    610         return_list = kwargs.pop('return_list', False)
--> 611         node = self.make_node(*inputs, **kwargs)
    612 
    613         if config.compute_test_value != 'off':

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/tensor/basic.pyc in make_node(self, value, *shape)
   2689     def make_node(self, value, *shape):
   2690         v = as_tensor_variable(value)
-> 2691         sh, bcast = self.validate_shape(shape)
   2692         if v.ndim > len(sh):
   2693             raise TypeError("The Alloc value to use has more dimensions"

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/tensor/basic.pyc in validate_shape(self, shape)
   2681             # if s is constant 1, then we're broadcastable in that dim
   2682             try:
-> 2683                 const_shp = get_scalar_constant_value(s)
   2684             except NotScalarConstantError:
   2685                 const_shp = None

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/tensor/basic.pyc in get_scalar_constant_value(orig_v, elemwise, only_process_constants)
    658                                 get_scalar_constant_value_elemwises):
    659                     const = [get_scalar_constant_value(i)
--> 660                              for i in v.owner.inputs]
    661                     ret = [[None]]
    662                     v.owner.op.perform(v.owner, const, ret)

/home/topolo/Public/anaconda2/lib/python2.7/site-packages/theano/tensor/basic.pyc in get_scalar_constant_value(orig_v, elemwise, only_process_constants)
    770                         else:
    771                             msg += ' x=%s' % str(v)
--> 772                         raise ValueError(msg)
    773 
    774                     if gp_broadcastable[idx]:

ValueError: get_scalar_constant_value detected deterministic IndexError: x.shape[1] when x.ndim=1. x=Subtensor{int64}.0
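
The failure comes from connect_through's T.tile(self.b, (1, self.al.shape[1]...)) call: inside scan each time-slice X_t of the matrix v is 1-D, so al.shape[1] does not exist, which is exactly the "deterministic IndexError: x.shape[1] when x.ndim=1" Theano reports. A sketch of the affine step written without the tile, relying on broadcasting instead (hypothetical shapes, not the actual LSTM.py code):

In [ ]:
al_t   = T.vector(dtype=theano.config.floatX)   # one time-slice, shape (n_in,)
Theta_ = T.matrix(dtype=theano.config.floatX)   # (n_out, n_in)
b_     = T.vector(dtype=theano.config.floatX)   # (n_out,)
z_     = T.dot(Theta_, al_t) + b_               # broadcasting; no T.tile needed
f_     = theano.function([Theta_, al_t, b_], z_)
print( f_(np.ones((3,2), dtype=theano.config.floatX),
          np.ones(2, dtype=theano.config.floatX),
          np.zeros(3, dtype=theano.config.floatX)) )   # [ 2.  2.  2.]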

In [ ]:


In [ ]:


In [33]:
os.getcwd()


Out[33]:
'/home/topolo/PropD/MLgrabbag'

In [35]:
os.listdir('../')


Out[35]:
['CUDACFD_out',
 'servetheloop',
 'cantera',
 'CompPhys',
 'thermopy',
 '.ipynb_checkpoints',
 'setup.py.in',
 'Propulsion',
 'tensorflow',
 'DeepLearningTutorials',
 'NISTchemwb',
 'cfcf3d',
 'cs344',
 'MLgrabbag',
 'OpenNN',
 'thrust']

In [36]:
sys.path.append( '../DeepLearningTutorials/code' )

In [37]:
import lstm

In [55]:
from lstm import *

In [42]:
n_words=10000,  # Vocabulary size
maxlen=100,  # Sequences longer than this get ignored

In [61]:
dataset='imdb'
load_data, prepare_data = get_dataset(dataset)

In [43]:
train, valid, test = load_data(n_words=n_words, valid_portion=0.05,maxlen=maxlen)

In [47]:
print(type(load_data)); print(type(prepare_data)); print(type(train)); print(type(valid)); print(type(test))
print(len(train)); print(len(valid)); print(len(test))
print(type(train[0])); print(type(valid[0])); print(type(test[0]));
print(len(train[0])); print(len(valid[0])); print(len(test[0]))


<type 'function'>
<type 'function'>
<type 'tuple'>
<type 'tuple'>
<type 'tuple'>
2
2
2
<type 'list'>
<type 'list'>
<type 'list'>
23750
1250
25000

In [62]:
# create the initial parameters as numpy ndarrays
model_options=locals().copy()
ydim = numpy.max(train[1]) +1
model_options['ydim']=ydim
params = init_params(model_options)


---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-62-ded729125024> in <module>()
      3 ydim = numpy.max(train[1]) +1
      4 model_options['ydim']=ydim
----> 5 params = init_params(model_options)

/home/topolo/PropD/DeepLearningTutorials/code/lstm.py in init_params(options)
     95     # embedding
     96     randn = numpy.random.rand(options['n_words'],
---> 97                               options['dim_proj'])
     98     params['Wemb'] = (0.01 * randn).astype(config.floatX)
     99     params = get_layer(options['encoder'])[0](options,

KeyError: 'dim_proj'

In [49]:
help(locals)


Help on built-in function locals in module __builtin__:

locals(...)
    locals() -> dictionary
    
    Update and return a dictionary containing the current scope's local variables.


In [59]:
# This create Theano Shared Variable from the parameters.
    # Dict name (string) -> Theano Tensor Shared Variable
    # params and tparams have different copy of the weights.
tparams = lstm.init_tparams(params)


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-59-936b72c65483> in <module>()
      2     # Dict name (string) -> Theano Tensor Shared Variable
      3     # params and tparams have different copy of the weights.
----> 4 tparams = lstm.init_tparams(params)

/home/topolo/PropD/DeepLearningTutorials/code/lstm.py in init_tparams(params)
    120 def init_tparams(params):
    121     tparams = OrderedDict()
--> 122     for kk, pp in params.items():
    123         tparams[kk] = theano.shared(params[kk], name=kk)
    124     return tparams

AttributeError: 'list' object has no attribute 'items'
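
Both failures trace back to state that train_lstm() normally sets up itself: locals().copy() in the notebook does not contain dim_proj, encoder, etc. (and the trailing commas in In [42] even made n_words and maxlen one-element tuples), and params here was still the earlier parameter list rather than the OrderedDict that init_params returns. A sketch of the minimal model_options needed to drive init_params/init_tparams interactively; the key names follow lstm.py, the values are the tutorial defaults (assumed, not re-verified):

In [ ]:
model_options = {
    'dim_proj': 128,           # word-embedding / LSTM hidden size
    'n_words' : 10000,         # vocabulary size
    'encoder' : 'lstm',
    'ydim'    : int(numpy.max(train[1])) + 1,
}
params  = init_params(model_options)     # OrderedDict of numpy ndarrays
tparams = init_tparams(params)           # dict of Theano shared variables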

In [63]:
#train_lstm()


model options {'encoder': 'lstm', 'optimizer': <function adadelta at 0x7fcfacdbbed8>, 'validFreq': 370, 'lrate': 0.0001, 'batch_size': 16, 'decay_c': 0.0, 'patience': 10, 'reload_model': None, 'n_words': 10000, 'max_epochs': 5000, 'dispFreq': 10, 'dataset': 'imdb', 'valid_batch_size': 64, 'use_dropout': True, 'dim_proj': 128, 'maxlen': 100, 'saveto': 'lstm_model.npz', 'noise_std': 0.0, 'test_size': -1, 'saveFreq': 1110}
Loading data
Building model
Optimization
1998 train examples
105 valid examples
25000 test examples
Epoch  0 Update  10 Cost  0.692698359489
Epoch  0 Update  20 Cost  0.691576838493
Epoch  0 Update  30 Cost  0.678899407387
Epoch  0 Update  40 Cost  0.69440060854
Epoch  0 Update  50 Cost  0.683119535446
Epoch  0 Update  60 Cost  0.702967643738
Epoch  0 Update  70 Cost  0.665298700333
Epoch  0 Update  80 Cost  0.699218034744
Epoch  0 Update  90 Cost  0.670724391937
Epoch  0 Update  100 Cost  0.671119213104
Epoch  0 Update  110 Cost  0.723990380764
Epoch  0 Update  120 Cost  0.627523005009
Seen 1998 samples
Epoch  1 Update  130 Cost  0.612293958664
Epoch  1 Update  140 Cost  0.664878606796
Epoch  1 Update  150 Cost  0.585900843143
Epoch  1 Update  160 Cost  0.691015005112
Epoch  1 Update  170 Cost  0.7251060009
Epoch  1 Update  180 Cost  0.708394289017
Epoch  1 Update  190 Cost  0.700806796551
Epoch  1 Update  200 Cost  0.74559122324
Epoch  1 Update  210 Cost  0.651143372059
Epoch  1 Update  220 Cost  0.685461819172
Epoch  1 Update  230 Cost  0.653840303421
Epoch  1 Update  240 Cost  0.701105415821
Epoch  1 Update  250 Cost  0.702791631222
Seen 1998 samples
Epoch  2 Update  260 Cost  0.743963241577
Epoch  2 Update  270 Cost  0.67035317421
Epoch  2 Update  280 Cost  0.639522969723
Epoch  2 Update  290 Cost  0.778525829315
Epoch  2 Update  300 Cost  0.669261515141
Epoch  2 Update  310 Cost  0.702283620834
Epoch  2 Update  320 Cost  0.73783659935
Epoch  2 Update  330 Cost  0.682464301586
Epoch  2 Update  340 Cost  0.702616035938
Epoch  2 Update  350 Cost  0.683843135834
Epoch  2 Update  360 Cost  0.645316123962
Epoch  2 Update  370 Cost  0.740399360657
('Train ', 0.42342342342342343, 'Valid ', 0.40952380952380951, 'Test ', 0.5)
Seen 1998 samples
Epoch  3 Update  380 Cost  0.664936840534
Epoch  3 Update  390 Cost  0.666858136654
Epoch  3 Update  400 Cost  0.702749013901
Epoch  3 Update  410 Cost  0.685796022415
Epoch  3 Update  420 Cost  0.669363379478
Epoch  3 Update  430 Cost  0.683922946453
Epoch  3 Update  440 Cost  0.723559975624
Epoch  3 Update  450 Cost  0.720567464828
Epoch  3 Update  460 Cost  0.667905449867
Epoch  3 Update  470 Cost  0.706657350063
Epoch  3 Update  480 Cost  0.685510575771
Epoch  3 Update  490 Cost  0.706187903881
Epoch  3 Update  500 Cost  0.573962211609
Seen 1998 samples
Epoch  4 Update  510 Cost  0.702898979187
Epoch  4 Update  520 Cost  0.662639200687
Epoch  4 Update  530 Cost  0.708332836628
Epoch  4 Update  540 Cost  0.663423776627
Epoch  4 Update  550 Cost  0.68678432703
Epoch  4 Update  560 Cost  0.668098449707
Epoch  4 Update  570 Cost  0.644453287125
Epoch  4 Update  580 Cost  0.659795403481
Epoch  4 Update  590 Cost  0.622740268707
Epoch  4 Update  600 Cost  0.68503767252
Epoch  4 Update  610 Cost  0.686704456806
Epoch  4 Update  620 Cost  0.682946026325
Seen 1998 samples
Epoch  5 Update  630 Cost  0.745455861092
Epoch  5 Update  640 Cost  0.626753687859
Epoch  5 Update  650 Cost  0.717534661293
Epoch  5 Update  660 Cost  0.619034767151
Epoch  5 Update  670 Cost  0.64300429821
Epoch  5 Update  680 Cost  0.709595680237
Epoch  5 Update  690 Cost  0.680198550224
Epoch  5 Update  700 Cost  0.715785324574
Epoch  5 Update  710 Cost  0.647588670254
Epoch  5 Update  720 Cost  0.738720476627
Epoch  5 Update  730 Cost  0.613356888294
Epoch  5 Update  740 Cost  0.681859910488
('Train ', 0.42342342342342343, 'Valid ', 0.40952380952380951, 'Test ', 0.5)
Epoch  5 Update  750 Cost  0.638185143471
Seen 1998 samples
Epoch  6 Update  760 Cost  0.664708554745
Epoch  6 Update  770 Cost  0.643858492374
Epoch  6 Update  780 Cost  0.684750974178
Epoch  6 Update  790 Cost  0.607481896877
Epoch  6 Update  800 Cost  0.664666891098
Epoch  6 Update  810 Cost  0.657433569431
Epoch  6 Update  820 Cost  0.687919795513
Epoch  6 Update  830 Cost  0.627542614937
Epoch  6 Update  840 Cost  0.685253024101
Epoch  6 Update  850 Cost  0.685834467411
Epoch  6 Update  860 Cost  0.631550908089
Epoch  6 Update  870 Cost  0.693373978138
Seen 1998 samples
Epoch  7 Update  880 Cost  0.67098659277
Epoch  7 Update  890 Cost  0.681502342224
Epoch  7 Update  900 Cost  0.678376972675
Epoch  7 Update  910 Cost  0.612894117832
Epoch  7 Update  920 Cost  0.632097303867
Epoch  7 Update  930 Cost  0.609624743462
Epoch  7 Update  940 Cost  0.709208726883
Epoch  7 Update  950 Cost  0.711176097393
Epoch  7 Update  960 Cost  0.652043879032
Epoch  7 Update  970 Cost  0.676458060741
Epoch  7 Update  980 Cost  0.748211622238
Epoch  7 Update  990 Cost  0.642886817455
Epoch  7 Update  1000 Cost  0.681607723236
Seen 1998 samples
Epoch  8 Update  1010 Cost  0.609392344952
Epoch  8 Update  1020 Cost  0.672912657261
Epoch  8 Update  1030 Cost  0.682039916515
Epoch  8 Update  1040 Cost  0.663565218449
Epoch  8 Update  1050 Cost  0.743454515934
Epoch  8 Update  1060 Cost  0.661156594753
Epoch  8 Update  1070 Cost  0.676486134529
Epoch  8 Update  1080 Cost  0.527149558067
Epoch  8 Update  1090 Cost  0.710656702518
Epoch  8 Update  1100 Cost  0.520303606987
Epoch  8 Update  1110 Cost  0.719430506229
Saving...
Done
('Train ', 0.2617617617617618, 'Valid ', 0.25714285714285712, 'Test ', 0.35399999999999998)
Epoch  8 Update  1120 Cost  0.782182335854
Seen 1998 samples
Epoch  9 Update  1130 Cost  0.619902074337
Epoch  9 Update  1140 Cost  0.583137512207
Epoch  9 Update  1150 Cost  0.545123815536
Epoch  9 Update  1160 Cost  0.627865076065
Epoch  9 Update  1170 Cost  0.492178082466
Epoch  9 Update  1180 Cost  0.59959936142
Epoch  9 Update  1190 Cost  0.473972141743
Epoch  9 Update  1200 Cost  0.32666310668
Epoch  9 Update  1210 Cost  0.30805003643
Epoch  9 Update  1220 Cost  0.522493004799
Epoch  9 Update  1230 Cost  0.347992151976
Epoch  9 Update  1240 Cost  0.52707272768
Epoch  9 Update  1250 Cost  0.838895499706
Seen 1998 samples
Epoch  10 Update  1260 Cost  0.468200445175
Epoch  10 Update  1270 Cost  0.628310680389
Epoch  10 Update  1280 Cost  0.560294389725
Epoch  10 Update  1290 Cost  0.3314191401
Epoch  10 Update  1300 Cost  0.507508814335
Epoch  10 Update  1310 Cost  0.624760866165
Epoch  10 Update  1320 Cost  0.286853164434
Epoch  10 Update  1330 Cost  0.380667775869
Epoch  10 Update  1340 Cost  0.338694006205
Epoch  10 Update  1350 Cost  0.490547180176
Epoch  10 Update  1360 Cost  0.536775529385
Epoch  10 Update  1370 Cost  0.406870543957
Seen 1998 samples
Epoch  11 Update  1380 Cost  0.238712862134
Epoch  11 Update  1390 Cost  0.144520565867
Epoch  11 Update  1400 Cost  0.442646771669
Epoch  11 Update  1410 Cost  0.375552564859
Epoch  11 Update  1420 Cost  0.403922975063
Epoch  11 Update  1430 Cost  0.403555959463
Epoch  11 Update  1440 Cost  0.429494708776
Epoch  11 Update  1450 Cost  0.326758921146
Epoch  11 Update  1460 Cost  0.168657630682
Epoch  11 Update  1470 Cost  0.385389119387
Epoch  11 Update  1480 Cost  0.304343879223
('Train ', 0.090590590590590603, 'Valid ', 0.1428571428571429, 'Test ', 0.25519999999999998)
Epoch  11 Update  1490 Cost  0.376196235418
Epoch  11 Update  1500 Cost  0.578035354614
Seen 1998 samples
Epoch  12 Update  1510 Cost  0.219479650259
Epoch  12 Update  1520 Cost  0.479404628277
Epoch  12 Update  1530 Cost  0.27543643117
Epoch  12 Update  1540 Cost  0.233015537262
Epoch  12 Update  1550 Cost  0.330640405416
Epoch  12 Update  1560 Cost  0.312664270401
Epoch  12 Update  1570 Cost  0.180263116956
Epoch  12 Update  1580 Cost  0.276846498251
Epoch  12 Update  1590 Cost  0.377861082554
Epoch  12 Update  1600 Cost  0.221709951758
Epoch  12 Update  1610 Cost  0.226344540715
Epoch  12 Update  1620 Cost  0.426911473274
Seen 1998 samples
Epoch  13 Update  1630 Cost  0.052586786449
Epoch  13 Update  1640 Cost  0.163543105125
Epoch  13 Update  1650 Cost  0.0713825672865
Epoch  13 Update  1660 Cost  0.289846032858
Epoch  13 Update  1670 Cost  0.209345370531
Epoch  13 Update  1680 Cost  0.369451671839
Epoch  13 Update  1690 Cost  0.25002270937
Epoch  13 Update  1700 Cost  0.369953542948
Epoch  13 Update  1710 Cost  0.136777967215
Epoch  13 Update  1720 Cost  0.068537607789
Epoch  13 Update  1730 Cost  0.350539714098
Epoch  13 Update  1740 Cost  0.285716682673
Epoch  13 Update  1750 Cost  0.0578093230724
Seen 1998 samples
Epoch  14 Update  1760 Cost  0.0736741945148
Epoch  14 Update  1770 Cost  0.021975884214
Epoch  14 Update  1780 Cost  0.363358259201
Epoch  14 Update  1790 Cost  0.129479572177
Epoch  14 Update  1800 Cost  0.130923226476
Epoch  14 Update  1810 Cost  0.229752704501
Epoch  14 Update  1820 Cost  0.0524107888341
Epoch  14 Update  1830 Cost  0.02360445261
Epoch  14 Update  1840 Cost  0.313508927822
Epoch  14 Update  1850 Cost  0.284237056971
('Train ', 0.048548548548548509, 'Valid ', 0.11428571428571432, 'Test ', 0.28539999999999999)
Epoch  14 Update  1860 Cost  0.114400230348
Epoch  14 Update  1870 Cost  0.232896625996
Seen 1998 samples
Epoch  15 Update  1880 Cost  0.222233831882
Epoch  15 Update  1890 Cost  0.0796397775412
Epoch  15 Update  1900 Cost  0.0439472571015
Epoch  15 Update  1910 Cost  0.00937705952674
Epoch  15 Update  1920 Cost  0.0606281831861
Epoch  15 Update  1930 Cost  0.141265645623
Epoch  15 Update  1940 Cost  0.0367593169212
Epoch  15 Update  1950 Cost  0.17584349215
Epoch  15 Update  1960 Cost  0.0265002399683
Epoch  15 Update  1970 Cost  0.282739847898
Epoch  15 Update  1980 Cost  0.0528977960348
Epoch  15 Update  1990 Cost  0.485765337944
Epoch  15 Update  2000 Cost  0.243179991841
Seen 1998 samples
Epoch  16 Update  2010 Cost  0.0249696746469
Epoch  16 Update  2020 Cost  0.0308598149568
Epoch  16 Update  2030 Cost  0.0617641620338
Epoch  16 Update  2040 Cost  0.0358905307949
Epoch  16 Update  2050 Cost  0.217872411013
Epoch  16 Update  2060 Cost  0.0323725230992
Epoch  16 Update  2070 Cost  0.0260690953583
Epoch  16 Update  2080 Cost  0.0324352681637
Epoch  16 Update  2090 Cost  0.0375564135611
Epoch  16 Update  2100 Cost  0.141932919621
Epoch  16 Update  2110 Cost  0.0318977870047
Epoch  16 Update  2120 Cost  0.0851591303945
Seen 1998 samples
Epoch  17 Update  2130 Cost  0.094495549798
Epoch  17 Update  2140 Cost  0.130746781826
Epoch  17 Update  2150 Cost  0.0165700148791
Epoch  17 Update  2160 Cost  0.028234153986
Epoch  17 Update  2170 Cost  0.00589835317805
Epoch  17 Update  2180 Cost  0.328396558762
Epoch  17 Update  2190 Cost  0.0135336564854
Epoch  17 Update  2200 Cost  0.0285934321582
Epoch  17 Update  2210 Cost  0.0279478970915
Epoch  17 Update  2220 Cost  0.0189800430089
Saving...
Done
('Train ', 0.029029029029029041, 'Valid ', 0.1428571428571429, 'Test ', 0.30544000000000004)
Epoch  17 Update  2230 Cost  0.131582796574
Epoch  17 Update  2240 Cost  0.0992561355233
Epoch  17 Update  2250 Cost  0.249515816569
Seen 1998 samples
Epoch  18 Update  2260 Cost  0.153335094452
Epoch  18 Update  2270 Cost  0.00280411960557
Epoch  18 Update  2280 Cost  0.0127126323059
Epoch  18 Update  2290 Cost  0.0315342247486
Epoch  18 Update  2300 Cost  0.0321057066321
Epoch  18 Update  2310 Cost  0.0128439087421
Epoch  18 Update  2320 Cost  0.00771560519934
Epoch  18 Update  2330 Cost  0.0310320239514
Epoch  18 Update  2340 Cost  0.0172711405903
Epoch  18 Update  2350 Cost  0.0112820267677
Epoch  18 Update  2360 Cost  0.0257008317858
Epoch  18 Update  2370 Cost  0.0159540139139
Seen 1998 samples
Epoch  19 Update  2380 Cost  0.0262653250247
Epoch  19 Update  2390 Cost  0.00512914406136
Epoch  19 Update  2400 Cost  0.0096501680091
Epoch  19 Update  2410 Cost  0.000922410050407
Epoch  19 Update  2420 Cost  0.0353888943791
Epoch  19 Update  2430 Cost  0.0458313152194
Epoch  19 Update  2440 Cost  0.0207377616316
Epoch  19 Update  2450 Cost  0.0447564274073
Epoch  19 Update  2460 Cost  0.0114270178601
Epoch  19 Update  2470 Cost  0.0032439045608
Epoch  19 Update  2480 Cost  0.00210372544825
Epoch  19 Update  2490 Cost  0.00831729266793
Epoch  19 Update  2500 Cost  0.136446490884
Seen 1998 samples
Epoch  20 Update  2510 Cost  0.0159451887012
Epoch  20 Update  2520 Cost  0.0336063094437
Epoch  20 Update  2530 Cost  0.00414951331913
Epoch  20 Update  2540 Cost  0.00148038216867
Epoch  20 Update  2550 Cost  0.004880157765
Epoch  20 Update  2560 Cost  0.0214680749923
Epoch  20 Update  2570 Cost  0.00278742355295
Epoch  20 Update  2580 Cost  0.000976472569164
Epoch  20 Update  2590 Cost  0.0101894587278
('Train ', 0.0020020020020019569, 'Valid ', 0.11428571428571432, 'Test ', 0.21431999999999995)
Epoch  20 Update  2600 Cost  0.0177323762327
Epoch  20 Update  2610 Cost  0.00497570168227
Epoch  20 Update  2620 Cost  0.00639061024413
Seen 1998 samples
Epoch  21 Update  2630 Cost  0.00102666253224
Epoch  21 Update  2640 Cost  0.000371598580386
Epoch  21 Update  2650 Cost  0.00237970822491
Epoch  21 Update  2660 Cost  0.000947939057369
Epoch  21 Update  2670 Cost  0.0134694408625
Epoch  21 Update  2680 Cost  0.0485311076045
Epoch  21 Update  2690 Cost  0.00560057815164
Epoch  21 Update  2700 Cost  0.180613487959
Epoch  21 Update  2710 Cost  0.302919209003
Epoch  21 Update  2720 Cost  0.0396358780563
Epoch  21 Update  2730 Cost  0.00234019639902
Epoch  21 Update  2740 Cost  0.00331675214693
Epoch  21 Update  2750 Cost  0.00883675646037
Seen 1998 samples
Epoch  22 Update  2760 Cost  0.00239889929071
Epoch  22 Update  2770 Cost  0.00257943035103
Epoch  22 Update  2780 Cost  0.00149185431655
Epoch  22 Update  2790 Cost  0.000589217466768
Epoch  22 Update  2800 Cost  0.00940593983978
Epoch  22 Update  2810 Cost  0.0137534644455
Epoch  22 Update  2820 Cost  0.00166488753166
Epoch  22 Update  2830 Cost  0.0171569176018
Epoch  22 Update  2840 Cost  0.00102008739486
Epoch  22 Update  2850 Cost  0.000791911152191
Epoch  22 Update  2860 Cost  0.00324920727871
Epoch  22 Update  2870 Cost  0.0950810015202
Seen 1998 samples
Epoch  23 Update  2880 Cost  0.0128028327599
Epoch  23 Update  2890 Cost  0.00346719077788
Epoch  23 Update  2900 Cost  0.0087534757331
Epoch  23 Update  2910 Cost  0.00122937886044
Epoch  23 Update  2920 Cost  0.00250567542389
Epoch  23 Update  2930 Cost  0.00414408231154
Epoch  23 Update  2940 Cost  0.00259612081572
Epoch  23 Update  2950 Cost  0.00774257257581
Epoch  23 Update  2960 Cost  0.000689741747919
('Train ', 0.0010010010010009784, 'Valid ', 0.11428571428571432, 'Test ', 0.21260000000000001)
Epoch  23 Update  2970 Cost  0.00163245317526
Epoch  23 Update  2980 Cost  0.00258441246115
Epoch  23 Update  2990 Cost  0.00037192957825
Epoch  23 Update  3000 Cost  0.0052351243794
Seen 1998 samples
Epoch  24 Update  3010 Cost  0.000277206592727
Epoch  24 Update  3020 Cost  0.00103787530679
Epoch  24 Update  3030 Cost  0.000995592679828
Epoch  24 Update  3040 Cost  0.0046515436843
Epoch  24 Update  3050 Cost  0.000422397599323
Epoch  24 Update  3060 Cost  6.77570496919e-05
Epoch  24 Update  3070 Cost  6.51707450743e-05
Epoch  24 Update  3080 Cost  0.00100990070496
Epoch  24 Update  3090 Cost  0.000502897310071
Epoch  24 Update  3100 Cost  0.00414153048769
Epoch  24 Update  3110 Cost  0.00071432616096
Epoch  24 Update  3120 Cost  0.0043924273923
Seen 1998 samples
Epoch  25 Update  3130 Cost  0.00138232880272
Epoch  25 Update  3140 Cost  0.0143043184653
Epoch  25 Update  3150 Cost  0.000674939888995
Epoch  25 Update  3160 Cost  0.00632217759266
Epoch  25 Update  3170 Cost  0.0204309727997
Epoch  25 Update  3180 Cost  0.0265913940966
Epoch  25 Update  3190 Cost  0.00629460485652
Epoch  25 Update  3200 Cost  0.00198838463984
Epoch  25 Update  3210 Cost  0.000771439343225
Epoch  25 Update  3220 Cost  0.000328363385051
Epoch  25 Update  3230 Cost  0.00278966035694
Epoch  25 Update  3240 Cost  0.01374412328
Epoch  25 Update  3250 Cost  0.00119380804244
Seen 1998 samples
Epoch  26 Update  3260 Cost  0.00354882120155
Epoch  26 Update  3270 Cost  0.00340070505626
Epoch  26 Update  3280 Cost  0.00180796696804
Epoch  26 Update  3290 Cost  0.00388853205368
Epoch  26 Update  3300 Cost  6.58762655803e-05
Epoch  26 Update  3310 Cost  0.00134636636358
Epoch  26 Update  3320 Cost  0.0823284015059
Epoch  26 Update  3330 Cost  0.0009768611053
Saving...
Done
('Train ', 0.0, 'Valid ', 0.10476190476190472, 'Test ', 0.20896000000000003)
Epoch  26 Update  3340 Cost  0.000786502263509
Epoch  26 Update  3350 Cost  0.00174854591023
Epoch  26 Update  3360 Cost  0.000935860851314
Epoch  26 Update  3370 Cost  0.042400047183
Seen 1998 samples
Epoch  27 Update  3380 Cost  0.00130186846945
Epoch  27 Update  3390 Cost  0.000418418407207
Epoch  27 Update  3400 Cost  0.000616938516032
Epoch  27 Update  3410 Cost  0.000307565525873
Epoch  27 Update  3420 Cost  0.00116601563059
Epoch  27 Update  3430 Cost  0.00018894468667
Epoch  27 Update  3440 Cost  0.000280799198663
Epoch  27 Update  3450 Cost  0.000353893032297
Epoch  27 Update  3460 Cost  0.000566475151572
Epoch  27 Update  3470 Cost  0.000943257531617
Epoch  27 Update  3480 Cost  0.0103253060952
Epoch  27 Update  3490 Cost  0.000689662585501
Epoch  27 Update  3500 Cost  0.000305496592773
Seen 1998 samples
Epoch  28 Update  3510 Cost  0.000148388600792
Epoch  28 Update  3520 Cost  0.000416049937485
Epoch  28 Update  3530 Cost  0.000268755917205
Epoch  28 Update  3540 Cost  0.000486299803015
Epoch  28 Update  3550 Cost  0.00236166198738
Epoch  28 Update  3560 Cost  0.000528966251295
Epoch  28 Update  3570 Cost  0.0180685035884
Epoch  28 Update  3580 Cost  0.000133435067255
Epoch  28 Update  3590 Cost  0.00935040134937
Epoch  28 Update  3600 Cost  5.41803565284e-05
Epoch  28 Update  3610 Cost  0.000558834057301
Epoch  28 Update  3620 Cost  0.000110324501293
Seen 1998 samples
Epoch  29 Update  3630 Cost  0.307240366936
Epoch  29 Update  3640 Cost  0.0177686866373
Epoch  29 Update  3650 Cost  0.00117836426944
Epoch  29 Update  3660 Cost  0.00124371622223
Epoch  29 Update  3670 Cost  0.00364556279965
Epoch  29 Update  3680 Cost  0.00351628917269
Epoch  29 Update  3690 Cost  0.000407897023251
Epoch  29 Update  3700 Cost  0.00144944083877
('Train ', 0.0, 'Valid ', 0.11428571428571432, 'Test ', 0.21048)
Epoch  29 Update  3710 Cost  4.94527848787e-05
Epoch  29 Update  3720 Cost  4.62944117317e-05
Epoch  29 Update  3730 Cost  0.00257904361933
Epoch  29 Update  3740 Cost  9.85429505818e-05
Epoch  29 Update  3750 Cost  0.00272028101608
Seen 1998 samples
Epoch  30 Update  3760 Cost  0.000870466290507
Epoch  30 Update  3770 Cost  0.00117484328803
Epoch  30 Update  3780 Cost  0.000765410833992
Epoch  30 Update  3790 Cost  0.13168078661
Epoch  30 Update  3800 Cost  0.00671979039907
Epoch  30 Update  3810 Cost  0.0014395553153
Epoch  30 Update  3820 Cost  0.000964768987615
Epoch  30 Update  3830 Cost  0.000771940045524
Epoch  30 Update  3840 Cost  0.000123798774439
Epoch  30 Update  3850 Cost  0.00151393644046
Epoch  30 Update  3860 Cost  0.0023775305599
Epoch  30 Update  3870 Cost  7.01951794326e-05
Seen 1998 samples
Epoch  31 Update  3880 Cost  0.000492439605296
Epoch  31 Update  3890 Cost  0.000212964776438
Epoch  31 Update  3900 Cost  0.00029200289282
Epoch  31 Update  3910 Cost  0.000356875883881
Epoch  31 Update  3920 Cost  0.000754276057705
Epoch  31 Update  3930 Cost  0.000224542876822
Epoch  31 Update  3940 Cost  0.000669337634463
Epoch  31 Update  3950 Cost  0.000456109934021
Epoch  31 Update  3960 Cost  0.00130946282297
Epoch  31 Update  3970 Cost  0.000216702363105
Epoch  31 Update  3980 Cost  0.00691257743165
Epoch  31 Update  3990 Cost  4.00098906539e-06
Epoch  31 Update  4000 Cost  7.74873024056e-06
Seen 1998 samples
Epoch  32 Update  4010 Cost  0.000143646873767
Epoch  32 Update  4020 Cost  0.810265481472
Epoch  32 Update  4030 Cost  0.00225186417811
Epoch  32 Update  4040 Cost  0.00161474221386
Epoch  32 Update  4050 Cost  0.00442250259221
Epoch  32 Update  4060 Cost  0.000357590208296
Epoch  32 Update  4070 Cost  0.00132624909747
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.20899999999999996)
Epoch  32 Update  4080 Cost  0.00101920543239
Epoch  32 Update  4090 Cost  0.000519850174896
Epoch  32 Update  4100 Cost  0.00152625818737
Epoch  32 Update  4110 Cost  0.000140472169733
Epoch  32 Update  4120 Cost  0.00134973181412
Seen 1998 samples
Epoch  33 Update  4130 Cost  0.000326518405927
Epoch  33 Update  4140 Cost  9.46540822042e-05
Epoch  33 Update  4150 Cost  0.000170942366822
Epoch  33 Update  4160 Cost  7.4787021731e-05
Epoch  33 Update  4170 Cost  0.000473034771858
Epoch  33 Update  4180 Cost  0.000198813417228
Epoch  33 Update  4190 Cost  0.000687624502461
Epoch  33 Update  4200 Cost  0.00115955946967
Epoch  33 Update  4210 Cost  2.94684396067e-05
Epoch  33 Update  4220 Cost  4.40489202447e-05
Epoch  33 Update  4230 Cost  0.000187881189049
Epoch  33 Update  4240 Cost  0.00156319374219
Epoch  33 Update  4250 Cost  2.42603382503e-05
Seen 1998 samples
Epoch  34 Update  4260 Cost  0.0176779385656
Epoch  34 Update  4270 Cost  0.000452817825135
Epoch  34 Update  4280 Cost  0.000335407850798
Epoch  34 Update  4290 Cost  9.40415411605e-05
Epoch  34 Update  4300 Cost  0.000610103539657
Epoch  34 Update  4310 Cost  0.000218064102228
Epoch  34 Update  4320 Cost  0.000175186898559
Epoch  34 Update  4330 Cost  0.000705145765096
Epoch  34 Update  4340 Cost  0.000198374356842
Epoch  34 Update  4350 Cost  0.000113945665362
Epoch  34 Update  4360 Cost  0.000314099423122
Epoch  34 Update  4370 Cost  0.000396590417949
Seen 1998 samples
Epoch  35 Update  4380 Cost  0.000353531533619
Epoch  35 Update  4390 Cost  1.90891423699e-05
Epoch  35 Update  4400 Cost  5.83090441069e-05
Epoch  35 Update  4410 Cost  0.000108294800157
Epoch  35 Update  4420 Cost  4.72273386549e-05
Epoch  35 Update  4430 Cost  0.000340011261869
Epoch  35 Update  4440 Cost  0.000379962992156
Saving...
Done
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.21504000000000001)
Epoch  35 Update  4450 Cost  3.38738209393e-05
Epoch  35 Update  4460 Cost  5.98419028393e-05
Epoch  35 Update  4470 Cost  2.14633255382e-05
Epoch  35 Update  4480 Cost  0.000184181655641
Epoch  35 Update  4490 Cost  0.00071235105861
Epoch  35 Update  4500 Cost  0.000325779285049
Seen 1998 samples
Epoch  36 Update  4510 Cost  0.00103620334994
Epoch  36 Update  4520 Cost  9.07053108676e-05
Epoch  36 Update  4530 Cost  0.000149376399349
Epoch  36 Update  4540 Cost  0.000145306636114
Epoch  36 Update  4550 Cost  0.000402607838623
Epoch  36 Update  4560 Cost  2.12578252103e-05
Epoch  36 Update  4570 Cost  6.86561616021e-05
Epoch  36 Update  4580 Cost  0.000722570403013
Epoch  36 Update  4590 Cost  0.00142799294554
Epoch  36 Update  4600 Cost  0.00119431456551
Epoch  36 Update  4610 Cost  3.11228104692e-05
Epoch  36 Update  4620 Cost  0.00140045257285
Seen 1998 samples
Epoch  37 Update  4630 Cost  0.00024942602613
Epoch  37 Update  4640 Cost  0.000131979730213
Epoch  37 Update  4650 Cost  8.73656608746e-05
Epoch  37 Update  4660 Cost  3.08338603645e-05
Epoch  37 Update  4670 Cost  0.000231546509895
Epoch  37 Update  4680 Cost  0.000219878987991
Epoch  37 Update  4690 Cost  0.000131046836032
Epoch  37 Update  4700 Cost  7.42610209272e-05
Epoch  37 Update  4710 Cost  0.000116457798867
Epoch  37 Update  4720 Cost  7.72863859311e-05
Epoch  37 Update  4730 Cost  0.000652568764053
Epoch  37 Update  4740 Cost  0.000345822481904
Epoch  37 Update  4750 Cost  0.000193397630937
Seen 1998 samples
Epoch  38 Update  4760 Cost  0.000319140235661
Epoch  38 Update  4770 Cost  3.91904677599e-06
Epoch  38 Update  4780 Cost  0.00068951677531
Epoch  38 Update  4790 Cost  7.61411720305e-05
Epoch  38 Update  4800 Cost  3.10119030473e-05
Epoch  38 Update  4810 Cost  0.000227168493439
('Train ', 0.0, 'Valid ', 0.10476190476190472, 'Test ', 0.22160000000000002)
Epoch  38 Update  4820 Cost  9.33326446102e-05
Epoch  38 Update  4830 Cost  0.000171072446392
Epoch  38 Update  4840 Cost  0.000106556472019
Epoch  38 Update  4850 Cost  0.000108919855847
Epoch  38 Update  4860 Cost  4.97746732435e-05
Epoch  38 Update  4870 Cost  0.000109980799607
Seen 1998 samples
Epoch  39 Update  4880 Cost  3.9351034502e-05
Epoch  39 Update  4890 Cost  4.25280450145e-05
Epoch  39 Update  4900 Cost  6.36756740278e-05
Epoch  39 Update  4910 Cost  6.00525927439e-05
Epoch  39 Update  4920 Cost  0.000767446763348
Epoch  39 Update  4930 Cost  0.000101729950984
Epoch  39 Update  4940 Cost  3.97067851736e-05
Epoch  39 Update  4950 Cost  1.84931450349e-05
Epoch  39 Update  4960 Cost  0.000285596761387
Epoch  39 Update  4970 Cost  0.000380296580261
Epoch  39 Update  4980 Cost  7.36148376745e-06
Epoch  39 Update  4990 Cost  4.83443072881e-05
Epoch  39 Update  5000 Cost  5.3749208746e-05
Seen 1998 samples
Epoch  40 Update  5010 Cost  6.95983981132e-05
Epoch  40 Update  5020 Cost  0.000154872235726
Epoch  40 Update  5030 Cost  2.09795234696e-05
Epoch  40 Update  5040 Cost  3.77749552172e-06
Epoch  40 Update  5050 Cost  6.84007172822e-05
Epoch  40 Update  5060 Cost  0.000173857421032
Epoch  40 Update  5070 Cost  8.96639976418e-05
Epoch  40 Update  5080 Cost  2.27449509111e-05
Epoch  40 Update  5090 Cost  3.31554724653e-06
Epoch  40 Update  5100 Cost  7.70413225837e-06
Epoch  40 Update  5110 Cost  1.75543609657e-05
Epoch  40 Update  5120 Cost  1.39036583278e-05
Seen 1998 samples
Epoch  41 Update  5130 Cost  6.16170746071e-06
Epoch  41 Update  5140 Cost  5.81156564294e-06
Epoch  41 Update  5150 Cost  9.4944058219e-05
Epoch  41 Update  5160 Cost  2.82095588773e-05
Epoch  41 Update  5170 Cost  8.12115217741e-07
Epoch  41 Update  5180 Cost  1.82293260877e-05
('Train ', 0.0, 'Valid ', 0.10476190476190472, 'Test ', 0.21987999999999996)
Epoch  41 Update  5190 Cost  9.70168839558e-05
Epoch  41 Update  5200 Cost  3.73290749849e-05
Epoch  41 Update  5210 Cost  2.69713837042e-06
Epoch  41 Update  5220 Cost  2.22773633141e-06
Epoch  41 Update  5230 Cost  0.00010659792315
Epoch  41 Update  5240 Cost  1.92807747226e-05
Epoch  41 Update  5250 Cost  5.14144085173e-05
Seen 1998 samples
Epoch  42 Update  5260 Cost  3.12929455504e-06
Epoch  42 Update  5270 Cost  2.42122241616e-05
Epoch  42 Update  5280 Cost  8.80989682628e-05
Epoch  42 Update  5290 Cost  6.4640014898e-05
Epoch  42 Update  5300 Cost  9.14791235118e-05
Epoch  42 Update  5310 Cost  0.000216640648432
Epoch  42 Update  5320 Cost  0.000242722700932
Epoch  42 Update  5330 Cost  9.13235489861e-05
Epoch  42 Update  5340 Cost  0.000185252938536
Epoch  42 Update  5350 Cost  0.000269984593615
Epoch  42 Update  5360 Cost  1.13481082735e-05
Epoch  42 Update  5370 Cost  0.000585146073718
Seen 1998 samples
Epoch  43 Update  5380 Cost  0.000640544109046
Epoch  43 Update  5390 Cost  7.7496159065e-05
Epoch  43 Update  5400 Cost  0.000119090174849
Epoch  43 Update  5410 Cost  1.27487783175e-05
Epoch  43 Update  5420 Cost  6.51725204079e-05
Epoch  43 Update  5430 Cost  0.000615065917373
Epoch  43 Update  5440 Cost  1.81772520591e-05
Epoch  43 Update  5450 Cost  7.70114929765e-05
Epoch  43 Update  5460 Cost  4.39589757661e-06
Epoch  43 Update  5470 Cost  4.54913897556e-05
Epoch  43 Update  5480 Cost  1.3538488929e-05
Epoch  43 Update  5490 Cost  0.000212723592995
Epoch  43 Update  5500 Cost  6.70134477332e-06
Seen 1998 samples
Epoch  44 Update  5510 Cost  0.000162975935382
Epoch  44 Update  5520 Cost  5.50014556211e-05
Epoch  44 Update  5530 Cost  7.08568268237e-06
Epoch  44 Update  5540 Cost  7.57737461754e-06
Epoch  44 Update  5550 Cost  0.000108509972051
Saving...
Done
('Train ', 0.0, 'Valid ', 0.10476190476190472, 'Test ', 0.21967999999999999)
Epoch  44 Update  5560 Cost  2.45305618591e-05
Epoch  44 Update  5570 Cost  6.14964301349e-05
Epoch  44 Update  5580 Cost  4.82801488033e-06
Epoch  44 Update  5590 Cost  2.08255169127e-05
Epoch  44 Update  5600 Cost  1.38064033308e-05
Epoch  44 Update  5610 Cost  2.92625481961e-05
Epoch  44 Update  5620 Cost  2.51469391515e-05
Seen 1998 samples
Epoch  45 Update  5630 Cost  2.18059485633e-05
Epoch  45 Update  5640 Cost  0.000241188288783
Epoch  45 Update  5650 Cost  1.86922788998e-05
Epoch  45 Update  5660 Cost  1.60865856742e-05
Epoch  45 Update  5670 Cost  2.20994097617e-05
Epoch  45 Update  5680 Cost  7.48442835175e-05
Epoch  45 Update  5690 Cost  1.48267076838e-06
Epoch  45 Update  5700 Cost  3.5719906009e-05
Epoch  45 Update  5710 Cost  7.21417891327e-05
Epoch  45 Update  5720 Cost  7.82329971116e-06
Epoch  45 Update  5730 Cost  3.82337420888e-05
Epoch  45 Update  5740 Cost  1.13847227112e-05
Epoch  45 Update  5750 Cost  0.000198555411771
Seen 1998 samples
Epoch  46 Update  5760 Cost  3.27292327711e-05
Epoch  46 Update  5770 Cost  3.15098550345e-05
Epoch  46 Update  5780 Cost  3.04774821416e-05
Epoch  46 Update  5790 Cost  3.15161332765e-06
Epoch  46 Update  5800 Cost  5.46408955415e-05
Epoch  46 Update  5810 Cost  0.000107777399535
Epoch  46 Update  5820 Cost  1.56441637955e-05
Epoch  46 Update  5830 Cost  0.000619835220277
Epoch  46 Update  5840 Cost  1.78673963092e-05
Epoch  46 Update  5850 Cost  3.81747158826e-05
Epoch  46 Update  5860 Cost  8.15864041215e-06
Epoch  46 Update  5870 Cost  0.000108253225335
Seen 1998 samples
Epoch  47 Update  5880 Cost  8.65582405822e-05
Epoch  47 Update  5890 Cost  2.13841194636e-05
Epoch  47 Update  5900 Cost  3.09629685944e-05
Epoch  47 Update  5910 Cost  4.77646717627e-05
Epoch  47 Update  5920 Cost  6.78419382893e-05
('Train ', 0.0, 'Valid ', 0.11428571428571432, 'Test ', 0.21963999999999995)
Epoch  47 Update  5930 Cost  0.000446978083346
Epoch  47 Update  5940 Cost  9.77183808573e-05
Epoch  47 Update  5950 Cost  1.48469580381e-05
Epoch  47 Update  5960 Cost  1.49763018271e-05
Epoch  47 Update  5970 Cost  5.89366900385e-05
Epoch  47 Update  5980 Cost  5.0643357099e-05
Epoch  47 Update  5990 Cost  3.94930757466e-05
Epoch  47 Update  6000 Cost  5.47518220628e-06
Seen 1998 samples
Epoch  48 Update  6010 Cost  9.17265788303e-05
Epoch  48 Update  6020 Cost  2.05613359867e-05
Epoch  48 Update  6030 Cost  0.00019926004461
Epoch  48 Update  6040 Cost  4.5821770982e-06
Epoch  48 Update  6050 Cost  0.000151596861542
Epoch  48 Update  6060 Cost  2.41529432969e-05
Epoch  48 Update  6070 Cost  0.000152563981828
Epoch  48 Update  6080 Cost  4.02336354455e-06
Epoch  48 Update  6090 Cost  0.000178187648999
Epoch  48 Update  6100 Cost  7.41352050682e-06
Epoch  48 Update  6110 Cost  1.6868867533e-05
Epoch  48 Update  6120 Cost  7.79364108894e-06
Seen 1998 samples
Epoch  49 Update  6130 Cost  6.88446925778e-06
Epoch  49 Update  6140 Cost  2.80203421426e-05
Epoch  49 Update  6150 Cost  3.33267416863e-05
Epoch  49 Update  6160 Cost  0.000104290746094
Epoch  49 Update  6170 Cost  2.80394815491e-05
Epoch  49 Update  6180 Cost  5.9232716012e-06
Epoch  49 Update  6190 Cost  5.44320137124e-05
Epoch  49 Update  6200 Cost  0.000117406285426
Epoch  49 Update  6210 Cost  5.20057210451e-06
Epoch  49 Update  6220 Cost  7.18248384146e-06
Epoch  49 Update  6230 Cost  4.04766033171e-05
Epoch  49 Update  6240 Cost  0.000172453830601
Epoch  49 Update  6250 Cost  6.27573581369e-06
Seen 1998 samples
Epoch  50 Update  6260 Cost  5.77424134462e-06
Epoch  50 Update  6270 Cost  1.46562515511e-05
Epoch  50 Update  6280 Cost  3.18162456097e-05
Epoch  50 Update  6290 Cost  4.16494503952e-06
('Train ', 0.0, 'Valid ', 0.10476190476190472, 'Test ', 0.22092000000000001)
Epoch  50 Update  6300 Cost  1.2293492091e-06
Epoch  50 Update  6310 Cost  0.000100280420156
Epoch  50 Update  6320 Cost  1.43580700751e-05
Epoch  50 Update  6330 Cost  7.51784546082e-06
Epoch  50 Update  6340 Cost  1.00437182482e-05
Epoch  50 Update  6350 Cost  0.000152109525516
Epoch  50 Update  6360 Cost  4.41081874669e-06
Epoch  50 Update  6370 Cost  0.000132639775984
Seen 1998 samples
Epoch  51 Update  6380 Cost  2.31852645811e-05
Epoch  51 Update  6390 Cost  1.82513213076e-05
Epoch  51 Update  6400 Cost  1.72114851011e-05
Epoch  51 Update  6410 Cost  0.000106247134681
Epoch  51 Update  6420 Cost  0.000212659258978
Epoch  51 Update  6430 Cost  7.51972693251e-05
Epoch  51 Update  6440 Cost  7.62326162658e-05
Epoch  51 Update  6450 Cost  3.67317807104e-06
Epoch  51 Update  6460 Cost  9.35812840908e-06
Epoch  51 Update  6470 Cost  1.70402508957e-05
Epoch  51 Update  6480 Cost  3.49039764842e-05
Epoch  51 Update  6490 Cost  3.1005267374e-05
Epoch  51 Update  6500 Cost  1.10868058982e-05
Seen 1998 samples
Epoch  52 Update  6510 Cost  3.49435686076e-06
Epoch  52 Update  6520 Cost  6.7073655373e-05
Epoch  52 Update  6530 Cost  2.2128369892e-06
Epoch  52 Update  6540 Cost  5.62502136745e-05
Epoch  52 Update  6550 Cost  6.26740802545e-05
Epoch  52 Update  6560 Cost  0.000128955463879
Epoch  52 Update  6570 Cost  3.66379099432e-05
Epoch  52 Update  6580 Cost  4.54501287095e-06
Epoch  52 Update  6590 Cost  0.000170490326127
Epoch  52 Update  6600 Cost  3.52137030859e-05
Epoch  52 Update  6610 Cost  1.16229841751e-06
Epoch  52 Update  6620 Cost  5.911667904e-05
Seen 1998 samples
Epoch  53 Update  6630 Cost  1.12435473056e-05
Epoch  53 Update  6640 Cost  2.5452744012e-05
Epoch  53 Update  6650 Cost  8.89631064638e-06
Epoch  53 Update  6660 Cost  1.41612063089e-05
Saving...
Done
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.22255999999999998)
Epoch  53 Update  6670 Cost  0.000183772397577
Epoch  53 Update  6680 Cost  5.06646802023e-06
Epoch  53 Update  6690 Cost  1.58600578288e-05
Epoch  53 Update  6700 Cost  7.11556458555e-06
Epoch  53 Update  6710 Cost  5.13562445121e-05
Epoch  53 Update  6720 Cost  4.33627792518e-06
Epoch  53 Update  6730 Cost  2.89590461762e-05
Epoch  53 Update  6740 Cost  7.78911999078e-05
Epoch  53 Update  6750 Cost  4.78596266476e-05
Seen 1998 samples
Epoch  54 Update  6760 Cost  1.13181822599e-05
Epoch  54 Update  6770 Cost  7.61492128731e-06
Epoch  54 Update  6780 Cost  4.28319362982e-05
Epoch  54 Update  6790 Cost  3.54655662704e-06
Epoch  54 Update  6800 Cost  0.000119179072499
Epoch  54 Update  6810 Cost  6.07530164416e-05
Epoch  54 Update  6820 Cost  1.38362356665e-05
Epoch  54 Update  6830 Cost  4.31851840403e-05
Epoch  54 Update  6840 Cost  0.000217873734073
Epoch  54 Update  6850 Cost  0.000368709559552
Epoch  54 Update  6860 Cost  2.5916917366e-05
Epoch  54 Update  6870 Cost  5.06640105868e-07
Seen 1998 samples
Epoch  55 Update  6880 Cost  2.27124637604e-05
Epoch  55 Update  6890 Cost  7.063301382e-06
Epoch  55 Update  6900 Cost  9.38778214277e-07
Epoch  55 Update  6910 Cost  6.54179075354e-06
Epoch  55 Update  6920 Cost  1.57588583534e-05
Epoch  55 Update  6930 Cost  4.06891485909e-05
Epoch  55 Update  6940 Cost  1.49241259351e-05
Epoch  55 Update  6950 Cost  6.19917591393e-06
Epoch  55 Update  6960 Cost  5.04416084368e-06
Epoch  55 Update  6970 Cost  1.14077056423e-05
Epoch  55 Update  6980 Cost  9.31325416786e-07
Epoch  55 Update  6990 Cost  2.34598574025e-05
Epoch  55 Update  7000 Cost  0.000292899785563
Seen 1998 samples
Epoch  56 Update  7010 Cost  4.31785738328e-05
Epoch  56 Update  7020 Cost  7.29426392354e-06
Epoch  56 Update  7030 Cost  7.89040677773e-06
('Train ', 0.0, 'Valid ', 0.11428571428571432, 'Test ', 0.22328000000000003)
Epoch  56 Update  7040 Cost  2.57874344243e-05
Epoch  56 Update  7050 Cost  8.06187381386e-06
Epoch  56 Update  7060 Cost  2.16066922576e-07
Epoch  56 Update  7070 Cost  1.2562444681e-05
Epoch  56 Update  7080 Cost  4.87450015498e-05
Epoch  56 Update  7090 Cost  8.95596804185e-06
Epoch  56 Update  7100 Cost  2.50725825026e-05
Epoch  56 Update  7110 Cost  1.02229414551e-05
Epoch  56 Update  7120 Cost  4.26845726906e-05
Seen 1998 samples
Epoch  57 Update  7130 Cost  2.12895320146e-05
Epoch  57 Update  7140 Cost  2.48389897024e-05
Epoch  57 Update  7150 Cost  6.36168188066e-05
Epoch  57 Update  7160 Cost  1.89246952687e-06
Epoch  57 Update  7170 Cost  7.15317146387e-05
Epoch  57 Update  7180 Cost  3.50184359377e-06
Epoch  57 Update  7190 Cost  1.82439453056e-05
Epoch  57 Update  7200 Cost  3.56886471309e-06
Epoch  57 Update  7210 Cost  1.54803801706e-05
Epoch  57 Update  7220 Cost  2.18304580812e-06
Epoch  57 Update  7230 Cost  1.24057587527e-05
Epoch  57 Update  7240 Cost  8.32997466205e-06
Epoch  57 Update  7250 Cost  4.84271149617e-05
Seen 1998 samples
Epoch  58 Update  7260 Cost  0.000207813311135
Epoch  58 Update  7270 Cost  0.000118903262774
Epoch  58 Update  7280 Cost  2.69176780421e-05
Epoch  58 Update  7290 Cost  9.64125047176e-06
Epoch  58 Update  7300 Cost  1.35683194458e-05
Epoch  58 Update  7310 Cost  6.25114489594e-06
Epoch  58 Update  7320 Cost  6.1399929109e-05
Epoch  58 Update  7330 Cost  1.21824923554e-05
Epoch  58 Update  7340 Cost  6.28837324257e-06
Epoch  58 Update  7350 Cost  1.01926525531e-05
Epoch  58 Update  7360 Cost  2.5673158234e-05
Epoch  58 Update  7370 Cost  0.000171797611983
Seen 1998 samples
Epoch  59 Update  7380 Cost  8.64338653628e-05
Epoch  59 Update  7390 Cost  4.23942856287e-06
Epoch  59 Update  7400 Cost  5.27407282789e-05
('Train ', 0.0, 'Valid ', 0.11428571428571432, 'Test ', 0.22211999999999998)
Epoch  59 Update  7410 Cost  2.71260287263e-05
Epoch  59 Update  7420 Cost  1.60934052928e-06
Epoch  59 Update  7430 Cost  3.7900517782e-05
Epoch  59 Update  7440 Cost  0.000109739958134
Epoch  59 Update  7450 Cost  0.000147223327076
Epoch  59 Update  7460 Cost  3.53691284545e-05
Epoch  59 Update  7470 Cost  0.000201925780857
Epoch  59 Update  7480 Cost  2.01592047233e-05
Epoch  59 Update  7490 Cost  9.28364443098e-06
Epoch  59 Update  7500 Cost  4.42777974285e-07
Seen 1998 samples
Epoch  60 Update  7510 Cost  9.05175620574e-05
Epoch  60 Update  7520 Cost  8.29275995784e-06
Epoch  60 Update  7530 Cost  7.59239310355e-06
Epoch  60 Update  7540 Cost  3.13672421726e-06
Epoch  60 Update  7550 Cost  1.76341927727e-05
Epoch  60 Update  7560 Cost  4.61200615973e-05
Epoch  60 Update  7570 Cost  6.45973113933e-06
Epoch  60 Update  7580 Cost  8.93800461199e-05
Epoch  60 Update  7590 Cost  6.76653726259e-05
Epoch  60 Update  7600 Cost  1.98615325644e-05
Epoch  60 Update  7610 Cost  5.61790102438e-06
Epoch  60 Update  7620 Cost  1.78788232006e-05
Seen 1998 samples
Epoch  61 Update  7630 Cost  5.22566151631e-05
Epoch  61 Update  7640 Cost  1.95951929527e-06
Epoch  61 Update  7650 Cost  1.52597222041e-05
Epoch  61 Update  7660 Cost  3.55072552338e-05
Epoch  61 Update  7670 Cost  3.12924498758e-07
Epoch  61 Update  7680 Cost  7.97214511294e-07
Epoch  61 Update  7690 Cost  8.24057497084e-06
Epoch  61 Update  7700 Cost  2.99098228425e-05
Epoch  61 Update  7710 Cost  5.79569568799e-05
Epoch  61 Update  7720 Cost  8.87403621164e-06
Epoch  61 Update  7730 Cost  0.000270180287771
Epoch  61 Update  7740 Cost  0.000197561865207
Epoch  61 Update  7750 Cost  8.28190386528e-05
Seen 1998 samples
Epoch  62 Update  7760 Cost  0.000390141474782
Epoch  62 Update  7770 Cost  6.92905189226e-07
Saving...
Done
('Train ', 0.0, 'Valid ', 0.11428571428571432, 'Test ', 0.21923999999999999)
Epoch  62 Update  7780 Cost  2.55595641647e-05
Epoch  62 Update  7790 Cost  2.14456504182e-05
Epoch  62 Update  7800 Cost  5.54341841053e-06
Epoch  62 Update  7810 Cost  1.16829605759e-05
Epoch  62 Update  7820 Cost  7.04855847289e-06
Epoch  62 Update  7830 Cost  4.17545343225e-05
Epoch  62 Update  7840 Cost  4.4703490687e-08
Epoch  62 Update  7850 Cost  1.9161398086e-05
Epoch  62 Update  7860 Cost  0.00055049394723
Epoch  62 Update  7870 Cost  5.46980809304e-05
Seen 1998 samples
Epoch  63 Update  7880 Cost  1.26440099848e-05
Epoch  63 Update  7890 Cost  2.08148667298e-05
Epoch  63 Update  7900 Cost  5.75196145292e-06
Epoch  63 Update  7910 Cost  3.49173169525e-05
Epoch  63 Update  7920 Cost  1.61287953233e-05
Epoch  63 Update  7930 Cost  4.71632847621e-06
Epoch  63 Update  7940 Cost  3.24099892168e-05
Epoch  63 Update  7950 Cost  2.13088037526e-06
Epoch  63 Update  7960 Cost  5.99786199018e-06
Epoch  63 Update  7970 Cost  3.23976782965e-05
Epoch  63 Update  7980 Cost  4.45549903816e-06
Epoch  63 Update  7990 Cost  1.84936197911e-05
Epoch  63 Update  8000 Cost  0.00106859498192
Seen 1998 samples
Epoch  64 Update  8010 Cost  1.89992124433e-06
Epoch  64 Update  8020 Cost  1.36427788675e-05
Epoch  64 Update  8030 Cost  8.60892396304e-05
Epoch  64 Update  8040 Cost  0.000112375309982
Epoch  64 Update  8050 Cost  1.5646226359e-07
Epoch  64 Update  8060 Cost  1.09677121145e-05
Epoch  64 Update  8070 Cost  7.50313529352e-06
Epoch  64 Update  8080 Cost  5.34839600732e-05
Epoch  64 Update  8090 Cost  3.48689650309e-06
Epoch  64 Update  8100 Cost  5.47532981727e-05
Epoch  64 Update  8110 Cost  5.2931416576e-05
Epoch  64 Update  8120 Cost  4.42570762971e-06
Seen 1998 samples
Epoch  65 Update  8130 Cost  4.50774905403e-06
Epoch  65 Update  8140 Cost  7.65658041928e-05
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.22008000000000005)
Epoch  65 Update  8150 Cost  1.73756034201e-05
Epoch  65 Update  8160 Cost  1.19660553537e-05
Epoch  65 Update  8170 Cost  3.97106923629e-05
Epoch  65 Update  8180 Cost  8.80681818671e-06
Epoch  65 Update  8190 Cost  1.55717907546e-06
Epoch  65 Update  8200 Cost  4.46621706942e-05
Epoch  65 Update  8210 Cost  1.86426877917e-05
Epoch  65 Update  8220 Cost  2.66733445642e-06
Epoch  65 Update  8230 Cost  5.51320590603e-05
Epoch  65 Update  8240 Cost  1.52040038301e-05
Epoch  65 Update  8250 Cost  6.80337543599e-05
Seen 1998 samples
Epoch  66 Update  8260 Cost  4.84288932512e-07
Epoch  66 Update  8270 Cost  3.93402751797e-06
Epoch  66 Update  8280 Cost  5.22292339156e-06
Epoch  66 Update  8290 Cost  7.71151553636e-06
Epoch  66 Update  8300 Cost  3.51366506948e-05
Epoch  66 Update  8310 Cost  1.2670923752e-05
Epoch  66 Update  8320 Cost  2.00336180569e-05
Epoch  66 Update  8330 Cost  4.22154625994e-05
Epoch  66 Update  8340 Cost  5.78800063522e-05
Epoch  66 Update  8350 Cost  4.42577220383e-06
Epoch  66 Update  8360 Cost  7.32401758796e-06
Epoch  66 Update  8370 Cost  7.38368589737e-06
Seen 1998 samples
Epoch  67 Update  8380 Cost  2.16814714804e-06
Epoch  67 Update  8390 Cost  3.78693912353e-05
Epoch  67 Update  8400 Cost  3.44963723364e-06
Epoch  67 Update  8410 Cost  0.000155371104483
Epoch  67 Update  8420 Cost  2.16423304664e-05
Epoch  67 Update  8430 Cost  2.3543966563e-06
Epoch  67 Update  8440 Cost  1.26144577735e-05
Epoch  67 Update  8450 Cost  5.04424406245e-06
Epoch  67 Update  8460 Cost  1.36871449286e-05
Epoch  67 Update  8470 Cost  4.91738944675e-07
Epoch  67 Update  8480 Cost  1.4935983927e-05
Epoch  67 Update  8490 Cost  4.94948653795e-05
Epoch  67 Update  8500 Cost  8.39531930978e-05
Seen 1998 samples
Epoch  68 Update  8510 Cost  6.921768545e-06
('Train ', 0.0, 'Valid ', 0.11428571428571432, 'Test ', 0.22096000000000005)
Epoch  68 Update  8520 Cost  3.40222759405e-05
Epoch  68 Update  8530 Cost  1.68435981323e-05
Epoch  68 Update  8540 Cost  1.10422561193e-05
Epoch  68 Update  8550 Cost  3.180897329e-05
Epoch  68 Update  8560 Cost  0.000102940684883
Epoch  68 Update  8570 Cost  4.32143269791e-06
Epoch  68 Update  8580 Cost  4.52789172414e-05
Epoch  68 Update  8590 Cost  5.3607938753e-05
Epoch  68 Update  8600 Cost  0.000133985435241
Epoch  68 Update  8610 Cost  2.00999820663e-05
Epoch  68 Update  8620 Cost  5.57129715162e-05
Seen 1998 samples
Epoch  69 Update  8630 Cost  1.35382651933e-05
Epoch  69 Update  8640 Cost  3.61357501788e-06
Epoch  69 Update  8650 Cost  8.3598170022e-06
Epoch  69 Update  8660 Cost  1.28895396756e-06
Epoch  69 Update  8670 Cost  1.73609423655e-05
Epoch  69 Update  8680 Cost  3.38410172844e-05
Epoch  69 Update  8690 Cost  5.014323051e-06
Epoch  69 Update  8700 Cost  4.24683548772e-07
Epoch  69 Update  8710 Cost  1.10572864287e-05
Epoch  69 Update  8720 Cost  1.81136510946e-05
Epoch  69 Update  8730 Cost  3.4496445096e-06
Epoch  69 Update  8740 Cost  1.75543609657e-05
Epoch  69 Update  8750 Cost  1.70986422745e-05
Seen 1998 samples
Epoch  70 Update  8760 Cost  2.35648331e-05
Epoch  70 Update  8770 Cost  3.73530019715e-05
Epoch  70 Update  8780 Cost  0.000156036767294
Epoch  70 Update  8790 Cost  4.69396854896e-06
Epoch  70 Update  8800 Cost  5.4229520174e-05
Epoch  70 Update  8810 Cost  1.11241988634e-05
Epoch  70 Update  8820 Cost  3.54650683221e-06
Epoch  70 Update  8830 Cost  1.09523807623e-06
Epoch  70 Update  8840 Cost  5.38206440979e-05
Epoch  70 Update  8850 Cost  3.72565809812e-05
Epoch  70 Update  8860 Cost  9.65669005382e-06
Epoch  70 Update  8870 Cost  4.32134839912e-07
Seen 1998 samples
Epoch  71 Update  8880 Cost  8.37894840515e-05
Saving...
Done
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.22155999999999998)
Epoch  71 Update  8890 Cost  9.58629170782e-05
Epoch  71 Update  8900 Cost  3.2969568565e-05
Epoch  71 Update  8910 Cost  2.13066996366e-05
Epoch  71 Update  8920 Cost  7.12724213372e-05
Epoch  71 Update  8930 Cost  8.56818189732e-07
Epoch  71 Update  8940 Cost  2.23517531595e-07
Epoch  71 Update  8950 Cost  1.40153415487e-05
Epoch  71 Update  8960 Cost  3.82219332096e-06
Epoch  71 Update  8970 Cost  2.78765601252e-05
Epoch  71 Update  8980 Cost  8.56818758166e-07
Epoch  71 Update  8990 Cost  1.03563320408e-06
Epoch  71 Update  9000 Cost  9.06542190933e-05
Seen 1998 samples
Epoch  72 Update  9010 Cost  3.69959379896e-05
Epoch  72 Update  9020 Cost  1.32406066768e-05
Epoch  72 Update  9030 Cost  3.1358540582e-05
Epoch  72 Update  9040 Cost  1.42235758176e-05
Epoch  72 Update  9050 Cost  3.79980008347e-07
Epoch  72 Update  9060 Cost  1.40451593325e-05
Epoch  72 Update  9070 Cost  5.50611912331e-06
Epoch  72 Update  9080 Cost  2.41402835854e-06
Epoch  72 Update  9090 Cost  1.9588491341e-05
Epoch  72 Update  9100 Cost  2.34780163737e-05
Epoch  72 Update  9110 Cost  3.28576811626e-06
Epoch  72 Update  9120 Cost  2.7245558158e-05
Seen 1998 samples
Epoch  73 Update  9130 Cost  1.39326880344e-06
Epoch  73 Update  9140 Cost  1.13405694719e-05
Epoch  73 Update  9150 Cost  2.79398591374e-06
Epoch  73 Update  9160 Cost  2.19680932787e-05
Epoch  73 Update  9170 Cost  1.50733858391e-05
Epoch  73 Update  9180 Cost  1.21378379845e-05
Epoch  73 Update  9190 Cost  0.00029565760633
Epoch  73 Update  9200 Cost  2.65987773673e-06
Epoch  73 Update  9210 Cost  6.13941165284e-06
Epoch  73 Update  9220 Cost  1.95557167899e-05
Epoch  73 Update  9230 Cost  3.84687446058e-05
Epoch  73 Update  9240 Cost  4.46554004156e-05
Epoch  73 Update  9250 Cost  0.00025760877179
('Train ', 0.0, 'Valid ', 0.1333333333333333, 'Test ', 0.22363999999999995)
Seen 1998 samples
Epoch  74 Update  9260 Cost  1.41561974942e-06
Epoch  74 Update  9270 Cost  1.65336969076e-05
Epoch  74 Update  9280 Cost  1.03719057734e-05
Epoch  74 Update  9290 Cost  2.80144331555e-06
Epoch  74 Update  9300 Cost  3.66460808436e-05
Epoch  74 Update  9310 Cost  5.17824673807e-06
Epoch  74 Update  9320 Cost  7.45845090933e-06
Epoch  74 Update  9330 Cost  7.00357247752e-07
Epoch  74 Update  9340 Cost  1.65152905538e-05
Epoch  74 Update  9350 Cost  1.46827505887e-05
Epoch  74 Update  9360 Cost  3.2069721783e-05
Epoch  74 Update  9370 Cost  3.75703457394e-05
Seen 1998 samples
Epoch  75 Update  9380 Cost  7.04223057255e-05
Epoch  75 Update  9390 Cost  3.5695189581e-05
Epoch  75 Update  9400 Cost  1.35308682729e-05
Epoch  75 Update  9410 Cost  7.37609639145e-07
Epoch  75 Update  9420 Cost  2.45869529181e-07
Epoch  75 Update  9430 Cost  4.78332776765e-06
Epoch  75 Update  9440 Cost  1.76580601874e-06
Epoch  75 Update  9450 Cost  4.00845374315e-06
Epoch  75 Update  9460 Cost  3.07383852487e-05
Epoch  75 Update  9470 Cost  5.99777649768e-06
Epoch  75 Update  9480 Cost  1.86266800029e-06
Epoch  75 Update  9490 Cost  3.43705214618e-05
Epoch  75 Update  9500 Cost  5.19053683092e-05
Seen 1998 samples
Epoch  76 Update  9510 Cost  1.12504244498e-06
Epoch  76 Update  9520 Cost  7.72657858761e-06
Epoch  76 Update  9530 Cost  8.86655379873e-06
Epoch  76 Update  9540 Cost  1.195879031e-05
Epoch  76 Update  9550 Cost  4.47424426966e-05
Epoch  76 Update  9560 Cost  1.283786969e-05
Epoch  76 Update  9570 Cost  1.34110493377e-07
Epoch  76 Update  9580 Cost  2.92455442832e-05
Epoch  76 Update  9590 Cost  0.000113639784104
Epoch  76 Update  9600 Cost  3.46457818523e-06
Epoch  76 Update  9610 Cost  7.45059935525e-07
Epoch  76 Update  9620 Cost  1.15115981316e-05
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.22099999999999997)
Seen 1998 samples
Epoch  77 Update  9630 Cost  4.34619942098e-05
Epoch  77 Update  9640 Cost  5.13769737154e-05
Epoch  77 Update  9650 Cost  1.66497320606e-05
Epoch  77 Update  9660 Cost  7.31658292352e-06
Epoch  77 Update  9670 Cost  1.81796008292e-06
Epoch  77 Update  9680 Cost  8.91017043614e-05
Epoch  77 Update  9690 Cost  5.6327639868e-06
Epoch  77 Update  9700 Cost  1.88501007869e-06
Epoch  77 Update  9710 Cost  1.79866201506e-05
Epoch  77 Update  9720 Cost  6.25857910563e-06
Epoch  77 Update  9730 Cost  3.52416259375e-06
Epoch  77 Update  9740 Cost  3.54155345121e-05
Epoch  77 Update  9750 Cost  1.93290429706e-06
Seen 1998 samples
Epoch  78 Update  9760 Cost  1.09901802716e-05
Epoch  78 Update  9770 Cost  0.000174464526935
Epoch  78 Update  9780 Cost  2.06562726817e-05
Epoch  78 Update  9790 Cost  1.66082718351e-05
Epoch  78 Update  9800 Cost  6.92929188517e-06
Epoch  78 Update  9810 Cost  9.8798682302e-06
Epoch  78 Update  9820 Cost  4.15747081206e-06
Epoch  78 Update  9830 Cost  1.63028107636e-05
Epoch  78 Update  9840 Cost  1.78070501988e-06
Epoch  78 Update  9850 Cost  3.64185070794e-05
Epoch  78 Update  9860 Cost  4.4480671022e-06
Epoch  78 Update  9870 Cost  5.82238972129e-05
Seen 1998 samples
Epoch  79 Update  9880 Cost  1.6153402612e-05
Epoch  79 Update  9890 Cost  5.17824300914e-05
Epoch  79 Update  9900 Cost  5.00686701344e-06
Epoch  79 Update  9910 Cost  2.30968112191e-07
Epoch  79 Update  9920 Cost  9.84257076198e-06
Epoch  79 Update  9930 Cost  6.09835951764e-05
Epoch  79 Update  9940 Cost  3.10954892484e-05
Epoch  79 Update  9950 Cost  1.18464993193e-06
Epoch  79 Update  9960 Cost  1.86327724805e-05
Epoch  79 Update  9970 Cost  9.84280541161e-06
Epoch  79 Update  9980 Cost  5.37736377737e-05
Epoch  79 Update  9990 Cost  2.92812364933e-06
Saving...
Done
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.2208)
Epoch  79 Update  10000 Cost  1.77111633093e-06
Seen 1998 samples
Epoch  80 Update  10010 Cost  1.29016525534e-05
Epoch  80 Update  10020 Cost  6.2843246269e-05
Epoch  80 Update  10030 Cost  0.000333852425683
Epoch  80 Update  10040 Cost  2.23519850806e-06
Epoch  80 Update  10050 Cost  2.15971249418e-05
Epoch  80 Update  10060 Cost  4.48505561508e-05
Epoch  80 Update  10070 Cost  2.75673937722e-06
Epoch  80 Update  10080 Cost  2.75789825537e-05
Epoch  80 Update  10090 Cost  8.7543274276e-05
Epoch  80 Update  10100 Cost  2.11597875932e-06
Epoch  80 Update  10110 Cost  1.33594958243e-05
Epoch  80 Update  10120 Cost  1.98188308786e-06
Seen 1998 samples
Epoch  81 Update  10130 Cost  0.000167067628354
Epoch  81 Update  10140 Cost  1.3262550965e-05
Epoch  81 Update  10150 Cost  4.33628838437e-06
Epoch  81 Update  10160 Cost  5.85629641137e-06
Epoch  81 Update  10170 Cost  1.10350583782e-05
Epoch  81 Update  10180 Cost  6.68335314913e-06
Epoch  81 Update  10190 Cost  6.4210347773e-05
Epoch  81 Update  10200 Cost  2.00359900191e-05
Epoch  81 Update  10210 Cost  1.44546111187e-05
Epoch  81 Update  10220 Cost  1.66749614436e-05
Epoch  81 Update  10230 Cost  1.89231122931e-05
Epoch  81 Update  10240 Cost  6.55653650483e-07
Epoch  81 Update  10250 Cost  6.68438951834e-06
Seen 1998 samples
Epoch  82 Update  10260 Cost  2.5055342121e-05
Epoch  82 Update  10270 Cost  2.28189765039e-05
Epoch  82 Update  10280 Cost  1.06846891867e-05
Epoch  82 Update  10290 Cost  0.000115037968499
Epoch  82 Update  10300 Cost  3.51488160959e-05
Epoch  82 Update  10310 Cost  1.423843878e-05
Epoch  82 Update  10320 Cost  4.87276474814e-06
Epoch  82 Update  10330 Cost  3.81476684197e-06
Epoch  82 Update  10340 Cost  5.471428085e-05
Epoch  82 Update  10350 Cost  2.10107168641e-06
Epoch  82 Update  10360 Cost  1.42430908454e-05
('Train ', 0.0, 'Valid ', 0.12380952380952381, 'Test ', 0.22192000000000001)
Early Stop!
Seen 1760 samples
Train  0.0 Valid  0.104761904762 Test  0.22092
The code run for 83 epochs, with 5.504656 sec/epochs
Training took 456.9s
Out[63]:
(0.0, 0.10476190476190472, 0.22092000000000001)

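The run above periodically hit "Saving..." checkpoints; per the saveto option shown in the next cell, the best parameters end up in an .npz archive. As a quick sanity check after training (a minimal sketch using only standard NumPy, assuming the file was written to the working directory):

    saved = np.load('lstm_model.npz')   # archive produced by the run above
    print(saved.files)                  # names of the stored arrays
    saved.close()
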
In [64]:
def train_lstm(
    dim_proj=128,  # word embedding dimension and LSTM number of hidden units.
    patience=10,  # Number of epochs to wait before early stop if no progress
    max_epochs=5000,  # The maximum number of epochs to run
    dispFreq=10,  # Display the training progress to stdout every N updates
    decay_c=0.,  # Weight decay for the classifier applied to the U weights.
    lrate=0.0001,  # Learning rate for sgd (not used for adadelta and rmsprop)
    n_words=10000,  # Vocabulary size
    optimizer=adadelta,  # sgd, adadelta and rmsprop available; sgd is very hard to use, not recommended (it probably needs momentum and a decaying learning rate).
    encoder='lstm',  # TODO: can be removed, must be lstm.
    saveto='lstm_model.npz',  # The best model will be saved there
    validFreq=370,  # Compute the validation error after this number of updates.
    saveFreq=1110,  # Save the parameters after every saveFreq updates
    maxlen=100,  # Sequences longer than this get ignored
    batch_size=16,  # The batch size during training.
    valid_batch_size=64,  # The batch size used for the validation/test set.
    dataset='imdb',

    # Parameters for extra options
    noise_std=0.,
    use_dropout=True,  # if False, slightly faster, but worse test error.
                       # This frequently needs a bigger model.
    reload_model=None,  # Path to a saved model we want to start from.
    test_size=-1,  # If >0, we keep only this number of test examples.
):
    pass  # body omitted here; the cells below walk through pieces of it

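If the cell above is indeed the signature of the tutorial's train_lstm (the name and defaults match lstm.py from the Theano LSTM tutorial; treat that as an assumption here), then a short run that overrides a couple of the defaults looks like this, and its return value is a (train_err, valid_err, test_err) triple of the kind shown as Out[63] above:

    # hedged example call: cap the number of epochs and subsample the test set
    train_lstm(max_epochs=100, test_size=500)
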
In [ ]:
    # Model options
    model_options = locals().copy()
    print("model options", model_options)

    load_data, prepare_data = get_dataset(dataset)

    print('Loading data')
    train, valid, test = load_data(n_words=n_words, valid_portion=0.05,
                                   maxlen=maxlen)
    if test_size > 0:
        # The test set is sorted by size, but we want to keep examples of
        # random size, so we select a random subset of them.
        idx = numpy.arange(len(test[0]))
        numpy.random.shuffle(idx)
        idx = idx[:test_size]
        test = ([test[0][n] for n in idx], [test[1][n] for n in idx])

    ydim = numpy.max(train[1]) + 1

    model_options['ydim'] = ydim

    print('Building model')
    # This creates the initial parameters as numpy ndarrays.
    # Dict: name (string) -> numpy ndarray

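To see the test-set subsampling step in isolation, here is a standalone toy version of the same indexing trick (all names and sizes here are made up for illustration):

    import numpy
    toy_test = (['a', 'b', 'c', 'd', 'e', 'f'], [0, 1, 0, 1, 0, 1])
    idx = numpy.arange(len(toy_test[0]))
    numpy.random.shuffle(idx)
    idx = idx[:3]                                 # pretend test_size = 3
    toy_test = ([toy_test[0][n] for n in idx], [toy_test[1][n] for n in idx])
    print(toy_test)                               # 3 randomly chosen (x, y) pairs
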
In [ ]:
params = init_params(model_options)

In [33]:
T.matrix().astype(theano.config.floatX)


Out[33]:
<TensorType(float64, matrix)>

In [37]:
theano.shared(np.zeros(9)).astype(theano.config.floatX)


Out[37]:
<TensorType(float64, vector)>

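The two cells above apply .astype to an already-built symbolic/shared variable, which returns a new, cast variable rather than changing the original. An alternative, used by the layer classes below, is to build the underlying NumPy data in floatX to begin with; a minimal sketch using only calls that already appear in this notebook:

    b_values = np.zeros(9).astype(theano.config.floatX)           # data in floatX
    b = theano.shared(value=b_values, name='b_test', borrow=True)
    print(b.type)                                                  # dtype matches floatX

    X = T.matrix(dtype=theano.config.floatX)                       # symbolic input in floatX
    print(X.type)
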
Below are copies of the classes defined in LSTM_Herta.py.


In [20]:
%time j_test_time = 3+2


CPU times: user 33 µs, sys: 0 ns, total: 33 µs
Wall time: 17.2 µs

In [21]:
j_test_time


Out[21]:
5

In [12]:
Gates = namedtuple("Gates",['g','i','f','o'])
Psis = namedtuple("Psis",['g','i','f','o','h'])

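These namedtuples are just bookkeeping for the four LSTM gates (g, i, f, o) and, for Psis, the output squashing h. As an illustration, they might be filled with the conventional LSTM activations (this particular choice is an example, not something fixed by the definitions above):

    psis = Psis(g=T.tanh, i=T.nnet.sigmoid, f=T.nnet.sigmoid, o=T.nnet.sigmoid, h=T.tanh)
    print(psis.f)        # fields are accessed by gate name, e.g. the forget gate's activation
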
In [ ]:
class Thetab_right(object):
    def __init__(self, l, s_ls, al=None, Theta=None, b=None, activation=T.tanh, rng=None):
        s_lp1, s_l = s_ls

        if rng is None:
            rng = np.random.RandomState(1234)

        if Theta is None:
            Theta_values = np.asarray(
                rng.uniform(
                    low=-np.sqrt(6. / (s_l + s_lp1)),
                    high=np.sqrt(6. / (s_l + s_lp1)), size=(s_l, s_lp1)),
                dtype=theano.config.floatX
            )
            if activation == T.nnet.sigmoid:
                Theta_values *= np.float32(4)

            Theta = theano.shared(value=Theta_values, name="Theta" + str(l), borrow=True)

        if b is None:
            b_values = np.zeros(s_lp1).astype(theano.config.floatX)
            b = theano.shared(value=b_values, name='b' + str(l), borrow=True)

        if al is None:
            al = T.matrix(dtype=theano.config.floatX)

        self.Theta = Theta  # size dims. (s_l, s_lp1), i.e. s_l x s_lp1
        self.b     = b      # dims. s_lp1
        self.al    = al     # dims. s_l

        self.l     = l

        if activation is None:
            self.psi = None
        else:
            self.psi = activation

    def connect_through(self):
        """ connect_through

            Note that I made connect_through a separate class method, separate from the automatic initialization,
            because you can then make changes to the "layer units" or "nodes" before "connecting the layers"
        """
        # my attempt at a left-action version; T.tile made problems for scan
        # lin_zlp1 = T.dot(self.Theta, self.al) + T.tile(self.b, (1, self.al.shape[1].astype('int32')))  # z^{(l+1)}
        lin_zlp1 = T.dot(self.al, self.Theta) + self.b  # z^{(l+1)}
        if self.psi is None:
            self.alp1 = lin_zlp1
        else:
            self.alp1 = self.psi(lin_zlp1)
In [40]:
test_data[0][0]


Out[40]:
[array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.])]

In [41]:
test_data[0][1]


Out[41]:
[array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  1.,  0.]),
 array([ 0.,  0.,  1.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.])]

In [43]:
X_test_data[0]


Out[43]:
[array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 1.,  0.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  0.,  1.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  1.,  0.]),
 array([ 0.,  0.,  0.,  0.,  1.,  0.,  0.]),
 array([ 0.,  0.,  1.,  0.,  0.,  0.,  0.]),
 array([ 0.,  0.,  0.,  0.,  0.,  0.,  1.]),
 array([ 0.,  1.,  0.,  0.,  0.,  0.,  0.])]

In [48]:
dir(theano.sandbox)


Out[48]:
['__builtins__',
 '__doc__',
 '__file__',
 '__name__',
 '__package__',
 '__path__',
 'cuda',
 'gpuarray',
 'multinomial',
 'rng_mrg']

In [ ]: