Imports


In [1]:
import numpy as np
import matplotlib.pyplot as plt

%matplotlib inline

# Local BonsaiNet modules
import Neuron
import models
import train
import batch_utils
import data_transforms
import generate_training_data


Using Theano backend.

Data


In [2]:
training_data = generate_training_data.y_shape(n_nodes=20, 
                                               data_size=1000, 
                                               first_length=10, 
                                               branching_node=6)
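
The call above builds a dictionary of synthetic Y-shaped training morphologies. The next cell reads the sample count from training_data['morphology']['n20'], so a quick sanity check of that layout (a sketch, assuming those same keys) is:


In [ ]:
# Sketch: inspect the generated data; assumes the 'morphology'/'n20' keys
# that the "Global parameters" cell below relies on.
print(training_data['morphology']['n20'].shape)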

Global parameters


In [3]:
n_nodes = 20
input_dim = 100
n_epochs = 5
batch_size = 32
# Number of full batches available in the training set.
n_batch_per_epoch = np.floor(training_data['morphology']['n20'].shape[0]/batch_size).astype(int)
d_iters = 20  # discriminator iterations per batch (see "After 20 iterations" in the log below)
lr_discriminator = 0.001
lr_generator = 0.001
train_loss = 'binary_crossentropy'
#train_loss = 'wasserstein_loss'

rule = 'none'
# Weight-constraint bounds for the discriminator, geometry and morphology
# models (presumably WGAN-style weight clipping).
d_weight_constraint = [-.03, .03]
g_weight_constraint = [-33.3, 33.3]
m_weight_constraint = [-33.3, 33.3]
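
The commented-out 'wasserstein_loss' option, together with the weight-constraint bounds above, points at a WGAN-style setup. If train.py does not already define that loss, a minimal Keras sketch of it (an assumption, not BonsaiNet's confirmed implementation) would be:


In [ ]:
# Sketch of a WGAN critic loss for Keras/Theano; hypothetical helper, only
# needed if 'wasserstein_loss' is not already provided by train.py.
from keras import backend as K

def wasserstein_loss(y_true, y_pred):
    # With labels in {-1, +1}, minimizing the mean product approximates
    # the Wasserstein critic objective.
    return K.mean(y_true * y_pred)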

Run


In [5]:
geom_model, morph_model, disc_model, gan_model = \
    train.train_model(training_data=training_data,
                      n_nodes=n_nodes,
                      input_dim=input_dim,
                      n_epochs=n_epochs,
                      batch_size=batch_size,
                      n_batch_per_epoch=n_batch_per_epoch,
                      d_iters=d_iters,
                      lr_discriminator=lr_discriminator,
                      lr_generator=lr_generator,
                      d_weight_constraint=d_weight_constraint,
                      g_weight_constraint=g_weight_constraint,
                      m_weight_constraint=m_weight_constraint,
                      rule=rule,
                      train_loss=train_loss,
                      verbose=True)


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_1 (InputLayer)             (None, 19, 3)         0                                            
____________________________________________________________________________________________________
input_2 (InputLayer)             (None, 19, 20)        0                                            
____________________________________________________________________________________________________
merge_1 (Merge)                  (None, 19, 23)        0           input_1[0][0]                    
                                                                   input_2[0][0]                    
____________________________________________________________________________________________________
lambda_1 (Lambda)                (None, 20, 103)       0           merge_1[0][0]                    
____________________________________________________________________________________________________
reshape_1 (Reshape)              (None, 1, 2060)       0           lambda_1[0][0]                   
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 1, 200)        412200      reshape_1[0][0]                  
____________________________________________________________________________________________________
dense_2 (Dense)                  (None, 1, 50)         10050       dense_1[0][0]                    
____________________________________________________________________________________________________
dense_3 (Dense)                  (None, 1, 10)         510         dense_2[0][0]                    
____________________________________________________________________________________________________
dense_4 (Dense)                  (None, 1, 1)          11          dense_3[0][0]                    
====================================================================================================
Total params: 422,771
Trainable params: 422,771
Non-trainable params: 0
____________________________________________________________________________________________________
____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
noise_input (InputLayer)         (None, 1, 100)        0                                            
____________________________________________________________________________________________________
dense_5 (Dense)                  (None, 1, 100)        10100       noise_input[0][0]                
____________________________________________________________________________________________________
dense_6 (Dense)                  (None, 1, 100)        10100       dense_5[0][0]                    
____________________________________________________________________________________________________
dense_7 (Dense)                  (None, 1, 50)         5050        dense_6[0][0]                    
____________________________________________________________________________________________________
dense_8 (Dense)                  (None, 1, 57)         2907        dense_7[0][0]                    
____________________________________________________________________________________________________
reshape_2 (Reshape)              (None, 19, 3)         0           dense_8[0][0]                    
====================================================================================================
Total params: 28,157
Trainable params: 28,157
Non-trainable params: 0
____________________________________________________________________________________________________
____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
noise_input (InputLayer)         (None, 1, 100)        0                                            
____________________________________________________________________________________________________
dense_9 (Dense)                  (None, 1, 100)        10100       noise_input[0][0]                
____________________________________________________________________________________________________
dense_10 (Dense)                 (None, 1, 100)        10100       dense_9[0][0]                    
____________________________________________________________________________________________________
dense_11 (Dense)                 (None, 1, 380)        38380       dense_10[0][0]                   
____________________________________________________________________________________________________
reshape_3 (Reshape)              (None, 19, 20)        0           dense_11[0][0]                   
____________________________________________________________________________________________________
lambda_2 (Lambda)                (None, 19, 20)        0           reshape_3[0][0]                  
====================================================================================================
Total params: 58,580
Trainable params: 58,580
Non-trainable params: 0
____________________________________________________________________________________________________

====================

Epoch #0

    After 20 iterations
        Discriminator Loss                     = 0.0047345017083
/Users/RoozbehFarhoudi/anaconda/lib/python2.7/site-packages/scipy/sparse/compressed.py:730: SparseEfficiencyWarning: Changing the sparsity structure of a csr_matrix is expensive. lil_matrix is more efficient.
  SparseEfficiencyWarning)
/Users/RoozbehFarhoudi/anaconda/lib/python2.7/site-packages/matplotlib/collections.py:590: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
  if self._edgecolors == str('face'):
    Generator_Loss: 5.39226818085
2
     Level #1 Epoch #0 Batch #2
    After 20 iterations
        Discriminator Loss                     = 0.00274331355467

    Generator_Loss: 5.00458955765
3
    After 20 iterations
        Discriminator Loss                     = 0.00540957273915

    Generator_Loss: 5.26497173309
4
     Level #1 Epoch #0 Batch #4
    After 20 iterations
        Discriminator Loss                     = 0.0299781206995

    Generator_Loss: 5.81227207184
5
    After 20 iterations
        Discriminator Loss                     = 0.0337119176984

    Generator_Loss: 5.42659473419
6
     Level #1 Epoch #0 Batch #6
    After 20 iterations
        Discriminator Loss                     = 0.0330997295678

    Generator_Loss: 5.48542070389
7
    After 20 iterations
        Discriminator Loss                     = 0.100402034819

    Generator_Loss: 4.51286172867
8
     Level #1 Epoch #0 Batch #8
    After 20 iterations
        Discriminator Loss                     = 0.118518576026

    Generator_Loss: 4.84774112701
9
    After 20 iterations
        Discriminator Loss                     = 0.125451266766

    Generator_Loss: 5.12430334091
10
     Level #1 Epoch #0 Batch #10
    After 20 iterations
        Discriminator Loss                     = 0.13253980875

    Generator_Loss: 5.21964406967
11
    After 20 iterations
        Discriminator Loss                     = 0.208805814385

    Generator_Loss: 4.31326818466
12
     Level #1 Epoch #0 Batch #12
    After 20 iterations
        Discriminator Loss                     = 0.239489763975

    Generator_Loss: 4.4233584404
13
    After 20 iterations
        Discriminator Loss                     = 0.25026935339

    Generator_Loss: 4.2036819458
14
     Level #1 Epoch #0 Batch #14
    After 20 iterations
        Discriminator Loss                     = 0.272627562284

    Generator_Loss: 3.60699915886
15
    After 20 iterations
        Discriminator Loss                     = 0.339631408453

    Generator_Loss: 3.72375679016
16
     Level #1 Epoch #0 Batch #16
    After 20 iterations
        Discriminator Loss                     = 0.20184931159

    Generator_Loss: 3.19971871376
17
    After 20 iterations
        Discriminator Loss                     = 0.364580959082

    Generator_Loss: 3.25285124779
18
     Level #1 Epoch #0 Batch #18
    After 20 iterations
        Discriminator Loss                     = 0.485932379961

    Generator_Loss: 2.58915328979
19
    After 20 iterations
        Discriminator Loss                     = 0.344829201698

    Generator_Loss: 2.45035338402
20
     Level #1 Epoch #0 Batch #20
    After 20 iterations
        Discriminator Loss                     = 0.438456207514

    Generator_Loss: 1.5708386898
21
    After 20 iterations
        Discriminator Loss                     = 0.414980202913

    Generator_Loss: 2.06802582741
22
     Level #1 Epoch #0 Batch #22
    After 20 iterations
        Discriminator Loss                     = 0.505568742752

    Generator_Loss: 1.85510134697
23
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-5-db9248fa6980> in <module>()
     13                       rule=rule,
     14                       train_loss=train_loss,
---> 15                       verbose=True)

/Users/RoozbehFarhoudi/Documents/Repos/BonsaiNet/train.pyc in train_model(training_data, n_nodes, input_dim, n_epochs, batch_size, n_batch_per_epoch, d_iters, lr_discriminator, lr_generator, d_weight_constraint, g_weight_constraint, m_weight_constraint, rule, train_loss, verbose)
    301                     d_model.train_on_batch([X_locations_real_first_half,
    302                                             X_parent_real_first_half],
--> 303                                             y_real_first_half)
    304                 list_d_loss.append(disc_loss)
    305                 disc_loss = \

/Users/RoozbehFarhoudi/anaconda/lib/python2.7/site-packages/keras/engine/training.pyc in train_on_batch(self, x, y, sample_weight, class_weight)
   1318             ins = x + y + sample_weights
   1319         self._make_train_function()
-> 1320         outputs = self.train_function(ins)
   1321         if len(outputs) == 1:
   1322             return outputs[0]

/Users/RoozbehFarhoudi/anaconda/lib/python2.7/site-packages/keras/backend/theano_backend.pyc in __call__(self, inputs)
    957     def __call__(self, inputs):
    958         assert isinstance(inputs, (list, tuple))
--> 959         return self.function(*inputs)
    960 
    961 

/Users/RoozbehFarhoudi/anaconda/lib/python2.7/site-packages/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
    857         t0_fn = time.time()
    858         try:
--> 859             outputs = self.fn()
    860         except Exception:
    861             if hasattr(self.fn, 'position_of_error'):

/Users/RoozbehFarhoudi/anaconda/lib/python2.7/site-packages/theano/gof/op.pyc in rval(p, i, o, n)
    910             # default arguments are stored in the closure of `rval`
    911             def rval(p=p, i=node_input_storage, o=node_output_storage, n=node):
--> 912                 r = p(n, [x[0] for x in i], o)
    913                 for o in node.outputs:
    914                     compute_map[o][0] = True

/Users/RoozbehFarhoudi/anaconda/lib/python2.7/site-packages/theano/tensor/blas.pyc in perform(self, node, inp, out)
   1550         z, = out
   1551         try:
-> 1552             z[0] = numpy.asarray(numpy.dot(x, y))
   1553         except ValueError as e:
   1554             # The error raised by numpy has no shape information, we mean to

KeyboardInterrupt: 
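
Training was stopped by hand after about 22 batches of epoch 0, hence the KeyboardInterrupt above. Once train_model returns, the generators can be sampled from noise. A sketch, assuming geom_model produces the (19, 3) node locations and morph_model the (19, 20) parent encoding shown in the summaries above:


In [ ]:
# Hypothetical sampling sketch (standard Keras predict calls); the noise shape
# and output shapes follow the generator summaries printed above.
noise = np.random.normal(size=(1, 1, input_dim))
locations = geom_model.predict(noise)   # expected shape (1, 19, 3)
parents = morph_model.predict(noise)    # expected shape (1, 19, 20)
print(locations.shape)
print(parents.shape)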

In [ ]: