In [1]:
import numpy as np
import pandas as pd
from keras import callbacks
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
from sklearn.preprocessing import StandardScaler, MinMaxScaler

import theano
theano.config.openmp = True

%matplotlib inline
%env OMP_NUM_THREADS=12


Using Theano backend.
env: OMP_NUM_THREADS=12

In [ ]:


In [2]:
def create_dataset_lags(dataset, signal, nblags=1):
    df = dataset.copy();
    for i in range(nblags):
        name = signal + "_" + str(i+1);
        df[name] = dataset[signal].shift(i+1);
    return df

In [3]:
def load_dataset(source , signal):
    dataframe = pd.read_csv(source, engine='python')
    return dataframe ;

def get_lag_names(signal, nblags):
    names = [];
    for i in range(nblags):
        name = signal + "_" + str(i+1);
        names.append(name);
    return names;

def cut_dataset(dataframe , signal, lags):
    train_size = int(dataframe.shape[0] * 0.67)
    scaler = MinMaxScaler(); # StandardScaler()
    scaler.fit(dataframe[signal][0:train_size].values.ravel())
    dataframe['scaled_' + signal] = scaler.transform(dataframe[signal].values.ravel())
    lagged_df = create_dataset_lags(dataframe, 'scaled_' + signal, lags)
    (train_df, test_df) = (lagged_df[0:train_size] , lagged_df[train_size:])

    return (scaler, train_df , test_df)

In [4]:
# create and fit the LSTM network

def train_model(train_df , signal, lags, epochs):
    model = Sequential()
    model.add(Dense(40, input_dim=lags))
    # model.add(LSTM(40, input_dim=lags))
    model.add(Dropout(0.1))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    
    lag_names = get_lag_names('scaled_' + signal , lags);
    N = train_df.shape[0] - lags
    NEstim = (N * 4) // 5;
    trainX = train_df[lag_names][lags:].values
    # trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    trainY = train_df['scaled_' + signal][lags:].values
    estimX = trainX[0:NEstim]
    estimY = trainY[0:NEstim]
    valX = trainX[ NEstim : ]
    valY = trainY[ NEstim : ]

    lStopCallback = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')
    lHistory = model.fit(estimX, estimY, nb_epoch=epochs, batch_size=1, validation_data=(valX , valY), verbose=2, 
                        callbacks=[lStopCallback])
    print(lHistory.__dict__)
    return model;

def plot_model(model):
    from IPython.display import SVG
    from keras.utils.visualize_util import model_to_dot

    SVG(model_to_dot(model, show_shapes=True, show_layer_names=True).create(prog='dot', format='svg'))

In [5]:
# make predictions

def compute_L2_MAPE(signal , estimator):
    lMean = np.mean( (signal - estimator)**2 );
    lMAPE = np.mean( np.abs((signal - estimator) / signal ));
    lL2 = np.sqrt(lMean);
    return (lL2 , lMAPE);


def predict_signal(model, scaler, signal, nblags, train_df, test_df, idataframe):
    lag_names = get_lag_names('scaled_' + signal , nblags);
    trainX = train_df[lag_names].values
    #trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    trainPredict = model.predict(trainX)
    testX = test_df[lag_names].values
    #testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
    testPredict = model.predict(testX)
    trainPredict = scaler.inverse_transform(trainPredict[nblags:])
    testPredict = scaler.inverse_transform(testPredict)
    
    # calculate root mean squared error
    lTrainL2 = compute_L2_MAPE(train_df[signal][nblags:].values, trainPredict)
    lTestL2 = compute_L2_MAPE(test_df[signal].values, testPredict)
    print('TRAIN_TEST_RMSE_MAPE', lTrainL2 , lTestL2)

    out_df = pd.DataFrame()
    out_df = idataframe.copy();
    out_N = out_df.shape[0]
    out_df['Time'] = range(out_N)
    # out_df['scaled_output'] = 0;
    lSeries = pd.Series(np.full(nblags , np.nan));
    lSeries1 = pd.Series(trainPredict.ravel());
    lSeries2 = pd.Series(testPredict.ravel());
    # print(out_N , lSeries.shape[0], lSeries1.shape[0], lSeries2.shape[0])
    lSeries = lSeries.append(lSeries1);
    lSeries = lSeries.append(lSeries2);
    out_df['output'] = lSeries.values;
    out_df['output'] = out_df['output'];
    return out_df;

In [ ]:


In [6]:
def full_test(dataset, signal, nblags , epochs):
    full_df = load_dataset(dataset , signal);
    (scaler, train_df, test_df) = cut_dataset(full_df, signal , nblags);
    model = train_model(train_df, signal , nblags, epochs);
    print(model.__dict__)
    plot_model(model);
    out_df = predict_signal(model, scaler, signal, nblags, train_df, test_df, full_df);
    lNewName = signal + "_" + str(nblags) +  "_" + str(epochs) 
    out_df[lNewName] = out_df[signal]
    out_df.plot('Time' , [lNewName,  'output'] , figsize=(22,12));

In [7]:
full_test('../data/ozone-la.csv', 'Ozone', 24, 100)


/usr/lib/python3/dist-packages/sklearn/preprocessing/data.py:321: DeprecationWarning: Passing 1d arrays as data is deprecated in 0.17 and will raise ValueError in 0.19. Reshape your data either using X.reshape(-1, 1) if your data has a single feature or X.reshape(1, -1) if it contains a single sample.
  warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
/usr/lib/python3/dist-packages/sklearn/preprocessing/data.py:356: DeprecationWarning: Passing 1d arrays as data is deprecated in 0.17 and will raise ValueError in 0.19. Reshape your data either using X.reshape(-1, 1) if your data has a single feature or X.reshape(1, -1) if it contains a single sample.
  warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
Train on 96 samples, validate on 24 samples
Epoch 1/100
0s - loss: 0.0696 - val_loss: 0.0351
Epoch 2/100
0s - loss: 0.0423 - val_loss: 0.0413
Epoch 3/100
0s - loss: 0.0436 - val_loss: 0.0305
Epoch 4/100
0s - loss: 0.0278 - val_loss: 0.0336
Epoch 5/100
0s - loss: 0.0309 - val_loss: 0.0280
Epoch 6/100
0s - loss: 0.0305 - val_loss: 0.0258
Epoch 7/100
0s - loss: 0.0291 - val_loss: 0.0293
Epoch 8/100
0s - loss: 0.0194 - val_loss: 0.0226
Epoch 9/100
0s - loss: 0.0220 - val_loss: 0.0214
Epoch 10/100
0s - loss: 0.0214 - val_loss: 0.0220
Epoch 11/100
0s - loss: 0.0174 - val_loss: 0.0234
Epoch 12/100
0s - loss: 0.0226 - val_loss: 0.0220
Epoch 13/100
0s - loss: 0.0170 - val_loss: 0.0208
Epoch 14/100
0s - loss: 0.0165 - val_loss: 0.0203
Epoch 15/100
0s - loss: 0.0153 - val_loss: 0.0197
Epoch 16/100
0s - loss: 0.0139 - val_loss: 0.0262
Epoch 17/100
0s - loss: 0.0195 - val_loss: 0.0338
Epoch 18/100
0s - loss: 0.0158 - val_loss: 0.0201
Epoch 19/100
0s - loss: 0.0166 - val_loss: 0.0213
Epoch 20/100
0s - loss: 0.0146 - val_loss: 0.0210
Epoch 21/100
Epoch 00020: early stopping
0s - loss: 0.0123 - val_loss: 0.0198
{'history': {'loss': [0.069583153157585301, 0.042325833248495891, 0.04362075918655365, 0.027753840169594923, 0.030946255625341479, 0.030459166032930213, 0.029070709329336825, 0.01938001645351406, 0.021957699189177698, 0.021366767710301104, 0.017376053036420796, 0.022605090407305777, 0.016980071553563885, 0.016516372370157722, 0.015293252275644894, 0.013914036258475981, 0.019500149697555053, 0.015848472570915983, 0.016583438535648771, 0.014646618834414463, 0.012287924383780976], 'val_loss': [0.035060963011346757, 0.041321042391549177, 0.03046425021334746, 0.033550450612437999, 0.02804786442114467, 0.025814555435469327, 0.02927899477193326, 0.022624582150172046, 0.021429513973695673, 0.022016059763700468, 0.02344419140960478, 0.022041325097234221, 0.020804481787005596, 0.020252447278229131, 0.019745127595683698, 0.026194735478990577, 0.033832657173964741, 0.020080254802148072, 0.021293394370180369, 0.020951106115139357, 0.019787994044842588]}, 'epoch': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 'model': <keras.models.Sequential object at 0x7fbdfa0d3f28>, 'params': {'nb_epoch': 100, 'metrics': ['loss', 'val_loss'], 'batch_size': 1, 'nb_sample': 96, 'verbose': 2, 'do_validation': True}}
{'outputs': [Elemwise{add,no_inplace}.0], 'supports_masking': False, 'inputs': [dense_input_1], 'outbound_nodes': [], 'output_layers_tensor_indices': [0], '_flattened_layers': None, 'input_layers_node_indices': [0], 'input_layers_tensor_indices': [0], 'input_names': ['dense_input_1'], 'container_nodes': {'dense_1_ib-0', 'dropout_1_ib-0', 'dense_2_ib-0', 'dense_input_1_ib-0'}, 'output_names': ['dense_2'], '_output_shape_cache': {}, 'loss_weights': None, 'metrics_names': ['loss'], 'stop_training': True, 'input_layers': [<keras.engine.topology.InputLayer object at 0x7fbdfa0d3198>], 'sample_weight_mode': None, 'nodes_by_depth': {0: [<keras.engine.topology.Node object at 0x7fbdf8d69b70>], 1: [<keras.engine.topology.Node object at 0x7fbdf8d5fc88>], 2: [<keras.engine.topology.Node object at 0x7fbdfa0f8908>], 3: [<keras.engine.topology.Node object at 0x7fbdfa0d3f60>]}, 'loss': 'mse', 'name': 'sequential_1', 'output_layers': [<keras.layers.core.Dense object at 0x7fbdf8d5fc50>], 'layers': [<keras.layers.core.Dense object at 0x7fbdfa0d3c18>, <keras.layers.core.Dropout object at 0x7fbdfa0f8828>, <keras.layers.core.Dense object at 0x7fbdf8d5fc50>], 'optimizer': <keras.optimizers.Adam object at 0x7fbdf8aeaf60>, '_output_tensor_cache': {}, 'metrics': [], 'metrics_tensors': [], 'built': True, '_output_mask_cache': {'140453920715888_94179042275360': None}, 'model': <keras.engine.training.Model object at 0x7fbdf8d69ba8>, 'inbound_nodes': [<keras.engine.topology.Node object at 0x7fbdfa0f8898>], 'output_layers_node_indices': [0]}
TRAIN_TEST_RMSE_MAPE (1.8729268019755232, 0.46069062612989559) (1.4523625949723578, 0.47523035672006725)

In [8]:
#for ep in [10 , 40, 160, 640]:
#    for lags in [8 ,32]:
#        full_test('../data/passengers_train.csv', 'AirPassengers',lags, ep)

In [9]:
full_test('../data/passengers_train.csv', 'AirPassengers', 12, 100)


/usr/lib/python3/dist-packages/sklearn/utils/validation.py:429: DataConversionWarning: Data with input dtype int64 was converted to float64 by MinMaxScaler.
  warnings.warn(msg, _DataConversionWarning)
/usr/lib/python3/dist-packages/sklearn/preprocessing/data.py:321: DeprecationWarning: Passing 1d arrays as data is deprecated in 0.17 and will raise ValueError in 0.19. Reshape your data either using X.reshape(-1, 1) if your data has a single feature or X.reshape(1, -1) if it contains a single sample.
  warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
/usr/lib/python3/dist-packages/sklearn/preprocessing/data.py:356: DeprecationWarning: Passing 1d arrays as data is deprecated in 0.17 and will raise ValueError in 0.19. Reshape your data either using X.reshape(-1, 1) if your data has a single feature or X.reshape(1, -1) if it contains a single sample.
  warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
Train on 67 samples, validate on 17 samples
Epoch 1/100
0s - loss: 0.0348 - val_loss: 0.0205
Epoch 2/100
0s - loss: 0.0136 - val_loss: 0.0186
Epoch 3/100
0s - loss: 0.0102 - val_loss: 0.0094
Epoch 4/100
0s - loss: 0.0106 - val_loss: 0.0075
Epoch 5/100
0s - loss: 0.0096 - val_loss: 0.0195
Epoch 6/100
0s - loss: 0.0092 - val_loss: 0.0165
Epoch 7/100
0s - loss: 0.0037 - val_loss: 0.0064
Epoch 8/100
0s - loss: 0.0040 - val_loss: 0.0095
Epoch 9/100
0s - loss: 0.0087 - val_loss: 0.0079
Epoch 10/100
0s - loss: 0.0062 - val_loss: 0.0092
Epoch 11/100
0s - loss: 0.0046 - val_loss: 0.0173
Epoch 12/100
0s - loss: 0.0069 - val_loss: 0.0047
Epoch 13/100
0s - loss: 0.0058 - val_loss: 0.0049
Epoch 14/100
0s - loss: 0.0060 - val_loss: 0.0062
Epoch 15/100
0s - loss: 0.0030 - val_loss: 0.0030
Epoch 16/100
0s - loss: 0.0060 - val_loss: 0.0096
Epoch 17/100
0s - loss: 0.0039 - val_loss: 0.0027
Epoch 18/100
0s - loss: 0.0053 - val_loss: 0.0026
Epoch 19/100
0s - loss: 0.0040 - val_loss: 0.0061
Epoch 20/100
0s - loss: 0.0043 - val_loss: 0.0071
Epoch 21/100
0s - loss: 0.0043 - val_loss: 0.0022
Epoch 22/100
0s - loss: 0.0028 - val_loss: 0.0036
Epoch 23/100
0s - loss: 0.0021 - val_loss: 0.0059
Epoch 24/100
0s - loss: 0.0048 - val_loss: 0.0042
Epoch 25/100
0s - loss: 0.0056 - val_loss: 0.0058
Epoch 26/100
0s - loss: 0.0035 - val_loss: 0.0022
Epoch 27/100
0s - loss: 0.0033 - val_loss: 0.0018
Epoch 28/100
0s - loss: 0.0043 - val_loss: 0.0019
Epoch 29/100
0s - loss: 0.0027 - val_loss: 0.0031
Epoch 30/100
0s - loss: 0.0018 - val_loss: 0.0032
Epoch 31/100
0s - loss: 0.0027 - val_loss: 0.0026
Epoch 32/100
0s - loss: 0.0030 - val_loss: 0.0037
Epoch 33/100
0s - loss: 0.0024 - val_loss: 0.0016
Epoch 34/100
0s - loss: 0.0043 - val_loss: 0.0103
Epoch 35/100
0s - loss: 0.0024 - val_loss: 0.0019
Epoch 36/100
0s - loss: 0.0027 - val_loss: 0.0018
Epoch 37/100
0s - loss: 0.0042 - val_loss: 0.0021
Epoch 38/100
0s - loss: 0.0026 - val_loss: 0.0034
Epoch 39/100
0s - loss: 0.0037 - val_loss: 0.0015
Epoch 40/100
0s - loss: 0.0031 - val_loss: 0.0018
Epoch 41/100
0s - loss: 0.0033 - val_loss: 0.0027
Epoch 42/100
0s - loss: 0.0038 - val_loss: 0.0018
Epoch 43/100
0s - loss: 0.0033 - val_loss: 0.0017
Epoch 44/100
0s - loss: 0.0027 - val_loss: 0.0016
Epoch 45/100
0s - loss: 0.0020 - val_loss: 0.0014
Epoch 46/100
0s - loss: 0.0028 - val_loss: 0.0039
Epoch 47/100
0s - loss: 0.0024 - val_loss: 0.0049
Epoch 48/100
0s - loss: 0.0026 - val_loss: 0.0019
Epoch 49/100
0s - loss: 0.0025 - val_loss: 0.0036
Epoch 50/100
0s - loss: 0.0020 - val_loss: 0.0050
Epoch 51/100
Epoch 00050: early stopping
0s - loss: 0.0027 - val_loss: 0.0040
{'history': {'loss': [0.03483477060374536, 0.013566897170651781, 0.010209130522623802, 0.010593217109985812, 0.0096240363830812151, 0.0092213063203064865, 0.0037307492724113355, 0.0039851072270161436, 0.0087211210551687932, 0.0061822005069289004, 0.0045855425325915366, 0.0069013842281961712, 0.0058162394931976918, 0.0059541803074874434, 0.0029514626287572587, 0.0059612898463988874, 0.0039132932779633268, 0.0052526705342706894, 0.0039877317086414541, 0.0042991354874986664, 0.0043289186831159581, 0.0027870022496903932, 0.0020851135953134231, 0.0047650188090552961, 0.0055688457377066278, 0.003532103392702613, 0.0033089356654591707, 0.0043066570284755034, 0.0026569336365838366, 0.0018265890584860977, 0.0027290233883899987, 0.0030025121542579707, 0.0024097762280967773, 0.004285362323420445, 0.0023630135452530772, 0.0027036025847544452, 0.004196284381825649, 0.0025779854269930651, 0.0037422664495812626, 0.0031125830667048022, 0.0033331675355415895, 0.0037896294584887943, 0.0032985264595680095, 0.0026796396687081168, 0.0019994119473151963, 0.0028337723404109655, 0.0023616899951436061, 0.0026440596608544704, 0.0024542211158895314, 0.001997825436428141, 0.0027422182660418728], 'val_loss': [0.020488690427822909, 0.018561870266293065, 0.0094423937946831911, 0.0074903452054701017, 0.01946010464513058, 0.016536565508217523, 0.0064143078334917151, 0.0095205601477680504, 0.0079163273786595229, 0.0092004586174024398, 0.017286304507492519, 0.0047078906350781621, 0.0048677041715349976, 0.0062390872928303641, 0.0030395112358639702, 0.0096001511253823134, 0.0026933115086114198, 0.002585091715980739, 0.0060993671538921365, 0.0071240220765021681, 0.0021631967429129872, 0.0035958329198739888, 0.0059133166950606905, 0.0041983899804215663, 0.0057982548558570936, 0.0021554783962622372, 0.0017844627523385605, 0.0018928440153445081, 0.0031096138250869053, 0.0032338915739006445, 0.0025878353478540651, 0.0037130411698907506, 0.0016490457905199624, 0.010293922318583903, 0.0018632080419773319, 0.001775570752138666, 0.0020526984307924112, 0.0033902956227607585, 0.0015430549832762394, 0.0018284305916254466, 0.0026934305984896664, 0.0018451065367678572, 0.0016950727730585748, 0.0016182877161019646, 0.001429020034208861, 0.0039171218153676501, 0.0049042946274686769, 0.0018989770278544844, 0.0036087304945206546, 0.0050362474130987026, 0.0040082838050319184]}, 'epoch': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50], 'model': <keras.models.Sequential object at 0x7fbdf188c208>, 'params': {'nb_epoch': 100, 'metrics': ['loss', 'val_loss'], 'batch_size': 1, 'nb_sample': 67, 'verbose': 2, 'do_validation': True}}
{'outputs': [Elemwise{add,no_inplace}.0], 'supports_masking': False, 'inputs': [dense_input_2], 'outbound_nodes': [], 'output_layers_tensor_indices': [0], '_flattened_layers': None, 'input_layers_node_indices': [0], 'input_layers_tensor_indices': [0], 'input_names': ['dense_input_2'], 'container_nodes': {'dense_3_ib-0', 'dense_4_ib-0', 'dropout_2_ib-0', 'dense_input_2_ib-0'}, 'output_names': ['dense_4'], '_output_shape_cache': {}, 'loss_weights': None, 'metrics_names': ['loss'], 'stop_training': True, 'input_layers': [<keras.engine.topology.InputLayer object at 0x7fbdf11f3978>], 'sample_weight_mode': None, 'nodes_by_depth': {0: [<keras.engine.topology.Node object at 0x7fbdf18ae898>], 1: [<keras.engine.topology.Node object at 0x7fbdf1d71240>], 2: [<keras.engine.topology.Node object at 0x7fbdf1888978>], 3: [<keras.engine.topology.Node object at 0x7fbdf11f3a90>]}, 'loss': 'mse', 'name': 'sequential_2', 'output_layers': [<keras.layers.core.Dense object at 0x7fbdf1d71eb8>], 'layers': [<keras.layers.core.Dense object at 0x7fbdf188c1d0>, <keras.layers.core.Dropout object at 0x7fbdf1888860>, <keras.layers.core.Dense object at 0x7fbdf1d71eb8>], 'optimizer': <keras.optimizers.Adam object at 0x7fbdf185a748>, '_output_tensor_cache': {}, 'metrics': [], 'metrics_tensors': [], 'built': True, '_output_mask_cache': {'140453770902272_94179042275360': None}, 'model': <keras.engine.training.Model object at 0x7fbdf18ae630>, 'inbound_nodes': [<keras.engine.topology.Node object at 0x7fbdf1888b38>], 'output_layers_node_indices': [0]}
TRAIN_TEST_RMSE_MAPE (93.281087000416221, 0.34525995058714615) (109.68940404089903, 0.2069614320798977)

In [10]:
callbacks.EarlyStopping?

In [11]:
mod0 = Sequential()

In [12]:
mod0.fit?

In [13]:
LSTM?

In [14]:
mod0.compile(loss='mse', optimizer='adam')


---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
<ipython-input-14-2770084db40b> in <module>()
----> 1 mod0.compile(loss='mse', optimizer='adam')

/usr/lib/python3/dist-packages/keras/models.py in compile(self, optimizer, loss, metrics, sample_weight_mode, **kwargs)
    512         '''
    513         # create the underlying model
--> 514         self.build()
    515         # legacy kwarg support
    516         if 'class_mode' in kwargs:

/usr/lib/python3/dist-packages/keras/models.py in build(self, input_shape)
    347     def build(self, input_shape=None):
    348         if not self.inputs or not self.outputs:
--> 349             raise Exception('Sequential model cannot be built: model is empty.'
    350                             ' Add some layers first.')
    351         # actually create the model

Exception: Sequential model cannot be built: model is empty. Add some layers first.

In [15]:
def build_fake_model():
    model = Sequential()
    model.add(Dense(40, input_dim=5))
    # model.add(LSTM(40, input_dim=lags))
    model.add(Dropout(0.1))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model;

In [16]:
mod0 = build_fake_model();

In [18]:
# mod0.__dict__

In [19]:
mod0.reset_states()

In [21]:
#mod0.__dict__

In [22]:
import copy;
mod1 = copy.deepcopy(mod0);

In [23]:
mod1.__dict__


Out[23]:
{'_flattened_layers': None,
 '_output_mask_cache': {'140453775074416_94179042275360': None},
 '_output_shape_cache': {},
 '_output_tensor_cache': {},
 'built': True,
 'container_nodes': {'dense_5_ib-0',
  'dense_6_ib-0',
  'dense_input_3_ib-0',
  'dropout_3_ib-0'},
 'inbound_nodes': [<keras.engine.topology.Node at 0x7fbdf1537eb8>],
 'input_layers': [<keras.engine.topology.InputLayer at 0x7fbdf152fda0>],
 'input_layers_node_indices': [0],
 'input_layers_tensor_indices': [0],
 'input_names': ['dense_input_3'],
 'inputs': [dense_input_3],
 'layers': [<keras.layers.core.Dense at 0x7fbdf152fe48>,
  <keras.layers.core.Dropout at 0x7fbdf1532048>,
  <keras.layers.core.Dense at 0x7fbdf15320f0>],
 'loss': 'mse',
 'loss_weights': None,
 'metrics': [],
 'metrics_names': ['loss'],
 'metrics_tensors': [],
 'model': <keras.engine.training.Model at 0x7fbdf1537a90>,
 'name': 'sequential_4',
 'nodes_by_depth': {0: [<keras.engine.topology.Node at 0x7fbdf15320b8>],
  1: [<keras.engine.topology.Node at 0x7fbdf152ffd0>],
  2: [<keras.engine.topology.Node at 0x7fbdf152fe10>],
  3: [<keras.engine.topology.Node at 0x7fbdf152ff98>]},
 'optimizer': <keras.optimizers.Adam at 0x7fbdf159bcc0>,
 'outbound_nodes': [],
 'output_layers': [<keras.layers.core.Dense at 0x7fbdf15320f0>],
 'output_layers_node_indices': [0],
 'output_layers_tensor_indices': [0],
 'output_names': ['dense_6'],
 'outputs': [Elemwise{add,no_inplace}.0],
 'sample_weight_mode': None,
 'supports_masking': False}

In [24]:
mod1.reset_states()

In [25]:
mod1.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
dense_5 (Dense)                  (None, 40)            240         dense_input_3[0][0]              
____________________________________________________________________________________________________
dropout_3 (Dropout)              (None, 40)            0           dense_5[0][0]                    
____________________________________________________________________________________________________
dense_6 (Dense)                  (None, 1)             41          dropout_3[0][0]                  
====================================================================================================
Total params: 281
____________________________________________________________________________________________________

In [26]:
mod0.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
dense_5 (Dense)                  (None, 40)            240         dense_input_3[0][0]              
____________________________________________________________________________________________________
dropout_3 (Dropout)              (None, 40)            0           dense_5[0][0]                    
____________________________________________________________________________________________________
dense_6 (Dense)                  (None, 1)             41          dropout_3[0][0]                  
====================================================================================================
Total params: 281
____________________________________________________________________________________________________

In [28]:
n0 = mod0.nodes_by_depth[0]
n1 = mod1.nodes_by_depth[0]

In [31]:
n0 , n1


Out[31]:
([<keras.engine.topology.Node at 0x7fbdf159b518>],
 [<keras.engine.topology.Node at 0x7fbdf15320b8>])

In [ ]: