In [1]:
import numpy as np

In [2]:
# -- synthetic data: 1000 samples with two gaussian features
#    (feature 0: mean 0.3, sd 5; feature 1: mean -3.5, sd 1.1)
np.random.seed(42)  # seed so the data, the split, and training below are reproducible
X = np.array([np.random.normal(0.3, 5, 1000), np.random.normal(-3.5, 1.1, 1000)]).T

In [3]:
# -- binary labels: a sample is "signal" (1.0) when its first feature
#    falls in either tail (below -3 or above 10); otherwise 0.0
y = ((X[:, 0] < -3) | (X[:, 0] > 10)).astype(np.float64)

In [4]:
# sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed in
# 0.20; prefer sklearn.model_selection, falling back for very old installs.
try:
    from sklearn.model_selection import train_test_split
except ImportError:  # scikit-learn < 0.18
    from sklearn.cross_validation import train_test_split

In [5]:
# -- 70/30 train/test split; fixed random_state so the split is reproducible
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=42)

In [6]:
import matplotlib.pyplot as plt
%matplotlib inline

In [7]:
# -- visualize the two features, colored by class label
plt.scatter(X[:,0], X[:,1], c=y)
plt.xlabel('feature 0')
plt.ylabel('feature 1')
plt.title('Synthetic data, colored by class')
plt.show()


/usr/local/lib/python2.7/site-packages/matplotlib/collections.py:590: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
  if self._edgecolors == str('face'):

In [8]:
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.callbacks import EarlyStopping, ModelCheckpoint


Using TensorFlow backend.

In [9]:
# -- tiny feed-forward net: 2 inputs -> Dense(2) + ReLU -> Dense(1) + sigmoid,
#    trained with binary cross-entropy for the two-class problem above
model = Sequential()
model.add(Dense(2, input_shape=(2,)))
model.add(Activation('relu'))
model.add(Dense(1, activation='sigmoid'))
print('Compiling model...')  # print() form works on both Python 2 and 3
model.compile('adam', 'binary_crossentropy')
model.summary()


Compiling model...
--------------------------------------------------------------------------------
Initial input shape: (None, 2)
--------------------------------------------------------------------------------
Layer (name)                  Output Shape                  Param #             
--------------------------------------------------------------------------------
Dense (dense)                 (None, 2)                     6                   
Activation (activation)       (None, 2)                     0                   
Dense (dense)                 (None, 1)                     3                   
--------------------------------------------------------------------------------
Total params: 9
--------------------------------------------------------------------------------

In [12]:
MODEL_FILE = 'dummynet'  # basename for the checkpoint files written during training

In [ ]:
# -- train with early stopping on val_loss; the best weights so far are
#    checkpointed to MODEL_FILE-progress, so we can reload them even if
#    training is interrupted by hand (Ctrl-C).
try:
    model.fit(X_train, y_train, batch_size=16,
        callbacks=[
            EarlyStopping(verbose=True, patience=20, monitor='val_loss'),
            ModelCheckpoint(MODEL_FILE + '-progress', monitor='val_loss', verbose=True, save_best_only=True)
        ],
        nb_epoch=100,
        validation_split=0.2,
        show_accuracy=True)

except KeyboardInterrupt:
    print('Training ended early.')

# -- load in best network (lowest val_loss seen during training)
model.load_weights(MODEL_FILE + '-progress')


Train on 560 samples, validate on 140 samples
Epoch 1/100
544/560 [============================>.] - ETA: 0s - loss: 1.2397 - acc: 1.0000Epoch 00000: val_loss improved from inf to 1.08817, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 1.2261 - acc: 1.0000 - val_loss: 1.0882 - val_acc: 1.0000
Epoch 2/100
544/560 [============================>.] - ETA: 0s - loss: 1.0417 - acc: 1.0000Epoch 00001: val_loss improved from 1.08817 to 0.94662, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 1.0454 - acc: 1.0000 - val_loss: 0.9466 - val_acc: 1.0000
Epoch 3/100
544/560 [============================>.] - ETA: 0s - loss: 0.8822 - acc: 1.0000Epoch 00002: val_loss improved from 0.94662 to 0.80772, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.8794 - acc: 1.0000 - val_loss: 0.8077 - val_acc: 1.0000
Epoch 4/100
544/560 [============================>.] - ETA: 0s - loss: 0.7291 - acc: 1.0000Epoch 00003: val_loss improved from 0.80772 to 0.69156, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.7267 - acc: 1.0000 - val_loss: 0.6916 - val_acc: 1.0000
Epoch 5/100
544/560 [============================>.] - ETA: 0s - loss: 0.6048 - acc: 1.0000Epoch 00004: val_loss improved from 0.69156 to 0.59554, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.6010 - acc: 1.0000 - val_loss: 0.5955 - val_acc: 1.0000
Epoch 6/100
544/560 [============================>.] - ETA: 0s - loss: 0.5083 - acc: 1.0000Epoch 00005: val_loss improved from 0.59554 to 0.51874, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.5043 - acc: 1.0000 - val_loss: 0.5187 - val_acc: 1.0000
Epoch 7/100
544/560 [============================>.] - ETA: 0s - loss: 0.4321 - acc: 1.0000Epoch 00006: val_loss improved from 0.51874 to 0.46588, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.4329 - acc: 1.0000 - val_loss: 0.4659 - val_acc: 1.0000
Epoch 8/100
544/560 [============================>.] - ETA: 0s - loss: 0.3777 - acc: 1.0000Epoch 00007: val_loss improved from 0.46588 to 0.42702, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.3848 - acc: 1.0000 - val_loss: 0.4270 - val_acc: 1.0000
Epoch 9/100
544/560 [============================>.] - ETA: 0s - loss: 0.3530 - acc: 1.0000Epoch 00008: val_loss improved from 0.42702 to 0.40161, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.3517 - acc: 1.0000 - val_loss: 0.4016 - val_acc: 1.0000
Epoch 10/100
544/560 [============================>.] - ETA: 0s - loss: 0.3281 - acc: 1.0000Epoch 00009: val_loss improved from 0.40161 to 0.38320, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.3313 - acc: 1.0000 - val_loss: 0.3832 - val_acc: 1.0000
Epoch 11/100
544/560 [============================>.] - ETA: 0s - loss: 0.3183 - acc: 1.0000Epoch 00010: val_loss improved from 0.38320 to 0.37276, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.3162 - acc: 1.0000 - val_loss: 0.3728 - val_acc: 1.0000
Epoch 12/100
544/560 [============================>.] - ETA: 0s - loss: 0.3030 - acc: 1.0000Epoch 00011: val_loss improved from 0.37276 to 0.36508, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.3070 - acc: 1.0000 - val_loss: 0.3651 - val_acc: 1.0000
Epoch 13/100
544/560 [============================>.] - ETA: 0s - loss: 0.3039 - acc: 1.0000Epoch 00012: val_loss improved from 0.36508 to 0.35929, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.3007 - acc: 1.0000 - val_loss: 0.3593 - val_acc: 1.0000
Epoch 14/100
544/560 [============================>.] - ETA: 0s - loss: 0.2902 - acc: 1.0000Epoch 00013: val_loss improved from 0.35929 to 0.35512, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2958 - acc: 1.0000 - val_loss: 0.3551 - val_acc: 1.0000
Epoch 15/100
544/560 [============================>.] - ETA: 0s - loss: 0.2964 - acc: 1.0000Epoch 00014: val_loss improved from 0.35512 to 0.35162, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2921 - acc: 1.0000 - val_loss: 0.3516 - val_acc: 1.0000
Epoch 16/100
544/560 [============================>.] - ETA: 0s - loss: 0.2875 - acc: 1.0000Epoch 00015: val_loss improved from 0.35162 to 0.34871, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2890 - acc: 1.0000 - val_loss: 0.3487 - val_acc: 1.0000
Epoch 17/100
544/560 [============================>.] - ETA: 0s - loss: 0.2890 - acc: 1.0000Epoch 00016: val_loss improved from 0.34871 to 0.34618, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2864 - acc: 1.0000 - val_loss: 0.3462 - val_acc: 1.0000
Epoch 18/100
544/560 [============================>.] - ETA: 0s - loss: 0.2792 - acc: 1.0000Epoch 00017: val_loss improved from 0.34618 to 0.34386, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2840 - acc: 1.0000 - val_loss: 0.3439 - val_acc: 1.0000
Epoch 19/100
544/560 [============================>.] - ETA: 0s - loss: 0.2816 - acc: 1.0000Epoch 00018: val_loss improved from 0.34386 to 0.34139, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2818 - acc: 1.0000 - val_loss: 0.3414 - val_acc: 1.0000
Epoch 20/100
544/560 [============================>.] - ETA: 0s - loss: 0.2842 - acc: 1.0000Epoch 00019: val_loss improved from 0.34139 to 0.33975, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2797 - acc: 1.0000 - val_loss: 0.3397 - val_acc: 1.0000
Epoch 21/100
544/560 [============================>.] - ETA: 0s - loss: 0.2749 - acc: 1.0000Epoch 00020: val_loss improved from 0.33975 to 0.33707, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2771 - acc: 1.0000 - val_loss: 0.3371 - val_acc: 1.0000
Epoch 22/100
544/560 [============================>.] - ETA: 0s - loss: 0.2789 - acc: 1.0000Epoch 00021: val_loss improved from 0.33707 to 0.33453, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2752 - acc: 1.0000 - val_loss: 0.3345 - val_acc: 1.0000
Epoch 23/100
544/560 [============================>.] - ETA: 0s - loss: 0.2749 - acc: 1.0000Epoch 00022: val_loss improved from 0.33453 to 0.33248, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2729 - acc: 1.0000 - val_loss: 0.3325 - val_acc: 1.0000
Epoch 24/100
544/560 [============================>.] - ETA: 0s - loss: 0.2689 - acc: 1.0000Epoch 00023: val_loss improved from 0.33248 to 0.33045, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2707 - acc: 1.0000 - val_loss: 0.3304 - val_acc: 1.0000
Epoch 25/100
544/560 [============================>.] - ETA: 0s - loss: 0.2727 - acc: 1.0000Epoch 00024: val_loss improved from 0.33045 to 0.32796, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2686 - acc: 1.0000 - val_loss: 0.3280 - val_acc: 1.0000
Epoch 26/100
544/560 [============================>.] - ETA: 0s - loss: 0.2693 - acc: 1.0000Epoch 00025: val_loss improved from 0.32796 to 0.32578, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2665 - acc: 1.0000 - val_loss: 0.3258 - val_acc: 1.0000
Epoch 27/100
544/560 [============================>.] - ETA: 0s - loss: 0.2649 - acc: 1.0000Epoch 00026: val_loss improved from 0.32578 to 0.32276, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2645 - acc: 1.0000 - val_loss: 0.3228 - val_acc: 1.0000
Epoch 28/100
544/560 [============================>.] - ETA: 0s - loss: 0.2578 - acc: 1.0000Epoch 00027: val_loss improved from 0.32276 to 0.32079, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2623 - acc: 1.0000 - val_loss: 0.3208 - val_acc: 1.0000
Epoch 29/100
544/560 [============================>.] - ETA: 0s - loss: 0.2574 - acc: 1.0000Epoch 00028: val_loss improved from 0.32079 to 0.31779, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2602 - acc: 1.0000 - val_loss: 0.3178 - val_acc: 1.0000
Epoch 30/100
544/560 [============================>.] - ETA: 0s - loss: 0.2590 - acc: 1.0000Epoch 00029: val_loss improved from 0.31779 to 0.31561, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2577 - acc: 1.0000 - val_loss: 0.3156 - val_acc: 1.0000
Epoch 31/100
544/560 [============================>.] - ETA: 0s - loss: 0.2534 - acc: 1.0000Epoch 00030: val_loss improved from 0.31561 to 0.31343, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2559 - acc: 1.0000 - val_loss: 0.3134 - val_acc: 1.0000
Epoch 32/100
544/560 [============================>.] - ETA: 0s - loss: 0.2493 - acc: 1.0000Epoch 00031: val_loss improved from 0.31343 to 0.31053, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2534 - acc: 1.0000 - val_loss: 0.3105 - val_acc: 1.0000
Epoch 33/100
544/560 [============================>.] - ETA: 0s - loss: 0.2481 - acc: 1.0000Epoch 00032: val_loss improved from 0.31053 to 0.30754, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2513 - acc: 1.0000 - val_loss: 0.3075 - val_acc: 1.0000
Epoch 34/100
544/560 [============================>.] - ETA: 0s - loss: 0.2500 - acc: 1.0000Epoch 00033: val_loss improved from 0.30754 to 0.30495, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2491 - acc: 1.0000 - val_loss: 0.3049 - val_acc: 1.0000
Epoch 35/100
544/560 [============================>.] - ETA: 0s - loss: 0.2475 - acc: 1.0000Epoch 00034: val_loss improved from 0.30495 to 0.30245, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2469 - acc: 1.0000 - val_loss: 0.3025 - val_acc: 1.0000
Epoch 36/100
544/560 [============================>.] - ETA: 0s - loss: 0.2452 - acc: 1.0000Epoch 00035: val_loss improved from 0.30245 to 0.29958, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2449 - acc: 1.0000 - val_loss: 0.2996 - val_acc: 1.0000
Epoch 37/100
544/560 [============================>.] - ETA: 0s - loss: 0.2416 - acc: 1.0000Epoch 00036: val_loss improved from 0.29958 to 0.29698, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2424 - acc: 1.0000 - val_loss: 0.2970 - val_acc: 1.0000
Epoch 38/100
544/560 [============================>.] - ETA: 0s - loss: 0.2436 - acc: 1.0000Epoch 00037: val_loss improved from 0.29698 to 0.29390, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2402 - acc: 1.0000 - val_loss: 0.2939 - val_acc: 1.0000
Epoch 39/100
544/560 [============================>.] - ETA: 0s - loss: 0.2396 - acc: 1.0000Epoch 00038: val_loss improved from 0.29390 to 0.29025, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2384 - acc: 1.0000 - val_loss: 0.2902 - val_acc: 1.0000
Epoch 40/100
544/560 [============================>.] - ETA: 0s - loss: 0.2338 - acc: 1.0000Epoch 00039: val_loss improved from 0.29025 to 0.28829, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2357 - acc: 1.0000 - val_loss: 0.2883 - val_acc: 1.0000
Epoch 41/100
544/560 [============================>.] - ETA: 0s - loss: 0.2383 - acc: 1.0000Epoch 00040: val_loss improved from 0.28829 to 0.28560, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2336 - acc: 1.0000 - val_loss: 0.2856 - val_acc: 1.0000
Epoch 42/100
544/560 [============================>.] - ETA: 0s - loss: 0.2292 - acc: 1.0000Epoch 00041: val_loss improved from 0.28560 to 0.28302, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2320 - acc: 1.0000 - val_loss: 0.2830 - val_acc: 1.0000
Epoch 43/100
544/560 [============================>.] - ETA: 0s - loss: 0.2287 - acc: 1.0000Epoch 00042: val_loss improved from 0.28302 to 0.27963, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2296 - acc: 1.0000 - val_loss: 0.2796 - val_acc: 1.0000
Epoch 44/100
544/560 [============================>.] - ETA: 0s - loss: 0.2164 - acc: 1.0000Epoch 00043: val_loss improved from 0.27963 to 0.27773, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2276 - acc: 1.0000 - val_loss: 0.2777 - val_acc: 1.0000
Epoch 45/100
544/560 [============================>.] - ETA: 0s - loss: 0.2262 - acc: 1.0000Epoch 00044: val_loss improved from 0.27773 to 0.27329, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2251 - acc: 1.0000 - val_loss: 0.2733 - val_acc: 1.0000
Epoch 46/100
544/560 [============================>.] - ETA: 0s - loss: 0.2176 - acc: 1.0000Epoch 00045: val_loss improved from 0.27329 to 0.27094, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2230 - acc: 1.0000 - val_loss: 0.2709 - val_acc: 1.0000
Epoch 47/100
544/560 [============================>.] - ETA: 0s - loss: 0.2235 - acc: 1.0000Epoch 00046: val_loss improved from 0.27094 to 0.26786, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2210 - acc: 1.0000 - val_loss: 0.2679 - val_acc: 1.0000
Epoch 48/100
544/560 [============================>.] - ETA: 0s - loss: 0.2155 - acc: 1.0000Epoch 00047: val_loss improved from 0.26786 to 0.26500, saving model to dummynet-progress
560/560 [==============================] - 0s - loss: 0.2187 - acc: 1.0000 - val_loss: 0.2650 - val_acc: 1.0000
Epoch 49/100
 64/560 [==>...........................] - ETA: 0s - loss: 0.3197 - acc: 1.0000

In [15]:
# -- save model graph and weights to protobufs / a TF checkpoint, so the net
#    can later be frozen and evaluated outside Python (e.g. from C++)
import tensorflow as tf
import keras.backend.tensorflow_backend as tfbe

# NOTE(review): _SESSION is a private attribute of the old Keras TF backend --
# confirm it is still the live session in your Keras version
sess = tfbe._SESSION
saver = tf.train.Saver()
tf.train.write_graph(sess.graph_def, 'models/', 'graph.pb', as_text=False)
save_path = saver.save(sess, "./model-weights.ckpt")
print("Model saved in file: %s" % save_path)

# -- print out this information: it will be useful when freezing the graph
print('filename_tensor_name = ' + saver.as_saver_def().filename_tensor_name)
print('restore_op_name = ' + saver.as_saver_def().restore_op_name)


Model saved in file: ./model-weights.ckpt
filename_tensor_name = save_1/Const:0
restore_op_name = save_1/restore_all

In [16]:
# -- find out the name of the output tensor: it will be useful when freezing
#    the graph and when fetching predictions from the loaded graph
model.get_output()


Out[16]:
<tf.Tensor 'Sigmoid_2:0' shape=(?, 1) dtype=float32>

In [17]:
# -- find out the name of the *input* node (not output -- this is get_input):
#    it will be useful when feeding the graph at evaluation time
model.get_input()


Out[17]:
<tf.Tensor 'Placeholder:0' shape=(?, 2) dtype=float32>

In [18]:
# -- testing: predict class probabilities for the held-out test set
# NOTE(review): batch_size=516 exceeds the 300 test samples, so this
# effectively runs as a single batch
yhat = model.predict(X_test, verbose = True, batch_size = 516)


300/300 [==============================] - 0s

In [19]:
# -- plot normalized classifier output for each true class, overlaid to show
#    how well the net separates signal (green) from background (red)
# NOTE(review): `normed` was removed in matplotlib >= 3.1; use `density=True`
# there (kept as-is for this py2-era matplotlib environment)
_ = plt.hist(yhat[y_test == 1], normed = True, histtype = 'stepfilled', color = 'green', alpha = 0.5)
_ = plt.hist(yhat[y_test == 0], normed = True, histtype = 'stepfilled', color = 'red', alpha = 0.5)



In [20]:
# -- true class of each test-set example, for visual comparison with the
#    net's predictions plotted in the next cell
plt.scatter(X_test[:,0], X_test[:,1], c=y_test)
plt.show()



In [21]:
# -- net predictions on test set examples (color = predicted probability);
#    compare against the true-label scatter above
plt.scatter(X_test[:,0], X_test[:,1], c=yhat)
plt.show()


Pick an example to check whether `load.cc` reproduces the Keras prediction


In [25]:
# -- inputs: first test example, to feed by hand to the C++ loader
X_test[0]


Out[25]:
array([ 3.24311246, -5.56385935])

In [26]:
# -- predicted output (using Keras) for the same example; the C++ loader
#    should reproduce this value
yhat[0]


Out[26]:
array([ 0.00248217])

Inspect the protobuf containing the model's architecture and logic


In [27]:
from tensorflow.core.framework import graph_pb2

# -- read in the serialized graph; a context manager guarantees the file
#    handle is closed (the original leaked it)
graph_def = graph_pb2.GraphDef()
with open("models/graph.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

In [28]:
import tensorflow as tf
# -- actually import the graph described by graph_def into the default TF
#    graph; name='' avoids prefixing every node name
tf.import_graph_def(graph_def, name = '')

In [29]:
# -- list every node in the imported graph: useful for locating the input /
#    output tensor names needed when freezing and evaluating
for node in graph_def.node:
    print(node.name)


Placeholder
dense_W/initial_value
dense_W
dense_W/Assign
dense_W/read
dense_b/initial_value
dense_b
dense_b/Assign
dense_b/read
dense_W_1/initial_value
dense_W_1
dense_W_1/Assign
dense_W_1/read
dense_b_1/initial_value
dense_b_1
dense_b_1/Assign
dense_b_1/read
Variable/initial_value
Variable
Variable/Assign
Variable/read
Variable_1/initial_value
Variable_1
Variable_1/Assign
Variable_1/read
Variable_2/initial_value
Variable_2
Variable_2/Assign
Variable_2/read
Variable_3/initial_value
Variable_3
Variable_3/Assign
Variable_3/read
MatMul
add
Neg
Relu
Relu_1
Const
mul
sub
MatMul_1
add_1
Sigmoid
MatMul_2
add_2
Neg_1
Relu_2
Relu_3
Const_1
mul_1
sub_1
MatMul_3
add_3
Sigmoid_1
Placeholder_1
Placeholder_2
Cast/x
Cast_1/x
clip_by_value/Minimum
clip_by_value
sub_2/x
sub_2
div
Log
logistic_loss/Relu
logistic_loss/mul
logistic_loss/sub
logistic_loss/Abs
logistic_loss/Neg
logistic_loss/Exp
logistic_loss/add/x
logistic_loss/add
logistic_loss/Log
logistic_loss
Mean/reduction_indices
Mean
Mean_1/reduction_indices
Mean_1
mul_2
NotEqual/y
NotEqual
Cast_2
Rank
range/start
range/delta
range
Mean_2
div_1
Rank_1
range_1/start
range_1/delta
range_1
Mean_3
Cast_3/x
Cast_4/x
clip_by_value_1/Minimum
clip_by_value_1
sub_3/x
sub_3
div_2
Log_1
logistic_loss_1/Relu
logistic_loss_1/mul
logistic_loss_1/sub
logistic_loss_1/Abs
logistic_loss_1/Neg
logistic_loss_1/Exp
logistic_loss_1/add/x
logistic_loss_1/add
logistic_loss_1/Log
logistic_loss_1
Mean_4/reduction_indices
Mean_4
Mean_5/reduction_indices
Mean_5
mul_3
NotEqual_1/y
NotEqual_1
Cast_5
Rank_2
range_2/start
range_2/delta
range_2
Mean_6
div_3
Rank_3
range_3/start
range_3/delta
range_3
Mean_7
ArgMax/dimension
ArgMax
ArgMax_1/dimension
ArgMax_1
Equal
Cast_6
Rank_4
range_4/start
range_4/delta
range_4
Mean_8
ArgMax_2/dimension
ArgMax_2
ArgMax_3/dimension
ArgMax_3
Equal_1
Cast_7
Rank_5
range_5/start
range_5/delta
range_5
Mean_9
gradients/Shape
gradients/Const
gradients/Fill
gradients/Mean_3_grad/Shape
gradients/Mean_3_grad/Rank
gradients/Mean_3_grad/Shape_1
gradients/Mean_3_grad/range/start
gradients/Mean_3_grad/range/delta
gradients/Mean_3_grad/range
gradients/Mean_3_grad/Fill/value
gradients/Mean_3_grad/Fill
gradients/Mean_3_grad/DynamicStitch
gradients/Mean_3_grad/floordiv
gradients/Mean_3_grad/Reshape
gradients/Mean_3_grad/Tile
gradients/Mean_3_grad/Shape_2
gradients/Mean_3_grad/Shape_3
gradients/Mean_3_grad/Rank_1
gradients/Mean_3_grad/range_1/start
gradients/Mean_3_grad/range_1/delta
gradients/Mean_3_grad/range_1
gradients/Mean_3_grad/Prod
gradients/Mean_3_grad/Rank_2
gradients/Mean_3_grad/range_2/start
gradients/Mean_3_grad/range_2/delta
gradients/Mean_3_grad/range_2
gradients/Mean_3_grad/Prod_1
gradients/Mean_3_grad/floordiv_1
gradients/Mean_3_grad/Cast
gradients/Mean_3_grad/truediv
gradients/div_1_grad/Shape
gradients/div_1_grad/Shape_1
gradients/div_1_grad/BroadcastGradientArgs
gradients/div_1_grad/truediv
gradients/div_1_grad/Sum
gradients/div_1_grad/Reshape
gradients/div_1_grad/Neg
gradients/div_1_grad/Square
gradients/div_1_grad/truediv_1
gradients/div_1_grad/mul
gradients/div_1_grad/Sum_1
gradients/div_1_grad/Reshape_1
gradients/mul_2_grad/Shape
gradients/mul_2_grad/Shape_1
gradients/mul_2_grad/BroadcastGradientArgs
gradients/mul_2_grad/mul
gradients/mul_2_grad/Sum
gradients/mul_2_grad/Reshape
gradients/mul_2_grad/mul_1
gradients/mul_2_grad/Sum_1
gradients/mul_2_grad/Reshape_1
gradients/Mean_1_grad/Shape
gradients/Mean_1_grad/Rank
gradients/Mean_1_grad/Shape_1
gradients/Mean_1_grad/range/start
gradients/Mean_1_grad/range/delta
gradients/Mean_1_grad/range
gradients/Mean_1_grad/Fill/value
gradients/Mean_1_grad/Fill
gradients/Mean_1_grad/DynamicStitch
gradients/Mean_1_grad/floordiv
gradients/Mean_1_grad/Reshape
gradients/Mean_1_grad/Tile
gradients/Mean_1_grad/Shape_2
gradients/Mean_1_grad/Shape_3
gradients/Mean_1_grad/Rank_1
gradients/Mean_1_grad/range_1/start
gradients/Mean_1_grad/range_1/delta
gradients/Mean_1_grad/range_1
gradients/Mean_1_grad/Prod
gradients/Mean_1_grad/Rank_2
gradients/Mean_1_grad/range_2/start
gradients/Mean_1_grad/range_2/delta
gradients/Mean_1_grad/range_2
gradients/Mean_1_grad/Prod_1
gradients/Mean_1_grad/floordiv_1
gradients/Mean_1_grad/Cast
gradients/Mean_1_grad/truediv
gradients/Mean_grad/Shape
gradients/Mean_grad/Rank
gradients/Mean_grad/Shape_1
gradients/Mean_grad/range/start
gradients/Mean_grad/range/delta
gradients/Mean_grad/range
gradients/Mean_grad/Fill/value
gradients/Mean_grad/Fill
gradients/Mean_grad/DynamicStitch
gradients/Mean_grad/floordiv
gradients/Mean_grad/Reshape
gradients/Mean_grad/Tile
gradients/Mean_grad/Shape_2
gradients/Mean_grad/Shape_3
gradients/Mean_grad/Rank_1
gradients/Mean_grad/range_1/start
gradients/Mean_grad/range_1/delta
gradients/Mean_grad/range_1
gradients/Mean_grad/Prod
gradients/Mean_grad/Rank_2
gradients/Mean_grad/range_2/start
gradients/Mean_grad/range_2/delta
gradients/Mean_grad/range_2
gradients/Mean_grad/Prod_1
gradients/Mean_grad/floordiv_1
gradients/Mean_grad/Cast
gradients/Mean_grad/truediv
gradients/logistic_loss_grad/Shape
gradients/logistic_loss_grad/Shape_1
gradients/logistic_loss_grad/BroadcastGradientArgs
gradients/logistic_loss_grad/Sum
gradients/logistic_loss_grad/Reshape
gradients/logistic_loss_grad/Sum_1
gradients/logistic_loss_grad/Reshape_1
gradients/logistic_loss/sub_grad/Shape
gradients/logistic_loss/sub_grad/Shape_1
gradients/logistic_loss/sub_grad/BroadcastGradientArgs
gradients/logistic_loss/sub_grad/Sum
gradients/logistic_loss/sub_grad/Reshape
gradients/logistic_loss/sub_grad/Sum_1
gradients/logistic_loss/sub_grad/Neg
gradients/logistic_loss/sub_grad/Reshape_1
gradients/logistic_loss/Log_grad/Inv
gradients/logistic_loss/Log_grad/mul
gradients/logistic_loss/Relu_grad/ReluGrad
gradients/logistic_loss/mul_grad/Shape
gradients/logistic_loss/mul_grad/Shape_1
gradients/logistic_loss/mul_grad/BroadcastGradientArgs
gradients/logistic_loss/mul_grad/mul
gradients/logistic_loss/mul_grad/Sum
gradients/logistic_loss/mul_grad/Reshape
gradients/logistic_loss/mul_grad/mul_1
gradients/logistic_loss/mul_grad/Sum_1
gradients/logistic_loss/mul_grad/Reshape_1
gradients/logistic_loss/add_grad/Shape
gradients/logistic_loss/add_grad/Shape_1
gradients/logistic_loss/add_grad/BroadcastGradientArgs
gradients/logistic_loss/add_grad/Sum
gradients/logistic_loss/add_grad/Reshape
gradients/logistic_loss/add_grad/Sum_1
gradients/logistic_loss/add_grad/Reshape_1
gradients/logistic_loss/Exp_grad/mul
gradients/logistic_loss/Neg_grad/Neg
gradients/logistic_loss/Abs_grad/Sign
gradients/logistic_loss/Abs_grad/mul
gradients/AddN
gradients/Log_grad/Inv
gradients/Log_grad/mul
gradients/div_grad/Shape
gradients/div_grad/Shape_1
gradients/div_grad/BroadcastGradientArgs
gradients/div_grad/truediv
gradients/div_grad/Sum
gradients/div_grad/Reshape
gradients/div_grad/Neg
gradients/div_grad/Square
gradients/div_grad/truediv_1
gradients/div_grad/mul
gradients/div_grad/Sum_1
gradients/div_grad/Reshape_1
gradients/sub_2_grad/Shape
gradients/sub_2_grad/Shape_1
gradients/sub_2_grad/BroadcastGradientArgs
gradients/sub_2_grad/Sum
gradients/sub_2_grad/Reshape
gradients/sub_2_grad/Sum_1
gradients/sub_2_grad/Neg
gradients/sub_2_grad/Reshape_1
gradients/AddN_1
gradients/clip_by_value_grad/Shape
gradients/clip_by_value_grad/Shape_1
gradients/clip_by_value_grad/Shape_2
gradients/clip_by_value_grad/zeros/Const
gradients/clip_by_value_grad/zeros
gradients/clip_by_value_grad/GreaterEqual
gradients/clip_by_value_grad/BroadcastGradientArgs
gradients/clip_by_value_grad/Select
gradients/clip_by_value_grad/LogicalNot
gradients/clip_by_value_grad/Select_1
gradients/clip_by_value_grad/Sum
gradients/clip_by_value_grad/Reshape
gradients/clip_by_value_grad/Sum_1
gradients/clip_by_value_grad/Reshape_1
gradients/clip_by_value/Minimum_grad/Shape
gradients/clip_by_value/Minimum_grad/Shape_1
gradients/clip_by_value/Minimum_grad/Shape_2
gradients/clip_by_value/Minimum_grad/zeros/Const
gradients/clip_by_value/Minimum_grad/zeros
gradients/clip_by_value/Minimum_grad/LessEqual
gradients/clip_by_value/Minimum_grad/BroadcastGradientArgs
gradients/clip_by_value/Minimum_grad/Select
gradients/clip_by_value/Minimum_grad/LogicalNot
gradients/clip_by_value/Minimum_grad/Select_1
gradients/clip_by_value/Minimum_grad/Sum
gradients/clip_by_value/Minimum_grad/Reshape
gradients/clip_by_value/Minimum_grad/Sum_1
gradients/clip_by_value/Minimum_grad/Reshape_1
gradients/Sigmoid_grad/sub/x
gradients/Sigmoid_grad/sub
gradients/Sigmoid_grad/mul
gradients/Sigmoid_grad/mul_1
gradients/add_1_grad/Shape
gradients/add_1_grad/Shape_1
gradients/add_1_grad/BroadcastGradientArgs
gradients/add_1_grad/Sum
gradients/add_1_grad/Reshape
gradients/add_1_grad/Sum_1
gradients/add_1_grad/Reshape_1
gradients/MatMul_1_grad/MatMul
gradients/MatMul_1_grad/MatMul_1
gradients/sub_grad/Shape
gradients/sub_grad/Shape_1
gradients/sub_grad/BroadcastGradientArgs
gradients/sub_grad/Sum
gradients/sub_grad/Reshape
gradients/sub_grad/Sum_1
gradients/sub_grad/Neg
gradients/sub_grad/Reshape_1
gradients/Relu_1_grad/ReluGrad
gradients/mul_grad/Shape
gradients/mul_grad/Shape_1
gradients/mul_grad/BroadcastGradientArgs
gradients/mul_grad/mul
gradients/mul_grad/Sum
gradients/mul_grad/Reshape
gradients/mul_grad/mul_1
gradients/mul_grad/Sum_1
gradients/mul_grad/Reshape_1
gradients/Relu_grad/ReluGrad
gradients/Neg_grad/Neg
gradients/AddN_2
gradients/add_grad/Shape
gradients/add_grad/Shape_1
gradients/add_grad/BroadcastGradientArgs
gradients/add_grad/Sum
gradients/add_grad/Reshape
gradients/add_grad/Sum_1
gradients/add_grad/Reshape_1
gradients/MatMul_grad/MatMul
gradients/MatMul_grad/MatMul_1
add_4/y
add_4
add_5/y
add_5
Pow
sub_4/x
sub_4
Cast_8/x
Cast_9/x
clip_by_value_2/Minimum
clip_by_value_2
Sqrt
mul_4
Pow_1
sub_5/x
sub_5
div_4
Variable_4/initial_value
Variable_4
Variable_4/Assign
Variable_4/read
Variable_5/initial_value
Variable_5
Variable_5/Assign
Variable_5/read
mul_5
sub_6/x
sub_6
mul_6
add_6
mul_7
sub_7/x
sub_7
Square
mul_8
add_7
mul_9
Cast_10/x
Cast_11/x
clip_by_value_3/Minimum
clip_by_value_3
Sqrt_1
add_8/y
add_8
div_5
sub_8
Variable_6/initial_value
Variable_6
Variable_6/Assign
Variable_6/read
Variable_7/initial_value
Variable_7
Variable_7/Assign
Variable_7/read
mul_10
sub_9/x
sub_9
mul_11
add_9
mul_12
sub_10/x
sub_10
Square_1
mul_13
add_10
mul_14
Cast_12/x
Cast_13/x
clip_by_value_4/Minimum
clip_by_value_4
Sqrt_2
add_11/y
add_11
div_6
sub_11
Variable_8/initial_value
Variable_8
Variable_8/Assign
Variable_8/read
Variable_9/initial_value
Variable_9
Variable_9/Assign
Variable_9/read
mul_15
sub_12/x
sub_12
mul_16
add_12
mul_17
sub_13/x
sub_13
Square_2
mul_18
add_13
mul_19
Cast_14/x
Cast_15/x
clip_by_value_5/Minimum
clip_by_value_5
Sqrt_3
add_14/y
add_14
div_7
sub_14
Variable_10/initial_value
Variable_10
Variable_10/Assign
Variable_10/read
Variable_11/initial_value
Variable_11
Variable_11/Assign
Variable_11/read
mul_20
sub_15/x
sub_15
mul_21
add_15
mul_22
sub_16/x
sub_16
Square_3
mul_23
add_16
mul_24
Cast_16/x
Cast_17/x
clip_by_value_6/Minimum
clip_by_value_6
Sqrt_4
add_17/y
add_17
div_8
sub_17
Assign
Assign_1
Assign_2
Assign_3
Assign_4
Assign_5
Assign_6
Assign_7
Assign_8
Assign_9
Assign_10
Assign_11
Assign_12
Assign_13
Assign_14
Assign_15
Assign_16
Assign_17
Assign_18
Assign_19
Assign_20
Assign_21
Assign_22
Assign_23
Assign_24
Assign_25
Assign_26/value
Assign_26
Assign_27/value
Assign_27
Assign_28/value
Assign_28
Assign_29/value
Assign_29
save/Const
save/save/tensor_names
save/save/shapes_and_slices
save/save
save/control_dependency
save/restore_slice/tensor_name
save/restore_slice/shape_and_slice
save/restore_slice
save/Assign
save/restore_slice_1/tensor_name
save/restore_slice_1/shape_and_slice
save/restore_slice_1
save/Assign_1
save/restore_slice_2/tensor_name
save/restore_slice_2/shape_and_slice
save/restore_slice_2
save/Assign_2
save/restore_slice_3/tensor_name
save/restore_slice_3/shape_and_slice
save/restore_slice_3
save/Assign_3
save/restore_slice_4/tensor_name
save/restore_slice_4/shape_and_slice
save/restore_slice_4
save/Assign_4
save/restore_slice_5/tensor_name
save/restore_slice_5/shape_and_slice
save/restore_slice_5
save/Assign_5
save/restore_slice_6/tensor_name
save/restore_slice_6/shape_and_slice
save/restore_slice_6
save/Assign_6
save/restore_slice_7/tensor_name
save/restore_slice_7/shape_and_slice
save/restore_slice_7
save/Assign_7
save/restore_slice_8/tensor_name
save/restore_slice_8/shape_and_slice
save/restore_slice_8
save/Assign_8
save/restore_slice_9/tensor_name
save/restore_slice_9/shape_and_slice
save/restore_slice_9
save/Assign_9
save/restore_slice_10/tensor_name
save/restore_slice_10/shape_and_slice
save/restore_slice_10
save/Assign_10
save/restore_slice_11/tensor_name
save/restore_slice_11/shape_and_slice
save/restore_slice_11
save/Assign_11
save/restore_slice_12/tensor_name
save/restore_slice_12/shape_and_slice
save/restore_slice_12
save/Assign_12
save/restore_slice_13/tensor_name
save/restore_slice_13/shape_and_slice
save/restore_slice_13
save/Assign_13
save/restore_slice_14/tensor_name
save/restore_slice_14/shape_and_slice
save/restore_slice_14
save/Assign_14
save/restore_slice_15/tensor_name
save/restore_slice_15/shape_and_slice
save/restore_slice_15
save/Assign_15
save/restore_all
save_1/Const
save_1/save/tensor_names
save_1/save/shapes_and_slices
save_1/save
save_1/control_dependency
save_1/restore_slice/tensor_name
save_1/restore_slice/shape_and_slice
save_1/restore_slice
save_1/Assign
save_1/restore_slice_1/tensor_name
save_1/restore_slice_1/shape_and_slice
save_1/restore_slice_1
save_1/Assign_1
save_1/restore_slice_2/tensor_name
save_1/restore_slice_2/shape_and_slice
save_1/restore_slice_2
save_1/Assign_2
save_1/restore_slice_3/tensor_name
save_1/restore_slice_3/shape_and_slice
save_1/restore_slice_3
save_1/Assign_3
save_1/restore_slice_4/tensor_name
save_1/restore_slice_4/shape_and_slice
save_1/restore_slice_4
save_1/Assign_4
save_1/restore_slice_5/tensor_name
save_1/restore_slice_5/shape_and_slice
save_1/restore_slice_5
save_1/Assign_5
save_1/restore_slice_6/tensor_name
save_1/restore_slice_6/shape_and_slice
save_1/restore_slice_6
save_1/Assign_6
save_1/restore_slice_7/tensor_name
save_1/restore_slice_7/shape_and_slice
save_1/restore_slice_7
save_1/Assign_7
save_1/restore_slice_8/tensor_name
save_1/restore_slice_8/shape_and_slice
save_1/restore_slice_8
save_1/Assign_8
save_1/restore_slice_9/tensor_name
save_1/restore_slice_9/shape_and_slice
save_1/restore_slice_9
save_1/Assign_9
save_1/restore_slice_10/tensor_name
save_1/restore_slice_10/shape_and_slice
save_1/restore_slice_10
save_1/Assign_10
save_1/restore_slice_11/tensor_name
save_1/restore_slice_11/shape_and_slice
save_1/restore_slice_11
save_1/Assign_11
save_1/restore_slice_12/tensor_name
save_1/restore_slice_12/shape_and_slice
save_1/restore_slice_12
save_1/Assign_12
save_1/restore_slice_13/tensor_name
save_1/restore_slice_13/shape_and_slice
save_1/restore_slice_13
save_1/Assign_13
save_1/restore_slice_14/tensor_name
save_1/restore_slice_14/shape_and_slice
save_1/restore_slice_14
save_1/Assign_14
save_1/restore_slice_15/tensor_name
save_1/restore_slice_15/shape_and_slice
save_1/restore_slice_15
save_1/Assign_15
save_1/restore_all

In [ ]: