Hello World: TensorFlow on Android

This notebook covers the basics of creating your first Android app based on TensorFlow. I've created a small DNN to classify the Iris dataset, and I discuss training it in detail on my YouTube channel. In this notebook I walk through what we need to do on the Python side.

Step 1: Train a deep network

Step 2: Save the TF graph and model parameters

Step 3: Make the model ready for inference and export it


In [1]:
#import desired packages
import tensorflow as tf
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os.path
import sys
# library for freezing the graph
from tensorflow.python.tools import freeze_graph
# library for optimising the graph for inference
from tensorflow.python.tools import optimize_for_inference_lib
%matplotlib inline

Preparing data for training


In [ ]:
#import data 
data=pd.read_csv('/Users/Enkay/Documents/Viky/python/tensorflow/iris/iris.data', names=['f1','f2','f3','f4','f5'])


#map data into arrays
s=np.asarray([1,0,0])
ve=np.asarray([0,1,0])
vi=np.asarray([0,0,1])
data['f5'] = data['f5'].map({'Iris-setosa': s, 'Iris-versicolor': ve,'Iris-virginica':vi})


#shuffle the data
data=data.iloc[np.random.permutation(len(data))]
data=data.reset_index(drop=True)


#training data
x_input=data.loc[0:105,['f1','f2','f3','f4']]
y_input=data['f5'][0:106]

#test data
x_test=data.loc[106:149,['f1','f2','f3','f4']]
y_test=data['f5'][106:150]
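
The 'f5' column now holds one-hot numpy arrays rather than scalars; the training feed below passes it as a list of these arrays. A minimal sketch of stacking it into a dense matrix just to check its shape (y_train_matrix is an illustrative name, not used elsewhere in this notebook):

In [ ]:
# stack the per-row one-hot label arrays into a single (N, 3) matrix
y_train_matrix = np.stack(list(y_input))
print(y_train_matrix.shape)  # expected: (106, 3)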

Step 1: Train a deep network


In [ ]:
#placeholders and variables. input has 4 features and output has 3 classes
x=tf.placeholder(tf.float32,shape=[None,4] , name="Input")
y_=tf.placeholder(tf.float32,shape=[None, 3])
#weight and bias
W=tf.Variable(tf.zeros([4,3]))
b=tf.Variable(tf.zeros([3]))

# model 
#softmax function for multiclass classification
y = tf.nn.softmax((tf.matmul(x, W) + b),name="output")

# Cost function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

# Optimiser 
train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy)
#calculating accuracy of our model 
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
output=tf.argmax(y,axis=1)

#session parameters
sess = tf.InteractiveSession()
#initialising variables
init = tf.global_variables_initializer()
sess.run(init)
# number of training iterations
epoch=2000

In [ ]:
#Training 
for step in range(epoch):
    _, loss=sess.run([train_step,cross_entropy], feed_dict={x: x_input, y_:[t for t in y_input.as_matrix()]})
    if step % 500 == 0:
        print(loss)
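
Once training finishes, it is worth checking the held-out rows with the accuracy op defined above. A minimal sketch, assuming the session and the x_test/y_test slices from earlier are still in scope:

In [ ]:
# evaluate accuracy on the test split
print(sess.run(accuracy, feed_dict={x: x_test, y_: [t for t in y_test.as_matrix()]}))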

In [ ]:
# grabbing the default graph
g = tf.get_default_graph()

# every operation in our graph
[op.name for op in g.get_operations()]
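
Before exporting, it helps to confirm that the node names referenced later ("Input" and "output") actually exist in the graph. A minimal check using the default graph grabbed above:

In [ ]:
# look up the named input and output tensors to confirm they exist
print(g.get_tensor_by_name("Input:0"))
print(g.get_tensor_by_name("output:0"))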

Step 2: Saving the model


In [ ]:
saver = tf.train.Saver()
model_directory='model_files/'
if not os.path.exists(model_directory):
    os.makedirs(model_directory)
# saving the graph definition in text format (.pbtxt)
tf.train.write_graph(sess.graph_def, model_directory, 'savegraph.pbtxt')

In [ ]:
# saving the values of weights and other parameters of the model
saver.save(sess, 'model_files/model.ckpt')
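
To make sure the checkpoint is usable, one can restore it into a fresh session and re-run the accuracy check. A minimal sketch (check_sess is an illustrative name; it assumes the graph built above is still the default graph):

In [ ]:
# restore the saved variables into a new session and verify the model still works
with tf.Session() as check_sess:
    saver.restore(check_sess, 'model_files/model.ckpt')
    print(check_sess.run(accuracy, feed_dict={x: x_test, y_: [t for t in y_test.as_matrix()]}))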

Step 3: Make the model ready for inference

Freezing the model


In [2]:
# Freeze the graph
MODEL_NAME = 'iris'
input_graph_path = 'model_files/savegraph.pbtxt'
checkpoint_path = 'model_files/model.ckpt'
input_saver_def_path = ""
input_binary = False
output_node_names = "output"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_frozen_graph_name = 'model_files/frozen_model_'+MODEL_NAME+'.pb'
output_optimized_graph_name = 'model_files/optimized_inference_model_'+MODEL_NAME+'.pb'
clear_devices = True

In [ ]:
freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                          input_binary, checkpoint_path, output_node_names,
                          restore_op_name, filename_tensor_name,
                          output_frozen_graph_name, clear_devices, "")
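
A quick way to verify the frozen graph is self-contained is to load it into a clean graph and push one sample through it. A minimal sketch (the four feature values are made up for illustration):

In [ ]:
# load the frozen .pb into a fresh graph and run a single prediction
frozen_graph_def = tf.GraphDef()
with tf.gfile.GFile(output_frozen_graph_name, "rb") as f:
    frozen_graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as frozen_graph:
    tf.import_graph_def(frozen_graph_def, name="")
    with tf.Session(graph=frozen_graph) as frozen_sess:
        print(frozen_sess.run("output:0", feed_dict={"Input:0": [[5.1, 3.5, 1.4, 0.2]]}))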

In [ ]:
# optimize the graph for inference
# note: this passes the live session graph, so the weights are still variables here (not yet folded into constants)
output_graph_def = optimize_for_inference_lib.optimize_for_inference(
    sess.graph_def,
    ["Input"],   # an array of the input node(s)
    ["output"],  # an array of the output node(s)
    tf.float32.as_datatype_enum)
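
In most deployment pipelines this optimization step is applied to the frozen graph rather than the live session graph, so the exported file carries constants instead of variables. A minimal sketch of that variant (it reuses the frozen .pb written above; note this is not how the optimized file in this notebook was produced):

In [ ]:
# alternative: optimize the frozen graph, so the weights are already baked in as constants
frozen_def = tf.GraphDef()
with tf.gfile.GFile(output_frozen_graph_name, "rb") as f:
    frozen_def.ParseFromString(f.read())

optimized_frozen_def = optimize_for_inference_lib.optimize_for_inference(
    frozen_def,
    ["Input"],
    ["output"],
    tf.float32.as_datatype_enum)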

Cross-check whether the input and output nodes are present in the graph def


In [ ]:
output_graph_def

In [ ]:
with tf.gfile.GFile(output_optimized_graph_name, "wb") as f:
    f.write(output_graph_def.SerializeToString())

Cross-checking the input and output nodes in the exported .pb files


In [11]:
g = tf.GraphDef()
# checking the optimized graph
g.ParseFromString(open(output_optimized_graph_name, 'rb').read())
g


Out[11]:
node {
  name: "Input"
  op: "Placeholder"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
}
node {
  name: "Variable"
  op: "VariableV2"
  attr {
    key: "container"
    value {
      s: ""
    }
  }
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "shape"
    value {
      shape {
        dim {
          size: 4
        }
        dim {
          size: 3
        }
      }
    }
  }
  attr {
    key: "shared_name"
    value {
      s: ""
    }
  }
}
node {
  name: "Variable_1"
  op: "VariableV2"
  attr {
    key: "container"
    value {
      s: ""
    }
  }
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "shape"
    value {
      shape {
        dim {
          size: 3
        }
      }
    }
  }
  attr {
    key: "shared_name"
    value {
      s: ""
    }
  }
}
node {
  name: "MatMul"
  op: "MatMul"
  input: "Input"
  input: "Variable"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "transpose_a"
    value {
      b: false
    }
  }
  attr {
    key: "transpose_b"
    value {
      b: false
    }
  }
}
node {
  name: "add"
  op: "Add"
  input: "MatMul"
  input: "Variable_1"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}
node {
  name: "output"
  op: "Softmax"
  input: "add"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}

In [13]:
g1 = tf.GraphDef()
# checking the frozen graph
g1.ParseFromString(open(output_frozen_graph_name, 'rb').read())
g1


Out[13]:
node {
  name: "Input"
  op: "Placeholder"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "shape"
    value {
      shape {
        dim {
          size: -1
        }
        dim {
          size: 4
        }
      }
    }
  }
}
node {
  name: "Variable"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
          dim {
            size: 4
          }
          dim {
            size: 3
          }
        }
        tensor_content: "\320\267L?\035-f?\266K\276\277\200\0178@\001\374\304\277\302U\177\300<\346c\300\367\276\341=sf\206@\337\331f\300\3216}\300\005\200\211@"
      }
    }
  }
}
node {
  name: "Variable/read"
  op: "Identity"
  input: "Variable"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@Variable"
      }
    }
  }
}
node {
  name: "Variable_1"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
          dim {
            size: 3
          }
        }
        tensor_content: "\374\361v@\004?\320@\264[\257\300"
      }
    }
  }
}
node {
  name: "Variable_1/read"
  op: "Identity"
  input: "Variable_1"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@Variable_1"
      }
    }
  }
}
node {
  name: "MatMul"
  op: "MatMul"
  input: "Input"
  input: "Variable/read"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "transpose_a"
    value {
      b: false
    }
  }
  attr {
    key: "transpose_b"
    value {
      b: false
    }
  }
}
node {
  name: "add"
  op: "Add"
  input: "MatMul"
  input: "Variable_1/read"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}
node {
  name: "output"
  op: "Softmax"
  input: "add"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}
library {
}

In [14]:
g1==g


Out[14]:
False
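
The two GraphDefs are not equal: the frozen graph stores the weights as Const nodes (with Variable/read identities), while the optimized graph above was produced from the live session graph and still contains VariableV2 nodes. A minimal sketch for comparing them node by node instead of by whole-proto equality:

In [ ]:
# list node names and ops of each graph to see where they differ
print([(n.name, n.op) for n in g.node])
print([(n.name, n.op) for n in g1.node])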

In [ ]: