In [1]:
import tensorflow as tf
import keras
import numpy as np

from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.models import load_model

tf.__version__


/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
  return f(*args, **kwds)
/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
  return f(*args, **kwds)
Using TensorFlow backend.

GPU and CPU settings

If a GPU is not available, uncomment the CPU block at the top of the next cell and comment out the GPU block below it.


In [2]:
# If a GPU is NOT available, use this instead of the GPU block below:
# GPU_USE = '/cpu:0'
# config = tf.ConfigProto(device_count = {"GPU": 0})


# If a GPU is available:
config = tf.ConfigProto()
config.log_device_placement = True    # log which device each op is placed on
config.allow_soft_placement = True    # fall back to CPU for ops without a GPU kernel
config.gpu_options.allocator_type = 'BFC'

# Limit the fraction of total GPU memory this process may allocate
config.gpu_options.per_process_gpu_memory_fraction = 0.1

# Install the configured session on the *standalone* Keras backend (K).
# The rest of this notebook uses K (K.set_learning_phase, K.get_session);
# tf.keras.backend is a separate Keras instance, so setting the session
# there would leave K.get_session() returning an unconfigured session.
K.set_session(tf.Session(config=config))

Define input and output


In [3]:
# Names and locations of the input (Keras .h5) and output (frozen .pb) files.
modelname = "myModel"

input_path = "./"
input_file = f"{modelname}.h5"   # trained Keras model (HDF5)

output_path = "/var/www/html/tutorial-data/"
output_file = f"{modelname}.pb"  # frozen TensorFlow graph (binary protobuf)

output_node_prefix = "output_node"

In [4]:
# Put Keras in inference mode (disables dropout / BN training behavior)
# BEFORE loading the model, so the graph is built for inference.
K.set_learning_phase(0)
net_model = load_model(input_path + input_file)

# Attach a tf.identity op with a predictable name to every model output so
# the frozen graph exposes well-known output node names.
# Generalized from a hard-coded single output to all model outputs
# (identical result for a one-output model).
num_output = len(net_model.outputs)
pred_node_names = [output_node_prefix + str(i) for i in range(num_output)]
pred = [tf.identity(net_model.outputs[i], name=name)
        for i, name in enumerate(pred_node_names)]
print('output nodes names are: ', pred_node_names)
# NOTE: the original cell reassigned output_node_prefix = pred_node_names[0]
# here; that clobbered the config constant and the value was never read
# again, so the dead reassignment has been removed.


output nodes names are:  ['output_node0']

In [5]:
# Grab the TF session used by the Keras backend; it holds the loaded model's
# graph and variable values, which the freezing step below reads from.
sess = K.get_session()

Export model


In [6]:
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io

# "Freeze" the graph: replace every variable with a constant holding its
# current value, keeping only the subgraph needed for the listed outputs.
constant_graph = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), pred_node_names)

# Serialize as a binary protobuf (.pb), not the text format.
graph_io.write_graph(constant_graph, output_path, output_file, as_text=False)

# Fixed message typo: "freezed" -> "frozen".
print('Saved the frozen graph at: ', (output_path + output_file))


INFO:tensorflow:Froze 4 variables.
INFO:tensorflow:Converted 4 variables to const ops.
Saved the freezed graph at:  /var/www/html/tutorial-data/myModel.pb

Show input and output node


In [7]:
# Re-read the exported .pb and print every node name, so the input and
# output node names can be verified.
g = tf.GraphDef()
# Use a context manager so the file handle is closed deterministically
# (the original open(...).read() leaked the handle).
with open(output_path + output_file, "rb") as f:
    g.ParseFromString(f.read())

# Join once instead of repeated string concatenation in a loop (O(n) vs O(n^2)).
s = "\n".join(str(n.name) for n in g.node)

print(s)


dense_input
dense/kernel
dense/kernel/read
dense/bias
dense/bias/read
dense/MatMul
dense/BiasAdd
dense/Relu
dense_1/kernel
dense_1/kernel/read
dense_1/bias
dense_1/bias/read
dense_1/MatMul
dense_1/BiasAdd
dense_1/Softmax
output_node0