In [1]:
import tensorflow as tf
import keras
import numpy as np
from keras import backend as K
# NOTE(review): BatchNormalization is imported but never used in the visible
# cells; this import path is also removed in Keras >= 2.4 — consider deleting.
from keras.layers.normalization import BatchNormalization
from keras.models import load_model
# Show the TF version. This notebook relies on TF1-style APIs
# (tf.Session, tf.ConfigProto, graph_util.convert_variables_to_constants).
tf.__version__
In [2]:
# If GPU is not available:
# GPU_USE = '/cpu:0'
# config = tf.ConfigProto(device_count = {"GPU": 0})
# If GPU is available:
config = tf.ConfigProto()
config.log_device_placement = True    # log which device each op is placed on
config.allow_soft_placement = True    # fall back to CPU when an op has no GPU kernel
config.gpu_options.allocator_type = 'BFC'  # best-fit-with-coalescing GPU allocator
# Limit the maximum memory used
config.gpu_options.per_process_gpu_memory_fraction = 0.1
# Set the configured session on the *standalone* Keras backend (imported as K).
# The original called tf.keras.backend.set_session, which only configures
# tf.keras — but this notebook loads the model with standalone keras
# (keras.models.load_model) and reads its session via K.get_session(), so the
# config would never have applied to it.
K.set_session(tf.Session(config=config))
In [3]:
# Input/output locations for the model-freezing pipeline.
modelname = "myModel"
input_path = "./"                                 # directory holding the .h5 model
output_path = "/var/www/html/tutorial-data/"      # where the frozen .pb is written
input_file = "{0}.h5".format(modelname)           # Keras HDF5 model file
output_file = "{0}.pb".format(modelname)          # frozen GraphDef file
output_node_prefix = "output_node"                # prefix for renamed output tensors
In [4]:
# Put Keras into inference mode (0 = test phase) before building any graph
# nodes, then load the trained model from disk.
K.set_learning_phase(0)
net_model = load_model(input_path + input_file)

# Give every model output a predictable name ("<output_node_prefix><index>")
# via tf.identity, so the frozen graph's output nodes can be found by name.
num_output = 1
pred_node_names = [output_node_prefix + str(idx) for idx in range(num_output)]
pred = [tf.identity(net_model.outputs[idx], name=pred_node_names[idx])
        for idx in range(num_output)]
print('output nodes names are: ', pred_node_names)
# NOTE: output_node_prefix is re-bound here to the first full node name.
output_node_prefix = pred_node_names[0]
In [5]:
# Grab the TF session Keras is using; needed below to freeze its variables.
sess = K.get_session()
In [6]:
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
# Fold all variables into constants so the graph is self-contained, keeping
# only the subgraph needed to compute the named output nodes.
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
# Serialize the frozen GraphDef as a binary .pb file.
graph_io.write_graph(constant_graph, output_path, output_file, as_text=False)
# Fixed message typo: "freezed" -> "frozen".
print('Saved the frozen graph at: ', (output_path + output_file))
In [7]:
# Sanity check: re-load the frozen graph and list every node name it contains.
g = tf.GraphDef()
# Use a context manager so the file handle is closed (the original leaked it).
with open(output_path + output_file, "rb") as f:
    g.ParseFromString(f.read())
# Build the listing with a single join instead of repeated string
# concatenation in a loop (O(n) instead of O(n^2)); output is byte-identical.
s = "".join(str(n.name) + "\n" for n in g.node)
print(s)