In [ ]:
from tensorflow.python.tools import freeze_graph

# Freeze the unoptimized graph: merge the checkpointed variable values into
# the GraphDef as constants so the model can be loaded without a checkpoint.
optimize_me_parent_path = '/root/models/optimize_me/linear/cpu'
unoptimized_model_graph_path = '%s/unoptimized_cpu.pb' % optimize_me_parent_path
unoptimized_frozen_model_graph_path = '%s/unoptimized_frozen_cpu.pb' % optimize_me_parent_path
model_checkpoint_path = '%s/model.ckpt' % optimize_me_parent_path

freeze_graph.freeze_graph(input_graph=unoptimized_model_graph_path,
                          input_saver="",
                          input_binary=True,
                          # Reuse the variable defined above instead of
                          # duplicating the checkpoint path as a literal.
                          input_checkpoint=model_checkpoint_path,
                          # Keep only the subgraph needed to compute "add".
                          output_node_names="add",
                          restore_op_name="save/restore_all",
                          filename_tensor_name="save/Const:0",
                          output_graph=unoptimized_frozen_model_graph_path,
                          clear_devices=True,
                          initializer_nodes="")
print(unoptimized_frozen_model_graph_path)
In [ ]:
%%bash
# Confirm the frozen .pb was written next to the checkpoint files.
ls -l /root/models/optimize_me/linear/cpu/
In [ ]:
%%bash
# Print node/op counts and detected inputs/outputs of the frozen graph
# (summarize_graph is a TensorFlow graph-transform tool on PATH).
summarize_graph --in_graph=/root/models/optimize_me/linear/cpu/unoptimized_frozen_cpu.pb
In [ ]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
def convert_graph_to_dot(input_graph, output_dot, is_input_graph_binary):
    """Render a TensorFlow GraphDef as a Graphviz .dot file.

    Args:
        input_graph: path to a serialized GraphDef (binary or text proto).
        output_dot: destination path for the generated .dot file.
        is_input_graph_binary: True if ``input_graph`` is a binary proto,
            False if it is a text-format proto.
    """
    graph_def = graph_pb2.GraphDef()
    with open(input_graph, "rb") as infile:
        raw = infile.read()
        if is_input_graph_binary:
            graph_def.ParseFromString(raw)
        else:
            text_format.Merge(raw, graph_def)

    with open(output_dot, "wt") as outfile:
        print("digraph graphname {", file=outfile)
        for node in graph_def.node:
            target = node.name
            # One vertex per node, labeled with its op type.
            print(f' "{target}" [label="{node.op}"];', file=outfile)
            for edge_source in node.input:
                # Inputs look like "name:port" or "^name" (control edges);
                # keep just the bare producer node name.
                producer = re.sub(r"^\^", "", edge_source.split(":")[0])
                print(f' "{producer}" -> "{target}";', file=outfile)
        print("}", file=outfile)
    print("Created dot file '%s' for graph '%s'." % (output_dot, input_graph))
In [ ]:
# Render the frozen graph to Graphviz .dot format for visual inspection.
input_graph = '/root/models/optimize_me/linear/cpu/unoptimized_frozen_cpu.pb'
output_dot = '/root/notebooks/unoptimized_frozen_cpu.dot'
convert_graph_to_dot(input_graph=input_graph,
                     output_dot=output_dot,
                     is_input_graph_binary=True)
In [ ]:
%%bash
# Rasterize the .dot file to PNG; stdout is discarded to /tmp/a.out so the
# notebook cell stays quiet.
dot -T png /root/notebooks/unoptimized_frozen_cpu.dot \
    -o /root/notebooks/unoptimized_frozen_cpu.png > /tmp/a.out
In [ ]:
# Display the rendered graph image inline in the notebook.
from IPython.display import Image
Image('/root/notebooks/unoptimized_frozen_cpu.png')
In [ ]:
%%bash
# Benchmark inference latency of the unoptimized frozen graph.
# Three inputs (weights, bias, x_observed), all float; empty shapes ("::")
# let the tool use each placeholder's default shape.
benchmark_model --graph=/root/models/optimize_me/linear/cpu/unoptimized_frozen_cpu.pb \
    --input_layer=weights,bias,x_observed \
    --input_layer_type=float,float,float \
    --input_layer_shape=:: \
    --output_layer=add
In [ ]:
import tensorflow as tf

# Clear any graph state left over from earlier cells before rebuilding.
tf.reset_default_graph()
In [ ]:
# Session used for the restore/export steps below; closed in a later cell.
sess = tf.Session()
In [ ]:
from datetime import datetime

# Derive a monotonically increasing SavedModel version number from the
# current Unix time. NOTE: the original used strftime("%s"), which is a
# non-portable glibc extension (not documented by Python, breaks on
# Windows); datetime.timestamp() is the supported equivalent.
version = int(datetime.now().timestamp())
In [ ]:
%%bash
# Dump the variable names/shapes stored in the checkpoint.
inspect_checkpoint --file_name=/root/models/optimize_me/linear/cpu/model.ckpt
In [ ]:
# Rebuild the training graph from its MetaGraph and restore variable values,
# then additionally import the frozen GraphDef into the same default graph.
saver = tf.train.import_meta_graph('/root/models/optimize_me/linear/cpu/model.ckpt.meta')
saver.restore(sess, '/root/models/optimize_me/linear/cpu/model.ckpt')

optimize_me_parent_path = '/root/models/optimize_me/linear/cpu'
unoptimized_frozen_model_graph_path = '%s/unoptimized_frozen_cpu.pb' % optimize_me_parent_path
print(unoptimized_frozen_model_graph_path)

with tf.gfile.GFile(unoptimized_frozen_model_graph_path, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    # name="" imports nodes without a prefix, so tensor names like
    # "weights:0" resolve directly below.
    tf.import_graph_def(
        graph_def,
        input_map=None,
        return_elements=None,
        name="",
        op_dict=None,
        producer_op_list=None
    )

# Sanity-check the restored variable values.
print("weights = ", sess.run("weights:0"))
print("bias = ", sess.run("bias:0"))
In [ ]:
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils

# Build a PREDICT SignatureDef mapping x_observed -> add ("y_pred") so
# TensorFlow Serving clients can call the model by tensor alias.
graph = tf.get_default_graph()
x_observed = graph.get_tensor_by_name('x_observed:0')
y_pred = graph.get_tensor_by_name('add:0')

tensor_info_x_observed = utils.build_tensor_info(x_observed)
print(tensor_info_x_observed)
tensor_info_y_pred = utils.build_tensor_info(y_pred)
print(tensor_info_y_pred)

# Consumed by the SavedModelBuilder cell below.
prediction_signature = signature_def_utils.build_signature_def(inputs =
                {'x_observed': tensor_info_x_observed},
                outputs = {'y_pred': tensor_info_y_pred},
                method_name = signature_constants.PREDICT_METHOD_NAME)
In [ ]:
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants

# Export a versioned SavedModel directory for TensorFlow Serving.
# `version` comes from the timestamp cell above.
unoptimized_saved_model_path = '/root/models/linear_unoptimized/cpu/%s' % version
print(unoptimized_saved_model_path)

builder = saved_model_builder.SavedModelBuilder(unoptimized_saved_model_path)
# Register the signature under both a custom key ('predict') and the
# default serving key so either lookup works at serving time.
builder.add_meta_graph_and_variables(sess,
                              [tag_constants.SERVING],
                              signature_def_map={'predict':prediction_signature,
    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:prediction_signature},
                              clear_devices=True,
)
builder.save(as_text=False)
In [ ]:
import os

# Verify the SavedModel layout: saved_model.pb plus a variables/ subdir.
print(unoptimized_saved_model_path)
os.listdir(unoptimized_saved_model_path)
os.listdir('%s/variables' % unoptimized_saved_model_path)
In [ ]:
# Release the session now that the SavedModel export is complete.
sess.close()
Start TensorFlow Serving, pointing it at the unoptimized version of the model:
tensorflow_model_server \
--port=9000 \
--model_name=linear \
--model_base_path=/root/models/linear_unoptimized/cpu/ \
--enable_batching=false
The params are as follows:
- port (int)
- model_name (anything)
- model_base_path (/path/to/model/ above all versioned sub-directories)
- enable_batching (true|false)
In [ ]: