Intro

Notebook covering the use and core concepts of TensorFlow (v1.12.0).


In [1]:
import os
import sys
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from pathlib import Path

import tensorflow as tf

%matplotlib notebook
#%matplotlib inline

models_data_folder = Path.home() / "Documents/models/"

Core (Low Level APIs)

TensorFlow operations are arranged into a computational graph: the graph is about building, the session is about running. The graph nodes are represented by Operation objects, while the edges can be seen as Tensors flowing between the nodes. A Tensor does not hold values; it is just a handle returned by a function.


In [5]:
# create and add up two constants
a = tf.constant(3.0, dtype=tf.float32)
b = tf.constant(4.0)
total = a + b 
print(a)
print(b)
print(total)


Tensor("Const_4:0", shape=(), dtype=float32)
Tensor("Const_5:0", shape=(), dtype=float32)
Tensor("add_2:0", shape=(), dtype=float32)

In [9]:
# execute graph via a Session
sess = tf.Session()
print(sess.run(total))
print(sess.run({'ab': (a, b), 'total': total})) # request multiple tensors


7.0
{'ab': (3.0, 4.0), 'total': 7.0}

In [19]:
# placeholders: values are fed in at run time
x = tf.placeholder(tf.float32, name='x')
y = tf.placeholder(tf.float32, name='y')
z = x + y

sess = tf.Session()
print(sess.run(z, feed_dict={x: 3, y: 4}))


7.0
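
Since the placeholders were created without a shape, the same graph also accepts array feeds and computes element-wise (a minimal sketch, reusing the session above):

In [ ]:
# feed arrays instead of scalars; the addition is applied element-wise
print(sess.run(z, feed_dict={x: [1, 2], y: [3, 4]}))  # expected: [4. 6.]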

Eager Execution

Available since TensorFlow v1.7, eager execution provides an imperative programming environment that evaluates operations immediately. In this mode a Tensor object references concrete values, which can be used in other Python contexts such as a debugger or NumPy.


In [2]:
tf.enable_eager_execution() # enable eager mode; must be called once, at program start

In [9]:
a = 3.0
b = 4.0
res = tf.multiply(a, b)
res


Out[9]:
<tf.Tensor: id=18, shape=(), dtype=float32, numpy=12.0>

In [8]:
np.multiply(res, res)


Out[8]:
144.0
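
The NumPy interoperability works in both directions: an eager Tensor exposes its concrete value through numpy(), and scalar tensors also support plain Python conversions (a minimal sketch):

In [ ]:
# pull the concrete value out of an eager Tensor
print(res.numpy())       # 12.0
print(float(res) + 1.0)  # scalar eager tensors convert to Python numbers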

Dataset API

tf.data is a means to build input/pre-processing pipelines. It introduces the Dataset (a sequence of elements) and Iterator (access to the elements of a dataset) abstractions.

In eager mode one can iterate over a dataset as in plain Python code. Inside a session one instead needs to instantiate/initialize an iterator over the dataset and repeatedly run its get_next op (see the session-based sketch after the eager examples below).


In [2]:
tf.enable_eager_execution() # enable eager mode; must be called once, at program start

In [4]:
dataset = tf.data.Dataset.range(10)
print(dataset.output_types)
print(dataset.output_shapes)


<dtype: 'int64'>
()
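
Besides range, datasets can be built from in-memory data; a minimal sketch with from_tensor_slices on a NumPy array (iterated here in eager mode):

In [ ]:
# build a dataset from an in-memory NumPy array, one element per entry
arr = np.arange(5, dtype=np.int64)
for elem in tf.data.Dataset.from_tensor_slices(arr):
    print(elem)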

In [8]:
# apply custom function to each element of the dataset
dataset = dataset.map(lambda x: x + 1)
for i in dataset:
    print(i)


tf.Tensor(2, shape=(), dtype=int64)
tf.Tensor(3, shape=(), dtype=int64)
tf.Tensor(4, shape=(), dtype=int64)
tf.Tensor(5, shape=(), dtype=int64)
tf.Tensor(6, shape=(), dtype=int64)
tf.Tensor(7, shape=(), dtype=int64)
tf.Tensor(8, shape=(), dtype=int64)
tf.Tensor(9, shape=(), dtype=int64)
tf.Tensor(10, shape=(), dtype=int64)
tf.Tensor(11, shape=(), dtype=int64)

In [18]:
# define repetition, batching and buffering
dataset = tf.data.Dataset.range(10)
dataset = dataset.repeat(2)
dataset = dataset.batch(2)
iterator = dataset.make_one_shot_iterator()

In [19]:
for i in iterator:
    print(i)


tf.Tensor([0 1], shape=(2,), dtype=int64)
tf.Tensor([2 3], shape=(2,), dtype=int64)
tf.Tensor([4 5], shape=(2,), dtype=int64)
tf.Tensor([6 7], shape=(2,), dtype=int64)
tf.Tensor([8 9], shape=(2,), dtype=int64)
tf.Tensor([0 1], shape=(2,), dtype=int64)
tf.Tensor([2 3], shape=(2,), dtype=int64)
tf.Tensor([4 5], shape=(2,), dtype=int64)
tf.Tensor([6 7], shape=(2,), dtype=int64)
tf.Tensor([8 9], shape=(2,), dtype=int64)
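
The loop above relies on eager mode. For the session-based pattern mentioned earlier, get_next() returns a tensor that yields the next element on each run call, and exhaustion is signalled with OutOfRangeError. A minimal sketch (graph mode only, so it will not run in a process where eager execution was already enabled):

In [ ]:
# session-based iteration: run the get_next op until the dataset is exhausted
dataset = tf.data.Dataset.range(4).batch(2)
next_element = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    try:
        while True:
            print(sess.run(next_element))
    except tf.errors.OutOfRangeError:
        print("dataset exhausted")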

In [ ]:
# dummy variables
#v1 = tf.get_variable("v1", shape=[3], initializer=tf.zeros_initializer)
#v2 = tf.get_variable("v2", shape=[5], initializer=tf.zeros_initializer)
v1 = tf.Variable(tf.constant(0), name='v1')
v2 = tf.Variable(tf.constant(5), name='v2')

# dummy operations
inc_v1 = v1.assign(v1+1)
dec_v2 = v2.assign(v2-1)

In [ ]:
# Save variables

# def init op and saver
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()

# run some operations and save sessions
with tf.Session() as sess:
    sess.run(init_op)
    inc_v1.op.run()
    dec_v2.op.run()
    
    save_path = saver.save(sess,
                str(models_data_folder / 'tmp' / "model.ckpt"))
    print("Model saved in {}".format(save_path))

In [ ]:
# test behavior in new session (need to rerun initializer)
with tf.Session() as sess:
    sess.run(init_op)
    print(v1.eval())
    print(inc_v1.eval())
    print(v1.eval())

In [ ]:
# Restore Variables

# need to redefine the variable
v1 = tf.Variable(tf.constant(0), name='v1')

saver = tf.train.Saver()

with tf.Session() as sess:
    saver.restore(sess,
                 str(models_data_folder / 'tmp' / "model.ckpt"))
    
    #now v1 should have the value we previously saved
    print(v1.eval())
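
To double-check what was actually written, tf.train.list_variables enumerates the variable names and shapes stored in a checkpoint (a minimal sketch, assuming the checkpoint path used above):

In [ ]:
# list the variables stored in the checkpoint written above
ckpt_path = str(models_data_folder / 'tmp' / "model.ckpt")
for name, shape in tf.train.list_variables(ckpt_path):
    print(name, shape)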

Save and Restore a Model

Uses SavedModelBuilder instead of Saver. Open questions: should this be used only for serving? How can a model saved this way be reloaded for retraining?


In [ ]:
# directory where model will be exported
# include version info in model path as required by TF
version = 0
export_dir = str(models_data_folder / "tf_test_models_export" / str(version))

In [ ]:
# dummy model
x = tf.Variable(tf.constant(0), name='x')
y = tf.Variable(tf.constant(5), name='y')
f = tf.multiply(x, y, name='f')

In [ ]:
# save model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    #consider difference between eval and run
    #see: https://stackoverflow.com/questions/33610685/in-tensorflow-what-is-the-difference-between-session-run-and-tensor-eval
    #sess.run(f, feed_dict={x:3.0, y:5.0})
    
    fval = f.eval(feed_dict={x:3.0, y:5.0})
    print(fval)
    
    # Init builder
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    
    # Build info for inputs and outputs tensors
    #??Is the key associated with the tensor name?
    inputs = {
        'x' : tf.saved_model.utils.build_tensor_info(x),
        'y' : tf.saved_model.utils.build_tensor_info(y)
    }
    
    outputs = {
        'f' : tf.saved_model.utils.build_tensor_info(f)
    }
    
    # Define signature (set of inputs and outputs for the graph)
    prediction_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            # method used for the inference
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
        )
    )
    
    # Add meta-graph (dataflow graph, variables, assets, and signatures) 
    # to the builder
    builder.add_meta_graph_and_variables(
        sess=sess,
        tags=[tf.saved_model.tag_constants.SERVING],
        # ??
        signature_def_map={
            'predict' : prediction_signature
        },
        # ??
        #legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
    )
    
    # Finally save builder
    builder.save()
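
As a quick sanity check of the export, the saved_model_cli tool that ships with TensorFlow can dump the tags, signatures and tensor names of a SavedModel:

saved_model_cli show --dir <export_dir> --all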

In [ ]:
# Restore model

# redefine target
x = tf.Variable(tf.constant(1), name='x')
y = tf.Variable(tf.constant(5), name='y')
#f = tf.Operation(None, name='f')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
    #print(f.eval())
    
    mg = tf.saved_model.loader.load(
        sess=sess,
        tags=[tf.saved_model.tag_constants.SERVING],
        export_dir=export_dir
    )
    f = tf.get_default_graph().get_tensor_by_name("f:0")  # the tensor, not the op, so run() returns its value
    
    # ?? Why does the session graph keep accumulating new operations?
    # Isn't it cleared every time we exit the "with" scope?
    #print(sess.graph.get_operations())
    
    print(sess.run(f))
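
Regarding the question in the comments above: a Session runs the process-wide default graph, which persists across cells; exiting the "with" block closes the session but does not reset the graph, so operations accumulate. Loading into an explicitly fresh graph avoids this. A minimal sketch that also resolves the output tensor name from the saved signature instead of hard-coding it (assumes the 'predict' signature and export_dir from above):

In [ ]:
# load into a fresh graph and look up tensor names from the saved signature
with tf.Session(graph=tf.Graph()) as sess:
    mg = tf.saved_model.loader.load(
        sess=sess,
        tags=[tf.saved_model.tag_constants.SERVING],
        export_dir=export_dir)
    f_name = mg.signature_def['predict'].outputs['f'].name  # e.g. 'f:0'
    print(sess.run(f_name))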

Serving Client

Needs

pip install grpcio grpcio-tools

Plus the TensorFlow Serving API files.
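
If the *_pb2 modules are not available locally, they can be generated from the TensorFlow Serving .proto files with grpcio-tools; roughly (the path below assumes a checkout of the tensorflow/serving repo):

python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. tensorflow_serving/apis/*.proto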


In [ ]:
from grpc.beta import implementations

# reference local copy of Tensorflow Serving API Files
sys.path.append(str(Path.cwd().parents[1] / 'ext_libs'))
import lib.predict_pb2 as predict_pb2
import lib.prediction_service_pb2 as prediction_service_pb2

In [ ]:
host='127.0.0.1'
port=9000
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

# build request
request = predict_pb2.PredictRequest()
request.model_spec.name = 'ed' # model name, as given to the bazel script
request.model_spec.signature_name = 'predict' # as defined in ModelBuilder

# define inputs
x = 3
y = 4
x_tensor = tf.contrib.util.make_tensor_proto(x, dtype=tf.int32)
y_tensor = tf.contrib.util.make_tensor_proto(y, dtype=tf.int32)
request.inputs['x'].CopyFrom(x_tensor)
request.inputs['y'].CopyFrom(y_tensor)

# call prediction on the server
result = stub.Predict(request, timeout=10.0)

In [ ]:
result
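
The response is a PredictResponse proto. Its values can be extracted with make_ndarray, the inverse of the make_tensor_proto call used above (a minimal sketch, assuming the 'f' output key defined at export time):

In [ ]:
# extract the output tensor from the response proto as a NumPy value
f_val = tf.contrib.util.make_ndarray(result.outputs['f'])
print(f_val)  # expected: 12 for x=3, y=4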