In [7]:
from __future__ import print_function
import tensorflow as tf
In [3]:
# Basic interactive session
# Enter an interactive TensorFlow session. InteractiveSession installs itself
# as the default session, so Operation.run() and Tensor.eval() work without
# an explicit `with sess.as_default():` block.
sess = tf.InteractiveSession()
# Define a variable and a constant
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
# Initialize the var 'x' using the run() method
x.initializer.run()
# Add an op to subtract 'a' from 'x'. Run it and print the result.
# FIX: tf.sub() was removed in TensorFlow 1.0; tf.subtract() is the current
# name of the same element-wise subtraction op.
sub = tf.subtract(x, a)
print(sub.eval())
# ==> [-2. -1.]
# Close the Session when we're done.
sess.close()
In [4]:
# Get some data
# Download (if not cached) and load MNIST; one_hot=True encodes each label
# as a length-10 one-hot vector.
# NOTE(review): hardcoded absolute path — adjust to your environment.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/home/ubuntu/data/training/image/mnist', one_hot=True)
In [9]:
# Interactive session to train a model (softmax regression on MNIST)
import tensorflow as tf
import numpy as np
# Start interactive session
sess = tf.InteractiveSession()
# Declare input placeholders: flattened 28x28 images and one-hot labels
x = tf.placeholder(tf.float32, shape=[None, 784])
y = tf.placeholder(tf.float32, shape=[None, 10])
# Trainable variables
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Model: single-layer softmax classifier
y_pred = tf.nn.softmax(tf.matmul(x, W) + b)
# Loss: mean over the batch of the per-example cross-entropy
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_pred), reduction_indices=[1]))
# Trainer: plain gradient descent with learning rate 0.5
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Loop to train the model: 30 batches of 500 cases
# (original comment said 100 cases, but next_batch(500) is used below).
# FIX: tf.initialize_all_variables() was deprecated in TF 0.12 and removed
# in TF 1.0; tf.global_variables_initializer() is the replacement.
sess.run(tf.global_variables_initializer())
for i in range(30):
    batch = mnist.train.next_batch(500)
    train_step.run(feed_dict={x: batch[0], y: batch[1]})
    print(i, ' - ', cross_entropy.eval(feed_dict={x: batch[0], y: batch[1]}))
In [11]:
# Evaluate variables
# Evaluate trainable variables after training
print(b.eval())
print(np.max(W.eval()))
# Evaluate the model's output on the last training batch.
# BUG FIX: the original evaluated the placeholder `y`, which simply echoes
# back the labels passed in feed_dict; to see the model's predictions we
# must evaluate `y_pred` (labels are not needed in the feed for that).
print(y_pred.eval(feed_dict={x: batch[0]}))
In [12]:
# Close the Session when we're done, releasing its resources.
sess.close()
In [ ]:
In [ ]:
In [ ]:
# Basic usage in batch (non-interactive) mode
# Define a graph
graph = tf.Graph()
with graph.as_default():
    # graph definition goes here (placeholders, variables, ops, optimizer)
    pass

# Execute the graph to train a network.
# NOTE(review): nEpochs, batch_list, optimizer, param1_in and param2_in are
# placeholders for names defined by the graph above — this cell is a template.
with tf.Session(graph=graph) as session:
    print('Initializing')
    # FIX: tf.initialize_all_variables() was removed in TF 1.0; use
    # tf.global_variables_initializer() instead.
    tf.global_variables_initializer().run()
    for epoch in range(nEpochs):
        for batch in batch_list:
            feedDict = {}  # dictionary of batch data to run the graph
            _, param1_out, param2_out = session.run([optimizer, param1_in, param2_in], feed_dict=feedDict)
# Executing a graph to score data follows the same pattern, without the optimizer.
In [ ]:
# SELECT DEVICE
# Pin the ops created inside this context to the first CPU.
with tf.device('/cpu:0'):
    # Include here the graph operations for the CPU.
    pass

# Create a session with log_device_placement set to True so TensorFlow
# reports on which device each op is placed.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

# LIMIT THE MEMORY OF THE GPU
# Assume that you have 12GB of GPU memory and want to allocate ~4GB:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
In [ ]:
# List of variables saved in a model file
# NewCheckpointReader opens a checkpoint without rebuilding any graph;
# debug_string() dumps every saved tensor's name, dtype and shape.
# NOTE(review): hardcoded absolute path — adjust to your environment.
path_model = '/home/jorge/data/tesis/handwriting/p05_ctc/IAM_corleone_first_model/'
reader = tf.train.NewCheckpointReader(path_model + "modelCTC_original_images_01_epoch_95.ckpt")
print(reader.debug_string().decode("utf-8"))
In [ ]:
In [14]:
# Create and save a model
import tensorflow as tf
# Load data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/MNIST_data', one_hot=True)
sess = tf.InteractiveSession()
# Define graph. Ops are given explicit names so they can be located again
# after the graph is restored from disk.
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y = tf.placeholder(tf.float32, shape=[None, 10], name='y')
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Prediction
y_pred = tf.nn.softmax(tf.matmul(x, W) + b, name='y_pred')
# Loss
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred), name='cross_entropy')
# Train graph
train_step = tf.train.GradientDescentOptimizer(0.01, name='train_step').minimize(cross_entropy)
# Initialize graph vars.
# FIX: tf.initialize_all_variables() was removed in TF 1.0; use
# tf.global_variables_initializer() instead.
sess.run(tf.global_variables_initializer())
for i in range(100):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y: batch[1]})
# Predict and evaluate
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='Accuracy')
print('Accuracy test', accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}))
# Add to collections the ops that we will need after restoring the model:
# - For train: all the placeholders and the train_step
# - For score: the x placeholder and y_pred
# - For validation: all placeholders plus loss & accuracy
# BUG FIX: these calls were commented out in the original, but the
# "continue training", "score" and "evaluate" cells below retrieve them
# with tf.get_collection(...)[0] and would fail with an IndexError on an
# empty collection.
tf.add_to_collection('x', x)
tf.add_to_collection('y', y)
tf.add_to_collection('train_step', train_step)
tf.add_to_collection('y_pred', y_pred)
tf.add_to_collection('cross_entropy', cross_entropy)
tf.add_to_collection('accuracy', accuracy)
# Create a saver and save the weights (max_to_keep=0 keeps every checkpoint).
saver = tf.train.Saver(max_to_keep=0)
saver.save(sess, '/tmp/my-model')
# Close session
sess.close()
In [1]:
# Continue training a previously saved model
import tensorflow as tf
# Load data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/MNIST_data', one_hot=True)
sess = tf.InteractiveSession()
# Load model: import_meta_graph rebuilds the graph, restore() loads weights
new_saver = tf.train.import_meta_graph('/tmp/my-model.meta')
new_saver.restore(sess, '/tmp/my-model')
# Load vars from the saved collections.
# BUG FIX: these two lines were commented out in the original, but `x` and
# `y` are used in the feed_dicts below — without them this cell raises
# NameError on a fresh kernel.
x = tf.get_collection('x')[0]
y = tf.get_collection('y')[0]
# Continue training
train_step = tf.get_collection('train_step')[0]
for i in range(900):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y: batch[1]})
accuracy = tf.get_collection('accuracy')[0]
print('Accuracy test', accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}))
sess.close()
In [1]:
# Score new data with a previously saved model
import tensorflow as tf
# Load data
data_path = '/home/jorge/data/training/tensorflow/'
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(data_path + 'MNIST_data', one_hot=True)
sess = tf.InteractiveSession()
# Rebuild the saved graph, then restore its weights into the session
restorer = tf.train.import_meta_graph('/tmp/my-model.meta')
restorer.restore(sess, '/tmp/my-model')
# Recover the ops needed for scoring from the saved collections
x = tf.get_collection('x')[0]
y_pred = tf.get_collection('y_pred')[0]
# Score the first two test images; no labels are needed for scoring
print('Prediction test', y_pred.eval(feed_dict={x: mnist.test.images[0:2]}))
sess.close()
In [1]:
# Evaluate a previously saved model on the test set
import tensorflow as tf
# Load data
data_path = '/home/jorge/data/training/tensorflow/'
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(data_path + 'MNIST_data', one_hot=True)
sess = tf.InteractiveSession()
# Rebuild the saved graph, then restore its weights into the session
restorer = tf.train.import_meta_graph('/tmp/my-model.meta')
restorer.restore(sess, '/tmp/my-model')
# Recover the placeholders and the metric ops from the saved collections
x = tf.get_collection('x')[0]
y = tf.get_collection('y')[0]
accuracy = tf.get_collection('accuracy')[0]
cross_entropy = tf.get_collection('cross_entropy')[0]
# Evaluate both metrics over the full test set
test_feed = {x: mnist.test.images, y: mnist.test.labels}
print('cross_entropy test', cross_entropy.eval(feed_dict=test_feed))
print('Accuracy test', accuracy.eval(feed_dict=test_feed))
sess.close()
In [ ]:
In [ ]:
# Freeze a graph: convert all variables to constants and export one .pb file
sess = tf.InteractiveSession()
### create some graph here ###
##############################
graph_def = sess.graph.as_graph_def()
output_node_names = "output0,output1"  # put the names of the output nodes here
# BUG FIX: graph_util was never imported and output_graph_file was never
# defined in the original cell; both raised NameError on a fresh kernel.
from tensorflow.python.framework import graph_util
output_graph_file = '/tmp/frozen_graph.pb'  # destination of the frozen graph
# Freeze all parameters (replace variables with constants) and save
output_graph_def = graph_util.convert_variables_to_constants(
    sess, graph_def, output_node_names.split(","))
with tf.gfile.GFile(output_graph_file, "wb") as f:
    f.write(output_graph_def.SerializeToString())
1. Build the benchmark binary:
$bazel build -c opt tensorflow/tools/benchmark:benchmark_model
2. Run it on your computation graph:
$bazel-bin/tensorflow/tools/benchmark/benchmark_model \
--graph=tensorflow_inception_graph.pb \
--input_layer="input:0" \
--input_layer_shape="1,224,224,3" \
--input_layer_type="float" \
--output_layer="output:0"
In [ ]:
In [ ]: