Installation tips

  • Create an Anaconda virtual environment with IPython notebook support

    conda create -n tf ipython-notebook --yes

  • The setup as explained on the official site failed for me, apparently because of a failure to update setuptools. The remedy was to do as explained here:

    pip install --ignore-installed --upgrade pip setuptools
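
  • TensorFlow itself then needs to be installed into the activated environment; the exact package name (or install URL) depends on the TensorFlow version and platform, but something along these lines should work:

    source activate tf
    pip install tensorflow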

Hello TensorFlow

Basic graph creation and how to inspect the elements of the graph


In [16]:
import tensorflow as tf

#----------------------------------------------------------
# Basic graph structure and operations
#  tf.add , tf.sub , tf.mul , tf.div , tf.mod , tf.pow
#  tf.less , tf.greater , tf.less_equal , tf.greater_equal
#  tf.logical_and , tf.logical_or , tf.logical_xor
#------------------------------------------------------------
tf.reset_default_graph()
print tf.add(1,2)
print tf.mul(7,9)

graph = tf.get_default_graph()
for op in graph.get_operations():
    print op.name

sess = tf.Session()                      # For regular python code (tf.InteractiveSession() is handier in a notebook)
sess.run(tf.initialize_all_variables())  # no Variables in this graph, so this is effectively a no-op here
print 'Addition is:    {} + {} = {} '.format(sess.run('Add/x:0'),sess.run('Add/y:0'),sess.run('Add:0'))
print 'Multiplication: {} * {} = {}'.format(sess.run('Mul/x:0'),sess.run('Mul/y:0'),sess.run('Mul:0'))


Tensor("Add:0", shape=(), dtype=int32)
Tensor("Mul:0", shape=(), dtype=int32)
Add/x
Add/y
Add
Mul/x
Mul/y
Mul
Addition is:    1 + 2 = 3 
Multiplication: 7 * 9 = 63
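
The values above were fetched by the string names of the tensors ('Add/x:0', 'Add:0', ...). As a small sketch (same old-style API as above), the results can equally be fetched through the Python handles returned by tf.add / tf.mul, and several tensors can be evaluated in a single run() call:

import tensorflow as tf

tf.reset_default_graph()
a = tf.add(1, 2)
m = tf.mul(7, 9)

sess = tf.Session()
print sess.run([a, m])    # -> [3, 63]
sess.close()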

Constants


In [20]:
tf.reset_default_graph()

m1 = tf.constant([[1., 2.], [3., 4.]])
m2 = tf.constant([[5., 6.], [7., 8.]])

m3 = tf.matmul(m1, m2)

# have to run the graph using a session
sess = tf.Session()
sess.run(tf.initialize_all_variables())
print 'm3 = ',sess.run(m3)
sess.close()


m3 =  [[ 19.  22.]
 [ 43.  50.]]
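
For contrast with the matrix product above, a small follow-on sketch (same constants) showing that tf.add and tf.mul act element-wise on tensors of the same shape:

import tensorflow as tf

tf.reset_default_graph()
m1 = tf.constant([[1., 2.], [3., 4.]])
m2 = tf.constant([[5., 6.], [7., 8.]])

sess = tf.Session()
print 'element-wise sum:    ', sess.run(tf.add(m1, m2))
print 'element-wise product:', sess.run(tf.mul(m1, m2))
sess.close()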

Variables


In [41]:
tf.reset_default_graph()

v1 = tf.Variable(1, name="my_variable")
v2 = tf.Variable(tf.zeros([3,5]),name='5_zeros')     # Variable with an initializer
c1 = tf.random_normal([4, 4], mean=0.0, stddev=1.0)  # 4x4 matrix with normally distributed random entries
v3 = tf.Variable(c1,name='RandomMatrix')
v4 = tf.Variable(tf.ones(6))

counter = tf.Variable(0)

sess = tf.Session()
sess.run(tf.initialize_all_variables())
print 'v1 =',sess.run(v1)
print 'v2 =',sess.run(v2)
print 'v3=',sess.run(v3)
print 'v4=',sess.run(v4)

# Changing the value of a variable
print 'Changed v1 =',sess.run(v1.assign(v1 + 7))
print 'v1 new val =',sess.run(v1)

print sess.run(counter.assign_add(1))
print sess.run(counter.assign_add(1))


sess.close()


v1 = 1
v2 = [[ 0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.]]
v3= [[ 0.02285025  0.38858941  0.71063793 -0.41778183]
 [ 1.75453699 -0.28642228  0.93922818 -0.91024143]
 [-0.04701722 -1.16829133 -0.72410405  0.837156  ]
 [ 0.80568159 -0.37985972  2.02597475 -1.09147239]]
v4= [ 1.  1.  1.  1.  1.  1.]
Changed v1 = 8
v1 new val = 8
1
2

Scopes


In [43]:
tf.reset_default_graph()

v1 = tf.add(1,2,name='add')
with tf.name_scope("Scope1"):
    with tf.name_scope("Scope_nested"):
        vs = tf.mul(5, 5,name='mul')
print v1.name
print vs.name


add:0
Scope1/Scope_nested/mul:0
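
A related sketch (assuming the tf.variable_scope / tf.get_variable API of the same TensorFlow version): variable scopes prefix names in the same way and, in addition, are respected by tf.get_variable, which is the mechanism used for sharing variables:

import tensorflow as tf

tf.reset_default_graph()
with tf.variable_scope("layer1"):
    w = tf.get_variable("w", shape=[2, 3])
print w.name    # layer1/w:0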

In [17]:
tf.reset_default_graph()
graph = tf.get_default_graph()
graph.get_operations()

# Model of a simple neuron: y <-- x * w
x       = tf.constant(1.0,name='x')
w       = tf.Variable(0.8,name='w')
y       = tf.mul(w , x, name='y')

y_     = tf.constant(0.0,name='y_train')
loss   = (y-y_)**2

#--------------------------------------------------------------
# Print the nodes of the graph, also called 'operations' or 'ops'
#--------------------------------------------------------------
print 'Operations in graph \n==========================='
for op in graph.get_operations():
    print op.name


Operations in graph 
===========================
x
w/initial_value
w
w/Assign
w/read
y
y_train
sub
pow/y
pow

Last operation in graph is :  pow

Inputs:
===========  
   Tensor("sub:0", shape=(), dtype=float32)
   Tensor("pow/y:0", shape=(), dtype=float32)

Outputs:
===========  
   Tensor("pow:0", shape=(), dtype=float32)

Operations in graph 
===========================
x
w/initial_value
w
w/Assign
w/read
y
y_train
sub
pow/y
pow
gradients/Shape
gradients/Const
gradients/Fill
gradients/pow_grad/Shape
gradients/pow_grad/Shape_1
gradients/pow_grad/BroadcastGradientArgs
gradients/pow_grad/mul
gradients/pow_grad/sub/y
gradients/pow_grad/sub
gradients/pow_grad/Pow
gradients/pow_grad/mul_1
gradients/pow_grad/Sum
gradients/pow_grad/Reshape
gradients/pow_grad/mul_2
gradients/pow_grad/Log
gradients/pow_grad/mul_3
gradients/pow_grad/Sum_1
gradients/pow_grad/Reshape_1
gradients/pow_grad/tuple/group_deps
gradients/pow_grad/tuple/control_dependency
gradients/pow_grad/tuple/control_dependency_1
gradients/sub_grad/Shape
gradients/sub_grad/Shape_1
gradients/sub_grad/BroadcastGradientArgs
gradients/sub_grad/Sum
gradients/sub_grad/Reshape
gradients/sub_grad/Sum_1
gradients/sub_grad/Neg
gradients/sub_grad/Reshape_1
gradients/sub_grad/tuple/group_deps
gradients/sub_grad/tuple/control_dependency
gradients/sub_grad/tuple/control_dependency_1
gradients/y_grad/Shape
gradients/y_grad/Shape_1
gradients/y_grad/BroadcastGradientArgs
gradients/y_grad/mul
gradients/y_grad/Sum
gradients/y_grad/Reshape
gradients/y_grad/mul_1
gradients/y_grad/Sum_1
gradients/y_grad/Reshape_1
gradients/y_grad/tuple/group_deps
gradients/y_grad/tuple/control_dependency
gradients/y_grad/tuple/control_dependency_1

Last operation in graph is :  gradients/y_grad/tuple/control_dependency_1

Inputs:
===========  
   Tensor("gradients/y_grad/Reshape_1:0", shape=(), dtype=float32)

Outputs:
===========  
   Tensor("gradients/y_grad/tuple/control_dependency_1:0", shape=(), dtype=float32)

Last operation in graph is now:  init

Create a session to evaluate graph operations
===============================================
Run session to obtain the value of x:  1.0
Run session to obtain the value of y:  0.8
Initial value of loss function       :  0.64

Compute gradients:                    1.6

Updated weights after one BackProp step: 0.759999990463, loss function=0.577600002289


   0 : Weight=0.722, loss=0.5213
  10 : Weight=0.432, loss=0.1869
  20 : Weight=0.259, loss=0.06699
  30 : Weight=0.155, loss=0.02402
  40 : Weight=0.0928, loss=0.008609
  50 : Weight=0.0556, loss=0.003086
  60 : Weight=0.0333, loss=0.001106
  70 : Weight=0.0199, loss=0.0003966
  80 : Weight=0.0119, loss=0.0001422
  90 : Weight=0.00714, loss=5.097e-05
 100 : Weight=0.00427, loss=1.827e-05
 110 : Weight=0.00256, loss=6.55e-06
 120 : Weight=0.00153, loss=2.348e-06
 130 : Weight=0.000917, loss=8.418e-07
 140 : Weight=0.000549, loss=3.018e-07
 150 : Weight=0.000329, loss=1.082e-07
 160 : Weight=0.000197, loss=3.878e-08
 170 : Weight=0.000118, loss=1.39e-08
 180 : Weight=7.06e-05, loss=4.984e-09
 190 : Weight=4.23e-05, loss=1.787e-09
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-17-cb60b84268e4> in <module>()
     93 
     94 
---> 95 ops.reset_default_graph()

NameError: name 'ops' is not defined
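
The code listing above stops at the printout of the graph operations, but the output that follows was produced by the rest of the cell, which is not shown. A possible reconstruction, inferred from that output: it assumes the gradients were added with tf.gradients and the weight updated manually with w.assign (an equivalent tf.train.GradientDescentOptimizer(0.025) step gives the same numbers), and the closing tf.reset_default_graph() replaces the ops.reset_default_graph() call that raised the NameError above:

# Sketch reconstruction of the remainder of the cell (not shown above)

def print_last_op(graph):
    op = graph.get_operations()[-1]
    print 'Last operation in graph is : ', op.name
    print '\nInputs:\n==========='
    for t in op.inputs:
        print '  ', t
    print '\nOutputs:\n==========='
    for t in op.outputs:
        print '  ', t

print_last_op(graph)                      # last op so far is 'pow'

grads = tf.gradients(loss, [w])           # d(loss)/dw; appends all the gradients/... ops
print '\nOperations in graph \n==========================='
for op in graph.get_operations():
    print op.name
print_last_op(graph)

init = tf.initialize_all_variables()      # adds the 'init' op
print 'Last operation in graph is now: ', graph.get_operations()[-1].name

print '\nCreate a session to evaluate graph operations'
print '==============================================='
sess = tf.Session()
sess.run(init)
print 'Run session to obtain the value of x: ', sess.run(x)
print 'Run session to obtain the value of y: ', sess.run(y)
print 'Initial value of loss function       : ', sess.run(loss)

# loss = (w*x - y_)**2, so d(loss)/dw = 2*x*(w*x - y_) = 2 * 1.0 * 0.8 = 1.6
print '\nCompute gradients:                   ', sess.run(grads[0])

# One gradient-descent step with learning rate 0.025: w <- 0.8 - 0.025*1.6 = 0.76
update = w.assign(w - 0.025 * grads[0])
sess.run(update)
print '\nUpdated weights after one BackProp step: {}, loss function={}'.format(sess.run(w), sess.run(loss))

# Every further step multiplies w by (1 - 2*0.025) = 0.95
for i in range(200):
    sess.run(update)
    if i % 10 == 0:
        print '{:4d} : Weight={:.3}, loss={:.4}'.format(i, sess.run(w), sess.run(loss))

sess.close()
tf.reset_default_graph()                  # the original cell called ops.reset_default_graph()
                                          # without importing ops, hence the NameError above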

Training and visualization

To see the graphs, invoke the command:

tensorboard --logdir=log_simple_stats

which can then be viewed in the browser at

localhost:6006/#events


In [62]:
import tensorflow as tf

x = tf.constant(1.0, name='input')
w = tf.Variable(0.8, name='weight')
y = tf.mul(w, x, name='output')
y_ = tf.constant(0.0, name='correct_value')
loss = tf.pow(y - y_, 2, name='loss')
train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss)

for value in [x, w, y, y_, loss]:
    tf.scalar_summary(value.op.name, value)

summaries = tf.merge_all_summaries()

sess = tf.Session()
summary_writer = tf.train.SummaryWriter('log_simple_stats', sess.graph)

sess.run(tf.initialize_all_variables())
for i in range(100):
    summary_writer.add_summary(sess.run(summaries), i)
    sess.run(train_step)

summary_writer.close()    # flush the event file so TensorBoard picks up all the summaries