# Environment setup — run these in a shell, NOT in Python:
#   conda create -n tf ipython-notebook --yes
#   pip install --ignore-installed --upgrade pip setuptools
# In [16]:
import tensorflow as tf
#----------------------------------------------------------
# Basic graph structure and operations
# tf.add , tf.sub, tf.mul , tf.div , tf.mod , tf.poe
# tf.less , tf.greater , tf.less_equal , tf.greater_equal
# tf.logical_and , tf.logical_or , logical.xor
#------------------------------------------------------------
tf.reset_default_graph()
print tf.add(1,2)
print tf.mul(7,9)
graph = tf.get_default_graph()
for op in graph.get_operations():
print op.name
sess = tf.Session() # For regular python code
tf.initialize_all_variables()
print 'Addition is: {} + {} = {} '.format(sess.run('Add/x:0'),sess.run('Add/y:0'),sess.run('Add:0'))
print 'Multiplication: {} * {} = {}'.format(sess.run('Mul/x:0'),sess.run('Mul/y:0'),sess.run('Mul:0'))
# In [20]:
tf.reset_default_graph()
m1 = tf.constant([[1., 2.], [3.,4]])
m2 = tf.constant([[5.,6.],[7.,8.]])
m3 = tf.matmul(m1, m2)
# have to run the graph using a session
sess = tf.Session()
sess.run(tf.initialize_all_variables())
print 'm3 = ',sess.run(m3)
sess.close()
# In [41]:
tf.reset_default_graph()
v1 = tf.Variable(1, name="my_variable")
v2 = tf.Variable(tf.zeros([3,5]),name='5_zeros') # Variable with innitializer
c1 = tf.random_normal([4, 4], mean=0.0, stddev=1.0) # 4x4 matrixwith normal random variables
v3 = tf.Variable(c1,name='RandomMatrix')
v4 = tf.Variable(tf.ones(6))
counter = tf.Variable(0)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
print 'v1 =',sess.run(v1)
print 'v2 =',sess.run(v2)
print 'v3=',sess.run(v3)
print 'v4=',sess.run(v4)
# Changing the value of a variable
print 'Changed v1 =',sess.run(v1.assign(v1 + 7))
print 'v1 new val =',sess.run(v1)
print sess.run(counter.assign_add(1))
print sess.run(counter.assign_add(1))
sess.close()
# In [43]:
tf.reset_default_graph()
v1 = tf.add(1,2,name='add')
with tf.name_scope("Scope1"):
with tf.name_scope("Scope_nested"):
vs = tf.mul(5, 5,name='mul')
print v1.name
print vs.name
# In [18]:
tf.reset_default_graph()
graph = tf.get_default_graph()
graph.get_operations()
# A single neuron: the output y is the input x scaled by the weight w.
x = tf.constant(1.0, name='x')
w = tf.Variable(0.8, name='w')
y = tf.mul(w, x, name='y')
y_ = tf.constant(0.0, name='y_train')
# Squared-error loss between the output and the training target.
error = y - y_
loss = error ** 2
# In [17]:
tf.reset_default_graph()
graph = tf.get_default_graph()
graph.get_operations()
# Model of a simple neuron: y <-- x * w
x = tf.constant(1.0,name='x')
w = tf.Variable(0.8,name='w')
y = tf.mul(w , x, name='y')
y_ = tf.constant(0.0,name='y_train')
loss = (y-y_)**2
#--------------------------------------------------------------
# Print the nodes of teh graph, also called 'operations' or 'ops'
#--------------------------------------------------------------
print 'Operations in graph \n==========================='
for op in graph.get_operations():
print op.name
# In [62]:
import tensorflow as tf
# CONSISTENCY FIX: every other cell resets the default graph first; without
# this, rerunning after the previous cells duplicates ops and summaries.
tf.reset_default_graph()
# Single neuron y = w * x trained by gradient descent toward target 0.0.
x = tf.constant(1.0, name='input')
w = tf.Variable(0.8, name='weight')
y = tf.mul(w, x, name='output')
y_ = tf.constant(0.0, name='correct_value')
loss = tf.pow(y - y_, 2, name='loss')
train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss)
# Record every tensor of interest so TensorBoard can plot them per step.
for value in [x, w, y, y_, loss]:
    tf.scalar_summary(value.op.name, value)
summaries = tf.merge_all_summaries()
sess = tf.Session()
summary_writer = tf.train.SummaryWriter('log_simple_stats', sess.graph)
sess.run(tf.initialize_all_variables())
for i in range(100):
    summary_writer.add_summary(sess.run(summaries), i)
    sess.run(train_step)
# BUG FIX: flush and close the writer, otherwise buffered events may never
# be written to disk; also release the session's resources.
summary_writer.close()
sess.close()