In [1]:
import tensorflow as tf
In [2]:
# Grab the process-wide default graph; it is empty until ops are added below.
graph = tf.get_default_graph()
# Show the graph's operations — an empty list at this point.
graph.get_operations()
Out[2]:
In [3]:
input_value = tf.constant(1.0) # NOTE: run only once — each execution adds another Const node to the global default graph
# Displaying the tensor shows its name/shape/dtype, not its value.
input_value
Out[3]:
In [4]:
operations = graph.get_operations()
operations
Out[4]:
In [5]:
operations[0].node_def
Out[5]:
In [6]:
# NOTE(review): the session is intentionally kept open for the rest of the
# notebook (it is reused by every later cell) and is never closed.
sess = tf.Session()
# Actually evaluate the constant tensor — yields 1.0.
sess.run(input_value)
Out[6]:
Simplest neuron
In [7]:
# A trainable scalar variable initialized to 0.8 — the neuron's single weight.
weight = tf.Variable(0.8)
weight
Out[7]:
In [8]:
# List the name of every op now registered in the default graph
# (creating the Variable above added several ops, not just one).
for op in graph.get_operations():
    print(op.name)
In [9]:
output_value = weight * input_value
In [10]:
# Print the op names again — the multiplication above added a `mul` op.
for op in graph.get_operations():
    print(op.name)
In [11]:
# tf.initialize_all_variables() was deprecated in TF 1.0 (and later removed);
# tf.global_variables_initializer() is the drop-in replacement.
init = tf.global_variables_initializer()
sess.run(init)  # assigns weight its initial value (0.8)
In [12]:
sess.run(output_value)
Out[12]:
TensorBoard
In [13]:
# Re-build the same neuron with explicitly named ops so the TensorBoard
# graph view is readable.
x = tf.constant(1.0, name="input")
w = tf.Variable(0.8, name="weight")
y = tf.multiply(w, x, name="output")
In [14]:
summary_writer = tf.summary.FileWriter ("log_simple_graph", sess.graph)
In [15]:
!tensorboard --logdir=log_simple_graph
Fitting neuron
In [16]:
# Target output and squared-error loss for the single training example.
y_ = tf.constant(0.0)
loss = (y - y_) ** 2
In [17]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.025)
In [18]:
gradients_and_vars = optimizer.compute_gradients(loss)
In [19]:
# tf.initialize_all_variables() is deprecated; use the TF 1.0 replacement,
# which resets every variable (w back to 0.8) before inspecting gradients.
sess.run(tf.global_variables_initializer())
# Evaluate the gradient of loss w.r.t. the second trainable variable
# (presumably w — the earlier `weight` variable is index 0; TODO confirm).
sess.run(gradients_and_vars[1][0])
Out[19]:
In [21]:
# Apply one gradient-descent step, then read back the updated weight value.
sess.run(optimizer.apply_gradients(gradients_and_vars))
sess.run(w)
Out[21]:
In [24]:
# minimize() fuses compute_gradients + apply_gradients into a single op.
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.025).minimize(loss)
# `xrange` is Python-2-only; `range` behaves identically here and also
# works on Python 3.
for i in range(100):
    sess.run(train_step)
# After 100 steps the output should be close to the target 0.0.
sess.run(y)
Out[24]:
Summary
In [30]:
# Reset w to its initial value so the logged run starts from scratch.
# (tf.initialize_all_variables() is deprecated — use the TF 1.0 name.)
sess.run(tf.global_variables_initializer())
# Scalar summary op that records the neuron's output under the tag "output".
summary_y = tf.summary.scalar("output", y)
summary_writer = tf.summary.FileWriter("log_simple_stats")
for i in range(100):  # range, not the Python-2-only xrange
    # Log the current output, then take one training step.
    summary_str = sess.run(summary_y)
    summary_writer.add_summary(summary_str, i)
    sess.run(train_step)
In [ ]:
!tensorboard --logdir=log_simple_stats
In [ ]: