In [1]:
import tensorflow as tf
TensorFlow programs consist of two discrete sections: building the computational graph, and running the computational graph.
The computational graph is a series of TensorFlow operations arranged into a graph of nodes. Each node takes zero or more tensors as inputs and produces a tensor as output.
Constants are a type of node that takes no inputs and outputs an internally stored value. They are created with tf.constant, and their values can never change. Printing the nodes gives tensor metadata, not the values of the nodes.
In [2]:
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
print(node1, node2)
To actually evaluate the nodes, run the computational graph within a session, which encapsulates the control and state of the TensorFlow runtime:
In [3]:
sess = tf.Session()
print(sess.run([node1, node2]))
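Running the graph within the session evaluates the nodes and returns their values, [3.0, 4.0].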
More complicated computations can be performed by combining Tensor nodes with Operation nodes. Use the tf.add operation to add node1 and node2:
In [4]:
node3 = tf.add(node1, node2)
print(node3)            # tensor metadata for the add node, not a value
print(sess.run(node3))  # 7.0
A graph can be parameterized to accept external inputs, known as placeholders. A placeholder is a promise to provide a value later:
In [5]:
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b # + provides a shortcut for tf.add(a, b)
In [6]:
print(sess.run(adder_node, {a: 3, b: 4.5}))          # 7.5
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))  # elementwise: [3. 7.]
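A placeholder can also be declared with an explicit shape, in which case TensorFlow rejects feeds that do not match it. A minimal illustrative sketch (this cell, including the name v, is not part of the original notebook):
v = tf.placeholder(tf.float32, shape=[None])  # 1-D tensor of any length
print(sess.run(v * 2, {v: [1, 2, 3]}))        # [2. 4. 6.]
# feeding a value of the wrong rank, e.g. {v: [[1, 2]]}, raises a ValueError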
Make the graph more complex by adding another operation:
In [7]:
add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {a: 3, b: 4.5}))  # (3 + 4.5) * 3 = 22.5
To make a model trainable, the graph needs parameters that can be modified to produce new outputs for the same input. Variables add trainable parameters to a graph; they are constructed with an initial value and a type:
In [8]:
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b
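The operator overloads used above are shorthand for explicit TensorFlow ops; the same model can be spelled out longhand, which is sometimes clearer when reading graphs. An equivalent sketch (the name linear_model_explicit is not from the original notebook):
linear_model_explicit = tf.add(tf.multiply(W, x), b)  # builds the same graph structure as W * x + b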
Variables are not initialized when tf.Variable is called; until this point they hold no values. Initialize them all at once with a special operation:
In [9]:
init = tf.global_variables_initializer()
sess.run(init)
Because x is a placeholder, linear_model can be evaluated for several x values simultaneously:
In [10]:
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
A loss function measures how far the current model is from the provided data. For linear regression, a standard choice sums the squares of the deltas between the model's outputs and the target values y:
In [11]:
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
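To see where this number comes from, work through the arithmetic by hand: with W = 0.3 and b = -0.3, the model outputs [0, 0.3, 0.6, 0.9] for x = [1, 2, 3, 4]; the deltas against y = [0, -1, -2, -3] are [0, 1.3, 2.6, 3.9]; and the sum of their squares is 0 + 1.69 + 6.76 + 15.21 = 23.66.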
The values of W and b need to be updated to get a perfect fit. We can work out manually that W = -1 and b = 1 produce the target y values exactly (a loss of 0), and reassign the variables with tf.assign:
In [12]:
fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
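With W = -1 and b = 1 the model produces [0, -1, -2, -3] exactly, so the loss falls to 0.0.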
Guessing parameters by hand defeats the purpose of machine learning. TensorFlow provides optimizers that slowly change each variable to minimize the loss; the simplest is gradient descent, which updates each variable according to the derivative of the loss with respect to that variable:
In [13]:
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
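For reference, minimize is a convenience that combines two steps of the standard TF 1.x optimizer API. An equivalent explicit sketch (the name train_explicit is not from the original notebook):
grads_and_vars = optimizer.compute_gradients(loss)          # symbolic gradients of loss w.r.t. W and b
train_explicit = optimizer.apply_gradients(grads_and_vars)  # op applying one gradient-descent update per run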
In [14]:
sess.run(init)  # reset values to incorrect defaults
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))
The above values are the final model parameters that minimize the loss function; note how close they land to the exact solution W = -1, b = 1 found by hand above. The complete trainable linear-regression program:
In [15]:
# Model parameters
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to incorrect defaults
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})
# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
Finally, TensorBoard can visualize the structure of a computational graph. Build a small graph with named nodes and write it to a log directory with tf.summary.FileWriter:
In [16]:
g = tf.Graph()
with g.as_default():
a = tf.placeholder(tf.float32, name="node1")
b = tf.placeholder(tf.float32, name="node2")
c = a + b
tf.summary.FileWriter("logs", g).close()
#from this notebook's directory run > tensorboard --logdir=logs
#then open TensorBoard at: http://localhost:6006/#graphs
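Ops built with operators, like c above, receive autogenerated names (e.g. "add") in the Graphs tab; naming ops explicitly keeps the visualization readable. An optional tweak (the name c_named is illustrative, and the graph would need to be written to the log directory again afterwards):
with g.as_default():
    c_named = tf.add(a, b, name="sum")  # appears as "sum" in TensorBoard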