In [1]:
import tensorflow as tf
In [2]:
# Tensor rank examples. These are plain Python literals illustrating the
# shapes TensorFlow tensors can take; only the last expression in a cell
# is echoed as the cell's output.
3 # a rank 0 tensor; this is a scalar with shape []
[1. ,2., 3.] # a rank 1 tensor; this is a vector with shape [3]
[[1., 2., 3.], [4., 5., 6.]] # a rank 2 tensor; a matrix with shape [2, 3]
[[[1., 2., 3.]], [[7., 8., 9.]]] # a rank 3 tensor with shape [2, 1, 3]
Out[2]:
In [3]:
# Build two constant nodes. n2's dtype is inferred as tf.float32 from the
# literal 4.0, so both constants end up with the same dtype.
n1 = tf.constant(2.0, dtype=tf.float32)
n2 = tf.constant(4.0)
# Printing a node shows the symbolic Tensor, not its value — the graph has
# only been built, not executed.
print(n1)
print(n2)
This is only the graph-construction step: printing a node shows the symbolic tensor. To obtain actual values, the nodes must be evaluated by running the graph within a session.
In [4]:
# A Session owns the TensorFlow runtime; run() evaluates the requested nodes.
sess = tf.Session()
values = sess.run([n1, n2])
print(values)
Addition node
In [5]:
# Combine the two constant nodes with an explicit add op.
n3 = tf.add(n1, n2)
evaluated = sess.run(n3)
print('Node 3', n3)            # the symbolic tensor
print('sess.run(n3)', evaluated)  # its evaluated value
In [6]:
# Placeholders are graph inputs whose values are supplied at run time
# through the feed dict.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = tf.add(a, b)  # '+' on tensors is shorthand for tf.add
print(sess.run(adder_node, feed_dict={a: 3, b: 4.5}))
print(sess.run(adder_node, feed_dict={a: [1, 3], b: [2, 4]}))
Other operations
In [7]:
# Ops compose into larger graphs: halve the sum of the two inputs.
add_and_half = adder_node / 2
halved = sess.run(add_and_half, feed_dict={a: 0.5, b: -1.5})
print(halved)
Tunable variables:
In [8]:
# Trainable parameters of the line y = m*x + c.
# BUG FIX: tf.Variable's second positional argument is `trainable`, not
# `dtype` — the original `tf.Variable([.3], tf.float32)` silently passed
# the dtype object as the (truthy) `trainable` flag. Pass dtype by keyword.
m = tf.Variable([.3], dtype=tf.float32)
c = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = m * x + c
Unlike constants, whose values are fixed when they are created, variables are not initialized automatically — they must be initialized explicitly before use.
In [9]:
# Variables must be initialized explicitly; `init` is an op that, when run,
# assigns every global variable its declared initial value. (Reused later
# to reset the parameters before training.)
init = tf.global_variables_initializer()
sess.run(init)
Evaluating the line's ordinates for several x values simultaneously:
In [10]:
print(sess.run(linear_model, {x:[1,2,3,0,-5,20]}))
In [11]:
# Sum-of-squared-errors loss between the model output and the targets y.
y = tf.placeholder(tf.float32)
loss = tf.reduce_sum(tf.square(linear_model - y))
print(sess.run(loss, feed_dict={x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
In [12]:
# Manually reassign m and c, then re-evaluate the loss with the new values.
assign_ops = [tf.assign(m, [-0.9]), tf.assign(c, [1.5])]
sess.run(assign_ops)
print(sess.run(loss, feed_dict={x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
In [21]:
# Fit m and c by gradient descent on the squared-error loss.
optimizer = tf.train.GradientDescentOptimizer(0.01)  # learning rate 0.01
train = optimizer.minimize(loss)
sess.run(init)  # reset m and c to their (incorrect) initial values
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
# BUG FIX: original printed [m, m] (the slope twice); report both
# fitted parameters, slope and intercept.
print(sess.run([m, c]))
Try experimenting with different learning-rate (step-size) values and observing how convergence changes.
In [23]:
import numpy as np
In [24]:
features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
In [25]:
estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)
In [26]:
# Training data as numpy arrays.
# NOTE(review): this rebinds x and y, shadowing the tf.placeholder nodes
# defined earlier — confirm no later cell still expects the placeholders.
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
# Feeds batches of 4 samples, cycling through the data for 1000 epochs.
input_fn = tf.contrib.learn.io.numpy_input_fn({"x":x}, y, batch_size=4, num_epochs=1000)
In [29]:
score = estimator.evaluate(input_fn=input_fn)
In [30]:
print(score)
In [ ]: