In [1]:
import numpy as np
import tensorflow as tf

Build model


In [2]:
# Model parameters (initialized to deliberately wrong values)
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss: sum of squared residuals
loss = tf.reduce_sum(tf.square(linear_model - y))
# optimizer: plain gradient descent with learning rate 0.01
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
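
For reference, the ops above define the following model and loss, and each run of the train op applies one plain gradient-descent update with learning rate η = 0.01:

\[ \hat{y}_i = W x_i + b, \qquad L(W, b) = \sum_i \left(W x_i + b - y_i\right)^2 \]
\[ W \leftarrow W - \eta\,\frac{\partial L}{\partial W} = W - 2\eta \sum_i (W x_i + b - y_i)\,x_i, \qquad b \leftarrow b - 2\eta \sum_i (W x_i + b - y_i), \qquad \eta = 0.01 \]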

In [3]:
# training data
x_train = [1,2,3,4]
y_train = [0,-1,-2,-3]
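
These four points lie exactly on the line y = -x + 1, so training should drive W toward -1 and b toward 1. A quick sanity check in plain NumPy, independent of the TensorFlow graph (np.polyfit fits a degree-1 polynomial by least squares):

In [ ]:
# Least-squares fit of a straight line to the training data (pure NumPy)
slope, intercept = np.polyfit(x_train, y_train, 1)
print("slope: %s intercept: %s" % (slope, intercept))  # expect roughly -1.0 and 1.0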

In [4]:
# op that (re)initializes the model variables
init = tf.global_variables_initializer()

In [6]:
sess = tf.Session()
sess.run(init) # reset W and b to their (deliberately wrong) initial values
for i in range(1000):
    sess.run(train, {x:x_train, y:y_train})
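
To watch the optimization converge, the loss can be fetched in the same sess.run call. A sketch of an instrumented version of the loop above (not part of the original notebook):

In [ ]:
# Same training loop, but also fetch the loss and print it every 200 steps
sess.run(init)
for i in range(1000):
    _, step_loss = sess.run([train, loss], {x: x_train, y: y_train})
    if i % 200 == 0:
        print("step %d: loss %g" % (i, step_loss))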

In [7]:
# evaluate training accuracy
curr_W, curr_b, curr_loss  = sess.run([W, b, loss], {x:x_train, y:y_train})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))


W: [-0.9999969] b: [ 0.99999082] loss: 5.69997e-11
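
As expected, the learned parameters are essentially the exact solution W = -1, b = 1 (the training data lies on y = -x + 1), and the loss is numerically zero.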

In [8]:
# prediction
sess.run(linear_model, feed_dict={x: [1, 2, 3]})


Out[8]:
array([ -6.07967377e-06,  -1.00000298e+00,  -1.99999988e+00], dtype=float32)

Save model


In [9]:
# tag the variables in a named collection so they can be looked up by name after restoring
tf.add_to_collection("vars", W)
tf.add_to_collection("vars", b)

In [10]:
# write the graph definition (.meta) and the variable values to ./linear_regression*
model_saver = tf.train.Saver()
model_saver.save(sess, "./linear_regression")


Out[10]:
'./linear_regression'
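
Saver.save returns the checkpoint prefix and writes several files next to it. A sketch for inspecting what was written, assuming the tf.train checkpoint helpers (latest_checkpoint, list_variables) available in recent 1.x releases; exact file names vary slightly by version:

In [ ]:
import glob
# Files written by the Saver (typically .meta, .index, .data-*, plus a 'checkpoint' file)
print(sorted(glob.glob("./linear_regression*")))
# Most recent checkpoint prefix recorded in the 'checkpoint' file
print(tf.train.latest_checkpoint("."))
# Variable names and shapes stored in the checkpoint
print(tf.train.list_variables("./linear_regression"))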

Load model


In [2]:
sess = tf.Session()
# rebuild the saved graph from the .meta file, then restore the variable values into it
new_saver = tf.train.import_meta_graph('./linear_regression.meta')
new_saver.restore(sess, "./linear_regression")

In [3]:
# fetch the restored values of the tensors tagged in the "vars" collection
W_ = sess.run(tf.get_collection("vars")[0])
b_ = sess.run(tf.get_collection("vars")[1])

In [4]:
W_, b_


Out[4]:
(array([-0.9999969], dtype=float32), array([ 0.99999082], dtype=float32))
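
The collection lookup is one way to reach the restored values; import_meta_graph also repopulates the standard variable collections, so the same values can be fetched via tf.global_variables(). A sketch (the default variable names shown in the comment are an assumption):

In [ ]:
# Variables recreated by import_meta_graph, in creation order (W first, then b)
restored_vars = tf.global_variables()
print([v.name for v in restored_vars])  # e.g. ['Variable:0', 'Variable_1:0']
print(sess.run(restored_vars))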

Prediction


In [5]:
# rebuild the prediction op using the restored parameter values (now plain NumPy arrays)
x = tf.placeholder(tf.float32)
linear_model = W_ * x + b_

In [6]:
sess.run(linear_model, feed_dict={x: [1, 2, 3]})


Out[6]:
array([ -6.07967377e-06,  -1.00000298e+00,  -1.99999988e+00], dtype=float32)
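
When the session is no longer needed, its resources can be released explicitly; a small sketch (not in the original notebook) using close, or equivalently a with-block:

In [ ]:
sess.close()  # release the session's resources
# Equivalent pattern for scripts: scope the session with a context manager
# with tf.Session() as sess:
#     tf.train.import_meta_graph('./linear_regression.meta').restore(sess, "./linear_regression")
#     print(sess.run(tf.get_collection("vars")))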
