This notebook contains code that demonstrates linear regression. Uncomment lines as you go to learn more.
When you're finished, open the 'tensorboard' notebook to learn how to modify this one to display the graph, and summaries of how the variables and loss change over time.
In [ ]:
# Import tensorflow and other libraries.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import numpy as np
import pylab
import tensorflow as tf
# This is a handy command when working with IPython notebooks
# that instructs Matplotlib to insert plots directly
# into the notebook.
%matplotlib inline
In [ ]:
# Resets the graph.
# This is helpful when working with IPython notebooks
# to ensure a clean start
tf.reset_default_graph()
# Create a session.
# You can think of this as an execution environment for a graph
sess = tf.Session()
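A session evaluates operations in the graph and returns their results as ordinary values. Here's a minimal sketch (the string constant is purely for illustration):
In [ ]:
# Build a trivial constant op and ask the session to execute it.
hello = tf.constant('Hello, TensorFlow!')
print(sess.run(hello))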
In [ ]:
# Create input data using NumPy. y = x * 0.1 + 0.3 + noise
x_train = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_train = x_train * 0.1 + 0.3 + noise
# Uncomment the following line to plot our input data.
# pylab.plot(x_train, y_train, '.')
In [ ]:
# Create some fake evaluation data
x_eval = np.random.rand(len(x_train)).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_eval = x_eval * 0.1 + 0.3 + noise
In [ ]:
# Build inference graph.
# Create Variables W and b that compute y_data = W * x_data + b
W = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.random_normal([1]))
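# A Variable maintains state across calls to run(); training will update
# W and b in place. tf.random_normal([1]) gives each a random initial value.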
# Uncomment the following lines to see what W and b are.
# print(W)
# print(b)
# Create a placeholder we'll use later to feed x's into the graph for training and eval.
# shape=[None] means we can put in any number of examples.
# This is used for minibatch training, and to evaluate a lot of examples at once.
x = tf.placeholder(shape=[None], dtype=tf.float32)
# Uncomment this line to see what x is
# print(x)
# This is the same as tf.add(tf.multiply(W, x), b), but reads more naturally
y = W * x + b
At this point, we have a graph that can compute a predicted y for any x we feed in, though W and b are still just random initial values.
In [ ]:
# Create a placeholder we'll use later to feed the correct y value into the graph
y_label = tf.placeholder(shape=[None], dtype=tf.float32, name='labels')
# print(y_label)
In [ ]:
# Build training graph.
loss = tf.reduce_mean(tf.square(y - y_label)) # Mean squared error between predictions and labels.
optimizer = tf.train.GradientDescentOptimizer(0.5) # Gradient descent optimizer with learning rate 0.5.
train = optimizer.minimize(loss) # An operation that applies one step of gradient descent to minimize loss.
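# Each run of 'train' will nudge the variables against the gradient:
#   W <- W - 0.5 * d(loss)/dW
#   b <- b - 0.5 * d(loss)/db
# where 0.5 is the learning rate passed to the optimizer above.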
# Uncomment the following 3 lines to see what 'loss', 'optimizer' and 'train' are.
# print("loss:", loss)
# print("optimizer:", optimizer)
# print("train:", train)
In [ ]:
# Create an operation to initialize all the variables.
init = tf.global_variables_initializer()
# print(init)
sess.run(init)
In [ ]:
# Our graph is ready to go!
# Uncomment the following line to see the initial W and b values.
# print(sess.run([W, b]))
In [ ]:
# Uncomment these lines to test that we can compute a y from an x (without having trained anything).
# x must be a vector, hence [3] not just 3.
# x_in = [3]
# sess.run(y, feed_dict={x: x_in})
In [ ]:
# Calculate loss on the evaluation data before training
def eval_loss():
    return sess.run(loss, feed_dict={x: x_eval, y_label: y_eval})
eval_loss()
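As a sanity check on the loss formula, the same mean squared error can be computed directly in NumPy, outside the graph (a sketch that assumes the variables above have already been initialized):
In [ ]:
# Cross-check: the NumPy MSE should match eval_loss() up to float precision.
w_val, b_val = sess.run([W, b])
print(np.mean(np.square(w_val * x_eval + b_val - y_eval)))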
In [ ]:
# Perform training.
for step in range(201):
    # Run the training op; feed the training data into the graph.
    sess.run([train], feed_dict={x: x_train, y_label: y_train})
    # Uncomment the following two lines to watch training happen in real time.
    # if step % 20 == 0:
    #     print(step, sess.run([W, b]))
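Since the training data was generated with W = 0.1 and b = 0.3, the learned values should now be close to those:
In [ ]:
# Compare the learned parameters to the values used to generate the data.
w_val, b_val = sess.run([W, b])
print('W =', w_val, '(generating value: 0.1)')
print('b =', b_val, '(generating value: 0.3)')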
In [ ]:
# Uncomment the following lines to plot the predicted values
# pylab.plot(x_train, y_train, '.', label="target")
# pylab.plot(x_train, sess.run(y, feed_dict={x: x_train}), label="predicted")
# pylab.legend()
In [ ]:
# Check accuracy on eval data after training
eval_loss()
In [ ]:
# Use the model to make a prediction.
# Since the data was generated as y = x * 0.1 + 0.3 (plus noise),
# we expect a result close to 2 * 0.1 + 0.3 = 0.5.
sess.run(y, feed_dict={x: [2]})
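Because x was created with shape=[None], the same graph can also predict for a whole batch of inputs at once; each result should be close to x * 0.1 + 0.3:
In [ ]:
# Feed several x values in a single call.
sess.run(y, feed_dict={x: [0.0, 0.5, 1.0, 2.0]})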