In [ ]:
# Import TensorFlow library:
import tensorflow as tf
# Import Numpy library:
import numpy as np
In [ ]:
# Line equation:
# y = slope * x + intercept
# Set target slope and intercept:
target_slope = 12.0
target_intercept = 7.0
# Set training parameters:
num_examples = 25
num_epochs = 1000
learning_rate = 0.01
# Create random noise:
noise_level = 5.0
noise = np.random.uniform( -noise_level, noise_level, size = num_examples )
# Create training data:
trainX = np.linspace( 0.0, 10.0, num = num_examples )
trainY = target_slope * trainX + target_intercept + noise
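Optionally, the noisy training data can be plotted against the target line before training. This cell is a quick sketch and assumes matplotlib is installed; it is not used anywhere else in this notebook.
In [ ]:
# Visualize the training data (optional sketch; assumes matplotlib is available):
import matplotlib.pyplot as plt
plt.scatter( trainX, trainY, label = "noisy training data" )
plt.plot( trainX, target_slope * trainX + target_intercept, "r", label = "target line" )
plt.legend()
plt.show()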
In [ ]:
# Create input placeholders:
X = tf.placeholder( tf.float32 )
Y = tf.placeholder( tf.float32 )
# Create weight and bias variables:
W = tf.Variable( np.random.randn(), name="weight" )
b = tf.Variable( np.random.randn(), name="bias" )
# Create prediction operation:
predict = tf.add( tf.multiply( X, W ), b )
# Create mean squared error (MSE) cost function:
cost = tf.reduce_sum( tf.pow( predict - Y, 2.0 ) ) * ( 1.0 / num_examples )
# Create gradient descent optimizer:
optimizer = tf.train.GradientDescentOptimizer( learning_rate ).minimize( cost )
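For reference, the same mean squared error can be written more compactly with tf.reduce_mean. The tensor below is a sketch for comparison only; the optimizer above still minimizes the cost tensor defined in the previous cell.
In [ ]:
# Equivalent, more compact MSE formulation (for reference only; not wired
# into the optimizer defined above):
cost_alt = tf.reduce_mean( tf.square( predict - Y ) )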
In [ ]:
# Create session:
with tf.Session() as sess:
    # Initialize global variables:
    sess.run( tf.global_variables_initializer() )
    # Iterate over each training epoch:
    for epoch in range( num_epochs ):
        # Iterate over each training pair:
        for ( x, y ) in zip( trainX, trainY ):
            # Run optimizer on training pair:
            sess.run( optimizer, feed_dict = { X: x, Y: y } )
        # Print stats every 25 epochs:
        if ( epoch + 1 ) % 25 == 0:
            curr_cost = sess.run( cost, feed_dict = { X: trainX, Y: trainY } )
            curr_W = sess.run( W )
            curr_b = sess.run( b )
            print( "Epoch:", '%04d' % ( epoch + 1 ),
                   "cost=", "{:.4f}".format( curr_cost ),
                   "W=", "{:.4f}".format( curr_W ),
                   "b=", "{:.4f}".format( curr_b ) )
In [ ]: