In [7]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
In [8]:
# x and y are placeholders for our training data
x = tf.placeholder("float")
y = tf.placeholder("float")
In [9]:
# w is the variable storing our values. It is initialised with starting "guesses"
# w[0] is the "a" in our equation, w[1] is the "b"
w = tf.Variable([1.0, 2.0], name="w")
# Our model of y = a*x + b
y_model = tf.mul(x, w[0]) + w[1]
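Note that tf.mul only exists in TensorFlow releases before 1.0; on a newer 1.x install the same model can be written with tf.multiply (which replaced it) or the * operator:
In [ ]:
# Equivalent model on TensorFlow >= 1.0, where tf.mul was renamed to tf.multiply
y_model = tf.multiply(x, w[0]) + w[1]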
In [16]:
# Our error is defined as the square of the differences
error = tf.square(y - y_model)
# The Gradient Descent Optimizer does the heavy lifting
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)
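Under the hood, each run of this training op simply nudges w against the gradient of the error: w ← w - learning_rate * ∂error/∂w. Here is a minimal NumPy sketch of one such step for this particular squared-error loss (an illustration of the update rule, not how TensorFlow implements it internally):
In [ ]:
# One hand-rolled gradient descent step for error = (y - (a*x + b))**2
a, b = 1.0, 2.0               # same starting guesses as w
learning_rate = 0.01
x_value, y_value = 0.5, 7.0   # an arbitrary training pair on the line y = 2x + 6
residual = y_value - (a * x_value + b)
a -= learning_rate * (-2 * residual * x_value)  # d(error)/da = -2 * residual * x
b -= learning_rate * (-2 * residual)            # d(error)/db = -2 * residual
print(a, b)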
In [11]:
# Normal TensorFlow - initialize values, create a session and run the model
model = tf.initialize_all_variables()
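tf.initialize_all_variables was deprecated in TensorFlow 0.12; on later releases the equivalent call is tf.global_variables_initializer:
In [ ]:
# Same initialisation op on TensorFlow >= 0.12
model = tf.global_variables_initializer()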
In [14]:
errors = []
with tf.Session() as session:
    session.run(model)
    for i in range(1000):
        # Generate one training pair from the target line y = 2x + 6
        x_value = np.random.rand()
        y_value = x_value * 2 + 6
        # Run a training step and record the error for this example
        _, error_value, y_model_value, w_value = session.run([train_op, error, y_model, w], feed_dict={x: x_value, y: y_value})
        errors.append(error_value)
    # Read back the learned parameters once training is done
    w_value = session.run(w)

print("Predicted model: {a:.3f}x + {b:.3f}".format(a=w_value[0], b=w_value[1]))
In [15]:
# Plot a 50-step running mean of the per-example error
plt.plot([np.mean(errors[max(0, i - 50):i + 1]) for i in range(len(errors))])
plt.show()