In [1]:
# import and check version
import tensorflow as tf
# tf can be really verbose
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
In [2]:
# a small sanity check, does tf seem to work ok?
sess = tf.Session()
hello = tf.constant('Hello TF!')
print(sess.run(hello))
sess.close()
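Side note: in TF 1.x, ops like tf.constant only build a node in a graph; a Session is what actually computes values. A minimal sketch of that deferred execution (the names a, b, total are just for illustration):
a = tf.constant(2.0)
b = tf.constant(3.0)
total = a + b              # just a Tensor in the graph, no value yet
print(total)               # prints the Tensor object, not 5.0
with tf.Session() as sess:
    print(sess.run(total)) # 5.0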
In [0]:
input = [[-1], [0], [1], [2], [3], [4]]
output = [[2], [1], [0], [-1], [-2], [-3]]
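The toy data follows the rule output = 1 - input, i.e. a line with slope -1 and y-intercept 1. A quick plain-Python check (illustrative, not part of the original cells):
assert output == [[1 - x[0]] for x in input]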
In [10]:
import matplotlib.pyplot as plt
plt.xlabel('input')
plt.ylabel('output')
plt.plot(input, output, 'kX')
Out[10]:
In [13]:
plt.plot(input, output)
plt.plot(input, output, 'ro')
Out[13]:
In [14]:
x = tf.constant(input, dtype=tf.float32)
y_true = tf.constant(output, dtype=tf.float32)
y_true
Out[14]:
An untrained single unit (neuron) also produces a line from the same input, just a different one.
In [15]:
# short version, though harder to inspect
# y_pred = tf.layers.dense(inputs=x, units=1)
# matrix multiplication under the hood
# tf.matmul(x, w) + b
linear_model = tf.layers.Dense(units=1)
y_pred = linear_model(x)
y_pred
Out[15]:
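To make the "matrix multiplication under the hood" comment concrete, here is a rough hand-built equivalent of the Dense layer; this is only a sketch with assumed variable names w and b, not the notebook's actual code:
w = tf.Variable(tf.random_normal([1, 1]))  # one weight
b = tf.Variable(tf.zeros([1]))             # one bias
y_manual = tf.matmul(x, w) + b             # same output shape as y_pred
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y_manual))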
In [16]:
# single neuron and single input: one weight and one bias
# weights and biases are represented as variables
# https://www.tensorflow.org/guide/variables
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    weights = sess.run(linear_model.trainable_weights)
    print(weights)
In [19]:
# when you execute this cell, you should see a different line, as the initialization is random
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output_pred = sess.run(y_pred)
    print(output_pred)
    weights = sess.run(linear_model.trainable_weights)
    print(weights)
plt.plot(input, output_pred)
plt.plot(input, output, 'ro')
In [21]:
loss = tf.losses.mean_squared_error(labels=y_true, predictions=y_pred)
loss
Out[21]:
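Mean squared error is just the average of the squared differences between predictions and labels: MSE = mean((y_true - y_pred)^2). A hand-rolled version for comparison (a sketch; loss_manual is not part of the original notebook):
loss_manual = tf.reduce_mean(tf.square(y_true - y_pred))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run((loss, loss_manual)))  # the two values should match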
In [22]:
# when this loss is zero (which it is not right now) we get the desired output
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(loss))
Move in parameter space in the direction of descent
https://twitter.com/colindcarroll/status/1090266016259534848
In [23]:
# move the parameters of our single neuron in the right direction; the step size is set by the learning rate
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(loss)
train
Out[23]:
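Roughly, optimizer.minimize(loss) builds an update param <- param - learning_rate * d(loss)/d(param) for every trainable variable. A sketch of that with raw TF ops (not how the notebook actually trains; manual_train is just an illustrative name):
grads = tf.gradients(loss, linear_model.trainable_weights)
manual_train = [tf.assign(var, var - 0.01 * grad)
                for var, grad in zip(linear_model.trainable_weights, grads)]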
In [0]:
losses = []
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# iterations aka epochs, optimizing the parameters of the neuron
for i in range(500):
    # run the optimizer step and fetch the current loss; only the loss value is kept
    _, loss_value = sess.run((train, loss))
    losses.append(loss_value)
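A quick sanity check on the recorded losses: the first value should be much larger than the last (not part of the original cells):
print(losses[0], losses[-1])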
In [25]:
print(sess.run(loss))
In [29]:
# the dream loss curve of every machine learning person (typically you see a noisy curve that only sort of goes down)
plt.yscale('log')
plt.ylabel("loss")
plt.xlabel("epochs")
plt.plot(losses)
Out[29]:
In [30]:
output_pred = sess.run(y_pred)
print(output_pred)
plt.plot(input, output_pred)
plt.plot(input, output, 'ro')
Out[30]:
In [31]:
# single neuron and single input: one weight and one bias
# slope m ~ -1
# y-axis offset y0 ~ 1
# https://en.wikipedia.org/wiki/Linear_equation#Slope%E2%80%93intercept_form
weights = sess.run(linear_model.trainable_weights)
print(weights)
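With slope m ~ -1 and intercept y0 ~ 1 the learned line is approximately y = 1 - x. A quick check on an input that was not in the training data (a sketch; m and y0 are just illustrative names):
m = weights[0][0][0]   # kernel of shape (1, 1)
y0 = weights[1][0]     # bias of shape (1,)
print(m * 10 + y0)     # should be close to 1 - 10 = -9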
In [0]: