In [1]:
# import and check version
import tensorflow as tf
# tf can be really verbose
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
In [2]:
# a small sanity check: does TF seem to work?
sess = tf.Session()
hello = tf.constant('Hello TF!')
print(sess.run(hello))
sess.close()
In [0]:
# toy data: inputs and target outputs, one example per row
input = [[-1], [0], [1], [2], [3], [4]]
output = [[2], [1], [0], [-1], [-2], [-3]]
x = tf.constant(input, dtype=tf.float32)
y_true = tf.constant(output, dtype=tf.float32)
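A quick plot of the toy data (not part of the original run) shows the relationship the network will have to learn: the targets follow y = 1 - x, so a single linear neuron could fit them exactly.
In [ ]:
# a minimal sketch, not in the original notebook: visualize the raw data
import matplotlib.pyplot as plt
plt.plot(input, output, 'o')
plt.xlabel('input')
plt.ylabel('output')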
Typically, the output of a neuron is transformed by an activation function, which squashes the output to a value between 0 and 1 (sigmoid), to a value between -1 and 1 (tanh), or sets all negative values to zero (relu).
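For reference, the formulas behind those three names are short; here is a minimal NumPy sketch (not part of the original notebook) of each one:
In [ ]:
# the three activations written out by hand (sketch, not from the original notebook)
import numpy as np

def sigmoid(z):
    # squashes any real number into (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

def tanh(z):
    # squashes any real number into (-1, 1)
    return np.tanh(z)

def relu(z):
    # zero for negative inputs, unchanged for positive inputs
    return np.maximum(0.0, z)

z = np.linspace(-4, 4, 9)
print(sigmoid(z))
print(tanh(z))
print(relu(z))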
In [4]:
import matplotlib.pyplot as plt
# no activation (the default) = purely linear output
sess = tf.Session()
y_pred = tf.layers.dense(inputs=x, units=1)
sess.run(tf.global_variables_initializer())
plt.plot(input, sess.run(y_pred))
Out[4]:
In [5]:
# sigmoid = compresses output between 0 and 1
sess = tf.Session()
y_pred = tf.layers.dense(inputs=x, units=1, activation=tf.nn.sigmoid)
sess.run(tf.global_variables_initializer())
plt.plot(input, sess.run(y_pred))
Out[5]:
In [6]:
# hyperbolic tangent = compresses output between -1 and 1
sess = tf.Session()
y_pred = tf.layers.dense(inputs=x, units=1, activation=tf.nn.tanh)
sess.run(tf.global_variables_initializer())
plt.plot(input, sess.run(y_pred))
Out[6]:
In [7]:
# relu = zero for negative inputs, linear for positive inputs
sess = tf.Session()
y_pred = tf.layers.dense(inputs=x, units=1, activation=tf.nn.relu)
sess.run(tf.global_variables_initializer())
plt.plot(input, sess.run(y_pred))
Out[7]:
In [8]:
# an activation is just a function applied to the layer's output
sess = tf.Session()
h = tf.layers.dense(inputs=x, units=1)
y_pred = tf.nn.relu(h)
sess.run(tf.global_variables_initializer())
plt.plot(input, sess.run(h))
plt.plot(input, sess.run(y_pred))
plt.legend(['no activation', 'relu'])
Out[8]:
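As a small follow-up (not in the original notebook), the relu used above is nothing more than an elementwise max(0, x), which can be checked directly:
In [ ]:
# sanity check: tf.nn.relu(z) gives the same result as tf.maximum(z, 0)
sess = tf.Session()
z = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
print(sess.run(tf.nn.relu(z)))       # [0.  0.  0.  0.5 2. ]
print(sess.run(tf.maximum(z, 0.0)))  # same values
sess.close()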
In [0]: