In [1]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
Set Random Seeds for Reproducible Results
In [2]:
np.random.seed(101)
tf.set_random_seed(101)
Data Setup
Setting Up some Random Data for Demonstration Purposes
In [3]:
rand_a = np.random.uniform(low = 0,
                           high = 100,
                           size = (5, 5))
rand_a
Out[3]:
In [4]:
rand_b = np.random.uniform(low = 0,
                           high = 100,
                           size = (5, 1))
rand_b
Out[4]:
In [5]:
# Create placeholders of type float32
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
In [6]:
add_op = a + b # tf.add(a,b)
mult_op = a * b #tf.multiply(a,b)
In [7]:
with tf.Session() as sess:
    add_result = sess.run(add_op,
                          feed_dict={a: rand_a,
                                     b: rand_b})
    print(add_result)
    print('\n')
    mult_result = sess.run(mult_op,
                           feed_dict={a: rand_a,
                                      b: rand_b})
    print(mult_result)
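Because rand_a has shape (5, 5) and rand_b has shape (5, 1), both ops broadcast rand_b across the columns of rand_a, just as NumPy would. As a quick comparison (a minimal sketch outside the TensorFlow graph), the same results come out of plain NumPy:
# Plain NumPy equivalents of the two graph operations above (for comparison only)
np_add = rand_a + rand_b    # rand_b (5, 1) broadcasts across the columns of rand_a (5, 5)
np_mult = rand_a * rand_b
print(np_add)
print(np_mult)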
In [8]:
n_features = 10
n_dense_neurons = 3
In [9]:
# Placeholder for x
x = tf.placeholder(tf.float32, (None, n_features))
In [10]:
# Variables for W (weights) and b (bias)
b = tf.Variable(tf.zeros([n_dense_neurons]))
W = tf.Variable(tf.random_normal([n_features, n_dense_neurons]))
In [11]:
b.get_shape()
Out[11]:
In [12]:
W.get_shape()
Out[12]:
Operation and Activation Function
In [13]:
xW = tf.matmul(x, W)
In [14]:
z = tf.add(xW,b)
In [15]:
# Could also use tf.nn.relu() or tf.tanh() as the activation
a = tf.sigmoid(z)
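Taken together, these three ops compute a single dense layer, a = sigmoid(xW + b), mapping an input of shape (None, 10) to an output of shape (None, 3). A minimal NumPy sketch of the same computation (the function name here is hypothetical, shown only for illustration):
def dense_layer_numpy(x_np, W_np, b_np):
    # z = xW + b followed by a sigmoid activation
    z_np = x_np.dot(W_np) + b_np
    return 1.0 / (1.0 + np.exp(-z_np))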
Variable Initializer!
In [16]:
init = tf.global_variables_initializer()
In [17]:
with tf.Session() as sess:
    # Run the variable initializer before evaluating anything
    sess.run(init)
    # Evaluate the layer output for a single random input sample
    layer_out = sess.run(a, feed_dict={x: np.random.random([1, n_features])})
In [18]:
print(layer_out)
We still need to finish off this process with optimization! Let's learn how to do this next.
In [19]:
x_data = np.linspace(0, 10, 10) + np.random.uniform(-1.5, 1.5, 10)
In [20]:
x_data
Out[20]:
In [21]:
y_label = np.linspace(0, 10, 10) + np.random.uniform(-1.5, 1.5, 10)
In [22]:
%matplotlib inline
plt.figure(figsize = (10, 10))
plt.plot(x_data,y_label,'*')
Out[22]:
Variables
In [23]:
np.random.rand(2)
Out[23]:
In [24]:
# Slope and intercept variables, initialized with arbitrary starting values
m = tf.Variable(0.442)
b = tf.Variable(0.877)
In [25]:
error = 0
for x, y in zip(x_data, y_label):
# Predicted value
y_hat = m * x + b
# Mean Squared Error (MSE)
error += (y - y_hat) ** 2
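Written out, the cost built by this loop is the sum of squared residuals over the ten points, error = Σᵢ (yᵢ − (m·xᵢ + b))². It is a sum rather than a mean, though dividing by the number of points would not move the minimum. An equivalent vectorized NumPy expression (using hypothetical plain-float values m_val and b_val, not the tf.Variable objects above) would be:
cost = np.sum((y_label - (m_val * x_data + b_val)) ** 2)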
In [26]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.001)
train = optimizer.minimize(error)
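Each run of the train op applies one plain gradient-descent update to both variables: m ← m − 0.001·∂error/∂m and b ← b − 0.001·∂error/∂b, with 0.001 being the learning rate chosen above.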
In [27]:
init = tf.global_variables_initializer()
In [28]:
epochs = 1000
In [29]:
with tf.Session() as sess:
    sess.run(init)
    for i in range(epochs):
        sess.run(train)
        # Print the error every 50th epoch.
        if i % 50 == 0:
            print("Epoch {}: {}".format(i, sess.run(error)))
    # Fetch the final m and b values.
    final_slope, final_intercept = sess.run([m, b])
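As an optional sanity check on the learned parameters (a sketch that is not part of the original flow), the closed-form least-squares fit can be computed with NumPy and compared against final_slope and final_intercept:
# Closed-form least-squares line for comparison with the gradient-descent result
ls_slope, ls_intercept = np.polyfit(x_data, y_label, 1)
print(ls_slope, ls_intercept)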
In [30]:
final_slope
Out[30]:
In [31]:
final_intercept
Out[31]:
In [32]:
x_test = np.linspace(-1, 11, 10)
y_pred_plot = final_slope * x_test + final_intercept
plt.plot(x_test,y_pred_plot,'r')
plt.plot(x_data,y_label,'*')
Out[32]: