In [1]:
import sys; print('Python \t\t{0[0]}.{0[1]}'.format(sys.version_info))
import tensorflow as tf; print('Tensorflow \t{}'.format(tf.__version__))
import keras; print('Keras \t\t{}'.format(keras.__version__))
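Note: this notebook uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session, and friends). If you are running TensorFlow 2.x, a minimal compatibility shim (assuming only that TF 2.x is installed) is:

import tensorflow.compat.v1 as tf  # expose the 1.x graph API under the usual name
tf.disable_eager_execution()       # graph mode is required for placeholders and sessions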
In [2]:
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
In [3]:
# Linear model with additive Gaussian noise: y = 2x + 3 + e
f = lambda x, e: 2*x + 3 + e
f_vec = np.vectorize(f)
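Since the expression 2*x + 3 + e already broadcasts over NumPy arrays, np.vectorize is not strictly needed here; applying f directly to an array gives the same result:

# Sanity check: the plain lambda already handles array inputs
xs = np.array([0.0, 0.5, 1.0])
assert np.allclose(f(xs, 0.0), f_vec(xs, 0.0))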
In [4]:
samples = 1000
e_data = np.random.normal(0.0, 0.2, samples)
x_data = np.random.rand(samples)
y_data = f_vec(x_data,e_data)
# transform into columns
x_data = x_data.reshape(-1,1)
y_data = y_data.reshape(-1,1)
In [5]:
plt.figure(figsize=(5,5))
plt.plot(x_data[:100], y_data[:100], 'r.')
plt.show()
In [6]:
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.01, random_state=42)
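A quick shape check: with samples = 1000 and test_size = 0.01, the split should leave 990 training rows and 10 test rows.

print(x_train.shape, x_test.shape)  # expected: (990, 1) (10, 1)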
In [7]:
import tensorflow as tf
In [8]:
# Set hyperparameters
learning_rate = 0.01
training_iteration = 10  # number of passes over the training set
batch_size = 5
In [9]:
# TF graph inputs: one feature column and one target column per example
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
In [10]:
# Set model weights
w = tf.Variable(tf.zeros([1]), name='w')
b = tf.Variable(tf.zeros([1]), name='b')
In [11]:
with tf.name_scope("wx_b") as scope:
# Construct a linear model
y_hat = w*x + b
# Add summary ops to collect data
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
In [12]:
# More name scopes will clean up the graph representation
with tf.name_scope("cost_function") as scope:
    # Cost: sum of squared errors
    cost_function = tf.reduce_sum(tf.pow(y_hat - y, 2))
    # Create a summary to monitor the cost function
    tf.summary.scalar("cost_function", cost_function)
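Note that summing rather than averaging the squared errors ties the gradient magnitude, and hence the effective learning rate, to the batch size. If you want a batch-size-independent loss to experiment with, the mean squared error is a drop-in alternative:

# Alternative loss (not used below): mean instead of sum
# cost_function = tf.reduce_mean(tf.pow(y_hat - y, 2))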
In [13]:
with tf.name_scope("train") as scope:
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
In [14]:
# Initializing the variables
init = tf.global_variables_initializer()
# Merge all summaries into a single operator
merged_summary_op = tf.summary.merge_all()
In [15]:
# Launch the graph
sess = tf.InteractiveSession()
sess.run(init)
In [16]:
# Change this to a location on your computer
summary_writer = tf.summary.FileWriter('./tensorboard', graph=sess.graph)
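To inspect the logged summaries, point TensorBoard at the same directory from a shell (tensorboard --logdir ./tensorboard) and open the address it prints, by default http://localhost:6006.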
In [17]:
# Training cycle
for iteration in range(training_iteration):
    avg_cost = 0.
    total_batch = len(x_train) // batch_size  # use the training-set size, not `samples`
    # Loop over all batches
    for i in range(total_batch):
        batch_range = slice(batch_size*i, batch_size*(i+1))
        batch_xs = np.copy(x_train[batch_range])
        batch_ys = np.copy(y_train[batch_range])
        # Fit the model to this batch
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
        # Batch cost
        cost = sess.run(cost_function, feed_dict={x: batch_xs, y: batch_ys})
        # Accumulate the average loss
        avg_cost += cost/total_batch
        # Write logs for each batch
        summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
        summary_writer.add_summary(summary_str, iteration*total_batch + i)
    # Display logs per iteration
    print("Iteration: {:04d} cost= {:.9f}".format(iteration+1, avg_cost))
In [18]:
# Evaluate test error (MSE is an error metric, lower is better)
mse = tf.reduce_mean(tf.pow(y_hat - y, 2))
print("Test MSE:", mse.eval({x: x_test, y: y_test}))
In [19]:
w_ = w.eval()[0]
b_ = b.eval()[0]
print("Regression w = {:.2f}, b = {:.2f}".format(w_, b_))
In [20]:
y_test_predict = sess.run(y_hat, feed_dict={x:x_test})
np.hstack([x_test[:10], y_test_predict[:10]])
Out[20]:
In [21]:
plt.figure(figsize=(5,5))
plt.plot(x_data[:100], y_data[:100], 'r.')
plt.plot(x_test, y_test_predict, 'bo')
plt.plot([0, 1], [b_, w_*1 + b_], 'b-')  # fitted line over the input range [0, 1]
plt.show()
In [22]:
# Close the Session when we're done.
sess.close()
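An alternative to closing manually is to run everything inside a with tf.Session() as sess: block, which closes the session automatically; InteractiveSession was used here so that the eval() calls above work without passing a session explicitly.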