In [1]:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn import model_selection
from sklearn import metrics
# generate some data
np.random.seed(5)
X = np.arange(0, 100)
y = 20 + 3 * X + np.random.normal(0, 80, 100)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.33, random_state=42)
plt.scatter(X_train, y_train, color='green', label="training data")
plt.scatter(X_test, y_test, color='blue', label="test data")
rng = np.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
n_samples = len(X_train)
# Create tensorflow regression model
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
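# Written out, cost = (1/(2*n_samples)) * sum_i (W*x_i + b - y_i)^2;
# the extra factor of 1/2 simply cancels the 2 produced when differentiating the square.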
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
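# Each run of `optimizer` therefore applies one update of the form
#   W <- W - learning_rate * d(cost)/dW,   b <- b - learning_rate * d(cost)/db,
# evaluated on whatever data is fed through the placeholders at that step.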
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        for (x, y) in zip(X_train, y_train):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: X_train, Y: y_train})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))
    y_pred = sess.run(W) * X_test + sess.run(b)
    order = np.argsort(X_test)
    plt.plot(np.array(X_test)[order], np.array(y_pred)[order], color='red', linewidth=3, linestyle='solid', label="model")
    plt.legend()
    plt.draw()
    print('Mean squared error: %.2f' % metrics.mean_squared_error(y_test, y_pred))
    print('R^2 score: %.2f' % metrics.r2_score(y_test, y_pred))
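As a sanity check (not part of the original cell), the slope and intercept found by gradient descent can be compared against a closed-form least-squares fit on the same training data. This is a minimal sketch, assuming X_train and y_train are still the NumPy arrays produced by train_test_split above.
# Closed-form ordinary least-squares fit for comparison (sketch)
slope, intercept = np.polyfit(X_train, y_train, deg=1)   # highest power first: [slope, intercept]
print("OLS slope=%.3f intercept=%.3f" % (slope, intercept))
# With enough epochs, the W and b printed during training should end up close to these values.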
In [2]:
np.random.seed(5)
X = np.arange(0, 100)
y = np.power(X, 2) + np.random.normal(0, 500, 100)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.33, random_state=42)
plt.scatter(X_train, y_train, color='green', label="training data")
plt.scatter(X_test, y_test, color='blue', label="test data")
rng = np.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
n_samples = len(X_train)
# Create tensorflow regression model
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        for (x, y) in zip(X_train, y_train):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: X_train, Y: y_train})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))
    y_pred = sess.run(W) * X_test + sess.run(b)
    order = np.argsort(X_test)
    plt.plot(np.array(X_test)[order], np.array(y_pred)[order], color='red', linewidth=3, linestyle='solid', label="model")
    plt.legend()
    plt.draw()
    print('Mean squared error: %.2f' % metrics.mean_squared_error(y_test, y_pred))
    print('R^2 score: %.2f' % metrics.r2_score(y_test, y_pred))
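In this cell the data follow a quadratic trend, so the straight-line model pred = W*X + b cannot capture it and the test-set fit is poor. One way to extend the model is to add an x^2 feature; the sketch below (not part of the original notebook) uses a closed-form polynomial fit rather than the TensorFlow graph above, and assumes X_train, y_train, X_test and y_test are still the arrays created at the top of the cell.
# Sketch: fit y ≈ W2*x^2 + W1*x + b with an added squared feature
coeffs = np.polyfit(X_train, y_train, deg=2)     # [W2, W1, b], highest power first
y_pred_quad = np.polyval(coeffs, X_test)
print('R^2 with squared feature: %.2f' % metrics.r2_score(y_test, y_pred_quad))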