In [1]:
import tensorflow as tf  # this notebook uses the TensorFlow 1.x graph/session API
import numpy as np
In [2]:
# Build a small graph, then evaluate it in a session.
x = tf.Variable(3, name='x')
y = tf.Variable(4, name='y')
f = x*x*y + y + 2
sess = tf.Session()
sess.run(x.initializer)  # variables must be initialized before use
sess.run(y.initializer)
result = sess.run(f)
print(result)
sess.close()  # release the session's resources
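Each call to sess.run() triggers a separate graph evaluation, so the two initializer calls above can be grouped. A minimal sketch of the same graph with batched run() calls; nothing is assumed beyond the nodes defined above:

In [ ]:
sess = tf.Session()
sess.run([x.initializer, y.initializer])  # initialize both variables in one call
result = sess.run(f)
print(result)
sess.close()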
In [3]:
# Same computation, but the with block closes the session automatically.
x = tf.Variable(3, name='x')
y = tf.Variable(4, name='y')
f = x*x*y + y + 2
init = tf.global_variables_initializer()  # node that initializes all variables at once
with tf.Session() as sess:
    init.run()
    result = f.eval()
print(result)
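Re-running notebook cells keeps adding nodes to the same default graph, so names get uniquified ('x' becomes 'x_1', 'x_2', ...). A small sketch of clearing the default graph before rebuilding, using the standard tf.reset_default_graph():

In [ ]:
tf.reset_default_graph()  # start from an empty default graph
x = tf.Variable(3, name='x')  # the name 'x' is available again
print(x.graph is tf.get_default_graph())  # True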
In [7]:
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler

housing = fetch_california_housing()
m, n = housing.data.shape  # m instances, n features
std_scaler = StandardScaler()
housing_data_scaled = std_scaler.fit_transform(housing.data)  # zero mean, unit variance
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing_data_scaled]  # prepend bias column
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
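A quick NumPy sanity check on the prepared matrix, assuming only the arrays defined above: column 0 is the bias feature, so the standardized columns are 1 onward.

In [ ]:
print(housing_data_plus_bias.shape)  # (m, n + 1)
print(housing_data_plus_bias[:, 1:].mean(axis=0).round(6))  # ~0 per feature
print(housing_data_plus_bias[:, 1:].std(axis=0).round(6))   # ~1 per feature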
In [13]:
# Normal Equation: theta = (X^T X)^{-1} X^T y
XT = tf.transpose(X)
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
with tf.Session() as sess:
    theta_value = theta.eval()
    print(theta_value)
    print(mse.eval())
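A cross-check of the Normal Equation result in pure NumPy; np.linalg.lstsq solves the same least-squares problem without forming the matrix inverse explicitly, so the two theta vectors should agree up to float32 precision.

In [ ]:
theta_np, residuals, rank, svals = np.linalg.lstsq(
    housing_data_plus_bias, housing.target.reshape(-1, 1), rcond=None)
print(theta_np)  # should be close to theta_value above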
In [14]:
# Manually Computing the Gradients
n_epochs = 1000
learning_rate = 0.01
theta = tf.Variable(tf.random_uniform([n+1, 1], -1.0, 1.0), name='theta')
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
gradients = 2/m * tf.matmul(tf.transpose(X), error)  # gradient of MSE w.r.t. theta
training_op = tf.assign(theta, theta - learning_rate * gradients)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print('Epoch', epoch, 'MSE =', mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()  # read the trained weights while the session is open
print(best_theta)
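The two lines computing gradients and training_op by hand can be replaced with one of TensorFlow's built-in optimizers, which differentiates mse with respect to every trainable variable and builds the update op itself. A sketch against the same mse node:

In [ ]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)  # gradients + assign in one op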
In [15]:
# Autodiff with placeholders and mini-batches
X = tf.placeholder(tf.float32, shape=(None, n+1), name='X')
y = tf.placeholder(tf.float32, shape=(None, 1), name='y')
theta = tf.Variable(tf.random_uniform([n+1, 1], -1.0, 1.0), name='theta')
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
gradients = tf.gradients(mse, [theta])[0]  # reverse-mode autodiff; correct for any batch size
training_op = tf.assign(theta, theta - learning_rate * gradients)
init = tf.global_variables_initializer()
batch_size = 1024
n_batches = int(np.ceil(m / batch_size))

def fetch_batch(epoch, batch_index, batch_size):
    # Sequential slicing; the epoch argument is unused here.
    a = batch_index * batch_size
    b = min(a + batch_size, m)
    return housing_data_plus_bias[a:b, :], housing.target.reshape(-1, 1)[a:b, :]

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse, training_op], feed_dict={X: X_batch, y: y_batch})
        if epoch % 100 == 0:
            print('Epoch', epoch, 'MSE =', mse_value)  # MSE of the last mini-batch
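fetch_batch above slices the data sequentially and never uses its epoch argument. A sketch of a random-sampling variant; seeding from (epoch, batch_index) for reproducibility is my assumption, not something the original code does:

In [ ]:
def fetch_batch_random(epoch, batch_index, batch_size):
    # Deterministic seed per (epoch, batch) pair, so runs are repeatable.
    rnd = np.random.RandomState(epoch * n_batches + batch_index)
    indices = rnd.randint(m, size=batch_size)  # sample with replacement
    X_batch = housing_data_plus_bias[indices]
    y_batch = housing.target.reshape(-1, 1)[indices]
    return X_batch, y_batch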
In [18]:
# Saving and Restoring Models
saver = tf.train.Saver()  # create the saver after the graph is fully built
n_epochs = 201
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse, training_op], feed_dict={X: X_batch, y: y_batch})
        if epoch % 100 == 0:
            print('Epoch', epoch, 'MSE =', mse_value)
    saver.save(sess, '/tmp/my_model_final.ckpt')  # writes checkpoint files + .meta graph file
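saver.save() also writes a .meta file holding the graph structure, so another program can reload the model without rebuilding it in Python. A minimal sketch using tf.train.import_meta_graph on a separate Graph object; note the tensor name depends on how many times the cell defining theta was run ('theta:0', 'theta_1:0', ...), so it is an assumption to verify:

In [ ]:
restored_graph = tf.Graph()  # separate graph, leaves the notebook's default graph untouched
with restored_graph.as_default():
    saver2 = tf.train.import_meta_graph('/tmp/my_model_final.ckpt.meta')
    theta_restored = restored_graph.get_tensor_by_name('theta:0')  # name may be uniquified
    with tf.Session() as sess:
        saver2.restore(sess, '/tmp/my_model_final.ckpt')
        print(theta_restored.eval())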
In [20]:
with tf.Session() as sess:
    saver.restore(sess, '/tmp/my_model_final.ckpt')  # no init needed; weights are loaded
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse, training_op], feed_dict={X: X_batch, y: y_batch})
        if epoch % 100 == 0:
            print('Epoch', epoch, 'MSE =', mse_value)
    saver.save(sess, '/tmp/my_model_final.ckpt')  # continue training, then overwrite the checkpoint
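When several checkpoints accumulate in a directory, tf.train.latest_checkpoint finds the newest one, which avoids hard-coding the path. A small sketch:

In [ ]:
ckpt = tf.train.latest_checkpoint('/tmp')  # newest checkpoint prefix, or None
if ckpt is not None:
    with tf.Session() as sess:
        saver.restore(sess, ckpt)
        print('restored from', ckpt)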
In [21]:
# Visualizing the Graph with TensorBoard
from datetime import datetime

now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
root_logdir = 'tf_logs'
logdir = '{}/run-{}'.format(root_logdir, now)  # one subdirectory per run
mse_summary = tf.summary.scalar('MSE', mse)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse, training_op], feed_dict={X: X_batch, y: y_batch})
            if batch_index % 10 == 0:
                summary_str = mse_summary.eval(feed_dict={X: X_batch, y: y_batch})
                file_writer.add_summary(summary_str, epoch * n_batches + batch_index)
        if epoch % 100 == 0:
            print('Epoch', epoch, 'MSE =', mse_value)
file_writer.close()  # flush events to disk so TensorBoard can read them
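The raw graph renders as a tangle in TensorBoard; wrapping related ops in tf.name_scope groups them into a single collapsible box. A sketch scoping the loss ops (this creates new nodes; it does not rename the ones defined above):

In [ ]:
with tf.name_scope('loss') as scope:
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name='mse')
print(error.op.name)  # e.g. loss/sub
print(mse.op.name)    # e.g. loss/mse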
In [ ]: