In [1]:
import tensorflow as tf
import keras as ks
import numpy as np


Using TensorFlow backend.

In [2]:
x = tf.Variable(3, name='x')
y = tf.Variable(4, name='y')
f = x*x*y + y + 2

sess = tf.Session()
sess.run(x.initializer)
sess.run(y.initializer)
result = sess.run(f)
print(result)


WARNING:tensorflow:From /home/han/anaconda3/envs/tf-gpu/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
42

In [3]:
x = tf.Variable(3, name='x')
y = tf.Variable(4, name='y')
f = x*x*y + y + 2

init = tf.global_variables_initializer()

with tf.Session() as sess:
    init.run()
    result = f.eval()
    print(result)


42

In [7]:
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler 
housing = fetch_california_housing()

m, n = housing.data.shape

std_scaler = StandardScaler()

housing_data_scaled = std_scaler.fit_transform(housing.data)

housing_data_plus_bias = np.c_[np.ones((m,1)), housing_data_scaled]

X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1,1), dtype=tf.float32, name='y')

In [13]:
XT = tf.transpose(X)

theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred -y
mse = tf.reduce_mean(tf.square(error), name='mse')

with tf.Session() as sess:
    theta_value = theta.eval()
    print(theta_value)
    print(mse.eval())


[[ 2.0685625 ]
 [ 0.82961977]
 [ 0.11875165]
 [-0.26552737]
 [ 0.30569723]
 [-0.00450288]
 [-0.03932627]
 [-0.8998867 ]
 [-0.87054163]]
0.524321

In [14]:
# Manually Computing the Gradients

n_epochs = 1000
learning_rate = 0.01

theta = tf.Variable(tf.random_uniform([n+1,1],-1.0,1.0), name='theta')
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred -y
mse = tf.reduce_mean(tf.square(error), name='mse')
gradients = 2/m * tf.matmul(tf.transpose(X), error)
training_op = tf.assign(theta, theta-learning_rate * gradients)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print('Epoch', epoch, "MSE =", mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()
print(best_theta)


Epoch 0 MSE = 10.26011
Epoch 100 MSE = 0.8208547
Epoch 200 MSE = 0.6618923
Epoch 300 MSE = 0.63001823
Epoch 400 MSE = 0.60719544
Epoch 500 MSE = 0.5896548
Epoch 600 MSE = 0.576077
Epoch 700 MSE = 0.56551117
Epoch 800 MSE = 0.5572465
Epoch 900 MSE = 0.5507492
[[ 2.0685523 ]
 [ 0.9353447 ]
 [ 0.1652334 ]
 [-0.41541597]
 [ 0.40771294]
 [ 0.01103384]
 [-0.0455125 ]
 [-0.46176472]
 [-0.44231313]]

In [15]:
# Autodiff

X = tf.placeholder(tf.float32, shape=(None,n+1), name='X')
y = tf.placeholder(tf.float32, shape=(None, 1), name='y')
theta = tf.Variable(tf.random_uniform([n+1,1],-1.0,1.0), name='theta')
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
gradients = 2 / m * tf.matmul(tf.transpose(X), error)
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()

batch_size = 1024
n_batches = int(np.ceil(m / batch_size))

def fetch_batch(epoch, batch_index, batch_size):
    a = batch_index * batch_size
    b = min(batch_index * batch_size + batch_size, m)
    return housing_data_plus_bias[a:b,:],housing.target.reshape(-1,1)[a:b,:]
    
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse,training_op], feed_dict={X:X_batch, y: y_batch})
        if epoch % 100 == 0:
            print('Epoch', epoch, "MSE =", mse_value)


Epoch 0 MSE = 2.4434247
Epoch 100 MSE = 0.33161554
Epoch 200 MSE = 0.33421266
Epoch 300 MSE = 0.3269612
Epoch 400 MSE = 0.31481284
Epoch 500 MSE = 0.30427787
Epoch 600 MSE = 0.295708
Epoch 700 MSE = 0.2887715
Epoch 800 MSE = 0.2831533
Epoch 900 MSE = 0.27859667

In [18]:
# Saving and Restoring Models

saver = tf.train.Saver()

n_epochs = 201

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse,training_op], feed_dict={X:X_batch, y: y_batch})
        if epoch % 100 == 0:
            print('Epoch', epoch, "MSE =", mse_value)
            saver.save(sess, '/tmp/my_model_final.ckpt')


Epoch 0 MSE = 3.037168
Epoch 100 MSE = 0.25105837
Epoch 200 MSE = 0.2581487

In [20]:
with tf.Session() as sess:
    saver.restore(sess, '/tmp/my_model_final.ckpt')
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse,training_op], feed_dict={X:X_batch, y: y_batch})
        if epoch % 100 == 0:
            print('Epoch', epoch, "MSE =", mse_value)
            saver.save(sess, '/tmp/my_model_final.ckpt')


INFO:tensorflow:Restoring parameters from /tmp/my_model_final.ckpt
Epoch 0 MSE = 0.26196417
Epoch 100 MSE = 0.26132464
Epoch 200 MSE = 0.26083633

In [21]:
# Visualizing the Graph

from datetime import datetime

now = datetime.utcnow().strftime('%Y%m%d%H%M%S')

root_logdir = 'tf_logs'

logdir = '{}/run-{}'.format(root_logdir, now)

mse_summary = tf.summary.scalar('MSE', mse)

file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            mse_value, _ = sess.run([mse,training_op], feed_dict={X:X_batch, y: y_batch})
            if batch_index % 10 == 0:
                summary_str = mse_summary.eval(feed_dict={X:X_batch, y: y_batch})
                file_writer.add_summary(summary_str, epoch * n_batches + batch_index)
        if epoch % 100 == 0:
            print('Epoch', epoch, "MSE =", mse_value)


Epoch 0 MSE = 1.8571377
Epoch 100 MSE = 0.27233684
Epoch 200 MSE = 0.27052534

In [ ]: