In [1]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
First, reload the data we generated in notmnist.ipynb.
In [2]:
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
    train_dataset = save['train_dataset']
    train_labels = save['train_labels']
    valid_dataset = save['valid_dataset']
    valid_labels = save['valid_labels']
    test_dataset = save['test_dataset']
    test_labels = save['test_labels']
    del save  # hint to help gc free up memory
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Validation set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)
Reformat into a shape that's more adapted to the models we're going to train:
In [3]:
image_size = 28
num_labels = 10
def reformat(dataset, labels):
    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
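As an aside, the one-hot line in reformat() works by NumPy broadcasting: comparing an (n, 1) column of labels against the row vector arange(num_labels) yields an (n, num_labels) boolean matrix, which is then cast to float. A small illustration with 4 classes instead of 10:

labels = np.array([2, 0])
print((np.arange(4) == labels[:, None]).astype(np.float32))
# [[ 0.  0.  1.  0.]
#  [ 1.  0.  0.  0.]]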
In [4]:
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
Introduce and tune L2 regularization for both the logistic and neural network models. Remember that L2 regularization amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor t using nn.l2_loss(t). The right amount of regularization should improve your validation / test accuracy.
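For intuition: tf.nn.l2_loss(t) returns half the sum of the squared entries of t, i.e. sum(t ** 2) / 2. A minimal sanity check (assuming the imports from the first cell):

t = tf.constant([3.0, 4.0])
with tf.Session() as s:
    print(s.run(tf.nn.l2_loss(t)))  # 12.5 == (3**2 + 4**2) / 2
print(np.sum(np.array([3.0, 4.0]) ** 2) / 2)  # 12.5, the same value in NumPy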
In [6]:
# Exactly as before, but with an L2 regularization penalty (weighted by beta) added to the loss
batch_size = 128
beta = 0.00005 # Regularization parameter
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    weights = tf.Variable(tf.truncated_normal([image_size * image_size, num_labels], stddev=0.1))
    biases = tf.Variable(tf.constant(0.1, shape=[num_labels]))

    # Training computation.
    logits = tf.matmul(tf_train_dataset, weights) + biases
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # Regularization
    regularizers = tf.nn.l2_loss(weights) + tf.nn.l2_loss(biases)
    loss += beta * regularizers

    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
    test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
In [10]:
num_steps = 3001
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
In [19]:
H = 1024  # number of hidden units
# accuracy() is defined above.
# Below is an adapted copy of the SGD case above, with one hidden layer added:
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, H], stddev=0.1))
    weights2 = tf.Variable(tf.truncated_normal([H, num_labels], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[H]))
    biases2 = tf.Variable(tf.constant(0.1, shape=[num_labels]))

    # Training computation.
    hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
    logits = tf.matmul(hidden, weights2) + biases2
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # L2 regularization applies across both the hidden and output layers.
    regularizers = (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(biases1) +
                    tf.nn.l2_loss(weights2) + tf.nn.l2_loss(biases2))
    loss += beta * regularizers

    # Optimizer. Note: a larger step size can make the loss diverge to NaN.
    optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_logits = tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1), weights2) + biases2
    test_logits = tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1), weights2) + biases2
    valid_prediction = tf.nn.softmax(valid_logits)
    test_prediction = tf.nn.softmax(test_logits)
In [20]:
num_steps = 3001
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
In [21]:
# Demonstrate an extreme case of overfitting: with only 25 steps, the model
# only ever sees a few minibatches of the training data. We reduce the number
# of steps rather than the batch size, since the computation graph above was
# built for batch_size = 128 and would otherwise need rebuilding.
batch_size = 128
num_steps = 25

with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 5 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation; otherwise your evaluation results would be stochastic as well. TensorFlow provides nn.dropout() for that, but you have to make sure it's only inserted during training.
What happens to our extreme overfitting case?
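One common way to satisfy that constraint (a sketch only; the cells below instead build separate evaluation paths without dropout) is to feed the keep probability through a placeholder, so the same graph runs with dropout at training time and without it at evaluation time:

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, shape=(None, 4))
    keep_prob = tf.placeholder(tf.float32)
    y = tf.nn.dropout(x, keep_prob)  # zeroes random units, scales survivors by 1/keep_prob

with tf.Session(graph=g) as s:
    data = [[1.0, 2.0, 3.0, 4.0]]
    print(s.run(y, feed_dict={x: data, keep_prob: 0.5}))  # stochastic: some zeros, survivors doubled
    print(s.run(y, feed_dict={x: data, keep_prob: 1.0}))  # deterministic: identical to the input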
In [22]:
# Exactly as before, but with both L2 regularization AND dropout
batch_size = 128
H = 1024
beta = 0.001
dropout_prob = 0.5  # note: tf.nn.dropout takes the probability of *keeping* a unit
# accuracy() is defined above.
# Below is an adapted copy of the hidden-layer SGD case above, with dropout added:
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, H], stddev=0.1))
    weights2 = tf.Variable(tf.truncated_normal([H, num_labels], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[H]))
    biases2 = tf.Variable(tf.constant(0.1, shape=[num_labels]))

    # Training computation, with 50% dropout added on the hidden layer.
    hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
    hidden = tf.nn.dropout(hidden, dropout_prob)
    logits = tf.matmul(hidden, weights2) + biases2
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # L2 regularization applies across both the hidden and output layers.
    regularizers = (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(biases1) +
                    tf.nn.l2_loss(weights2) + tf.nn.l2_loss(biases2))
    loss += beta * regularizers

    # Optimizer. Note: a larger step size can make the loss diverge to NaN.
    optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # Predictions for the training, validation, and test data (no dropout at evaluation).
    train_prediction = tf.nn.softmax(logits)
    valid_logits = tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1), weights2) + biases2
    test_logits = tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1), weights2) + biases2
    valid_prediction = tf.nn.softmax(valid_logits)
    test_prediction = tf.nn.softmax(test_logits)
In [23]:
num_steps = 3001
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is 97.1%.
One avenue you can explore is to add multiple layers.
Another one is to use learning rate decay:
global_step = tf.Variable(0) # count the number of steps taken.
learning_rate = tf.train.exponential_decay(0.5, global_step, ...)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
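For reference, with the default staircase=False, exponential_decay computes learning_rate = base_rate * decay_rate ** (global_step / decay_steps). A quick plain-Python check of the schedule, using the numbers from the cell below (0.2, 10000, 0.96):

base_rate, decay_steps, decay_rate = 0.2, 10000, 0.96
for step in (0, 5000, 10000):
    print(step, base_rate * decay_rate ** (step / float(decay_steps)))
# 0     -> 0.2
# 5000  -> ~0.196
# 10000 -> 0.192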
In [5]:
# Going for the max! 3 hidden layers, learning rate decay AND 50% dropout
batch_size = 1024
H = 1024
beta = 0.003
dropout_prob = 0.5  # note: tf.nn.dropout takes the probability of *keeping* a unit
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, H], stddev=0.1))
    weights2 = tf.Variable(tf.truncated_normal([H, H], stddev=0.1))
    weights3 = tf.Variable(tf.truncated_normal([H, H], stddev=0.1))
    weights4 = tf.Variable(tf.truncated_normal([H, num_labels], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[H]))
    biases2 = tf.Variable(tf.constant(0.1, shape=[H]))
    biases3 = tf.Variable(tf.constant(0.1, shape=[H]))
    biases4 = tf.Variable(tf.constant(0.1, shape=[num_labels]))

    # Training computation (with dropout on each hidden layer).
    hidden1 = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
    hidden1 = tf.nn.dropout(hidden1, dropout_prob)
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights2) + biases2)
    hidden2 = tf.nn.dropout(hidden2, dropout_prob)
    hidden3 = tf.nn.relu(tf.matmul(hidden2, weights3) + biases3)
    hidden3 = tf.nn.dropout(hidden3, dropout_prob)
    logits = tf.matmul(hidden3, weights4) + biases4
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # Regularization
    regularizers = (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(biases1) +
                    tf.nn.l2_loss(weights2) + tf.nn.l2_loss(biases2) +
                    tf.nn.l2_loss(weights3) + tf.nn.l2_loss(biases3) +
                    tf.nn.l2_loss(weights4) + tf.nn.l2_loss(biases4))
    loss += beta * regularizers

    # Use learning rate decay.
    global_step = tf.Variable(0)  # counts the number of steps taken
    learning_rate = tf.train.exponential_decay(0.2, global_step, 10000, 0.96)
    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Predictions for the training, validation, and test data (no dropout at evaluation).
    train_prediction = tf.nn.softmax(logits)
    # Validation set
    valid_hidden1 = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)
    valid_hidden2 = tf.nn.relu(tf.matmul(valid_hidden1, weights2) + biases2)
    valid_hidden3 = tf.nn.relu(tf.matmul(valid_hidden2, weights3) + biases3)
    valid_logits = tf.matmul(valid_hidden3, weights4) + biases4
    # Test set
    test_hidden1 = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)
    test_hidden2 = tf.nn.relu(tf.matmul(test_hidden1, weights2) + biases2)
    test_hidden3 = tf.nn.relu(tf.matmul(test_hidden2, weights3) + biases3)
    test_logits = tf.matmul(test_hidden3, weights4) + biases4
    valid_prediction = tf.nn.softmax(valid_logits)
    test_prediction = tf.nn.softmax(test_logits)
In [6]:
# As before but with THREE times as many steps
num_steps = 9001
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))