In [1]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
First reload the data we generated in notmnist.ipynb.
In [2]:
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
    train_dataset = save['train_dataset']
    train_labels = save['train_labels']
    valid_dataset = save['valid_dataset']
    valid_labels = save['valid_labels']
    test_dataset = save['test_dataset']
    test_labels = save['test_labels']
    del save  # hint to help gc free up memory
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Validation set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)
Reformat into a shape that's more adapted to the models we're going to train:
In [3]:
image_size = 28
num_labels = 10
def reformat(dataset, labels):
    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
Note:
np.arange(num_labels) == labels[:,None]
This uses NumPy broadcasting: comparing the (num_samples, 1) label column against the (num_labels,) range yields a boolean matrix with exactly one True per row, i.e. a one-hot encoding of the labels.
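For example, with a tiny toy labels array (illustration only, not data from this notebook):
labels = np.array([2, 0, 1])  # toy labels
one_hot = (np.arange(3) == labels[:, None]).astype(np.float32)
# one_hot is:
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]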
In [6]:
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
Introduce and tune L2 regularization for both logistic and neural network models. Remember that L2 amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor t using nn.l2_loss(t). The right amount of regularization should improve your validation / test accuracy.
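As a reminder, tf.nn.l2_loss(t) computes sum(t ** 2) / 2 as a scalar, so the penalty is typically added to the averaged cross-entropy as a weighted sum. A minimal sketch reusing the variable names from the next cell (alpha is the regularization strength to tune):
data_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
loss = data_loss + alpha * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2))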
In [34]:
batch_size = 3000
num_hiddens = 50
alpha = 0.1
graph = tf.Graph()
with graph.as_default():
    # input
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    tf_valid_labels = tf.constant(valid_labels)  # unused
    tf_test_labels = tf.constant(test_labels)    # unused
    # variables
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hiddens]))
    biases1 = tf.Variable(tf.zeros([num_hiddens]))
    weights2 = tf.Variable(tf.truncated_normal([num_hiddens, num_labels]))
    biases2 = tf.Variable(tf.zeros([num_labels]))
    # training computation
    hiddens1_input = tf.matmul(tf_train_dataset, weights1) + biases1
    hiddens1_output = tf.nn.relu(hiddens1_input)
    logits = tf.matmul(hiddens1_output, weights2) + biases2
    loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
            + alpha * tf.nn.l2_loss(weights1) + alpha * tf.nn.l2_loss(weights2))
    # optimizer
    optimizer = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    # predictions
    tf_train_prediction = tf.nn.softmax(logits)
    tf_valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1), weights2) + biases2)
    tf_test_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1), weights2) + biases2)

# training
num_steps = 6000
with tf.Session(graph=graph) as sess:
    # initialize variables
    init_graph = tf.initialize_all_variables()
    sess.run(init_graph)
    print("Initialized!")
    # training iterations
    for step in range(num_steps):
        #offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[0:batch_size, :]
        batch_labels = train_labels[0:batch_size, :]
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = sess.run([optimizer, loss, tf_train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(tf_valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(tf_test_prediction.eval(), test_labels))
    print("----------------------------------------")
In [31]:
batch_size = 3000
num_hiddens = 50
alpha = 0.1
graph = tf.Graph()
with graph.as_default():
    # input
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    tf_valid_labels = tf.constant(valid_labels)  # unused
    tf_test_labels = tf.constant(test_labels)    # unused
    # variables
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hiddens]))
    biases1 = tf.Variable(tf.zeros([num_hiddens]))
    weights2 = tf.Variable(tf.truncated_normal([num_hiddens, num_labels]))
    biases2 = tf.Variable(tf.zeros([num_labels]))
    # training computation (no L2 regularization, for comparison)
    hiddens1_input = tf.matmul(tf_train_dataset, weights1) + biases1
    hiddens1_output = tf.nn.relu(hiddens1_input)
    logits = tf.matmul(hiddens1_output, weights2) + biases2
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # optimizer
    optimizer = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    # predictions
    tf_train_prediction = tf.nn.softmax(logits)
    tf_valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1), weights2) + biases2)
    tf_test_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1), weights2) + biases2)

# training
num_steps = 6000
with tf.Session(graph=graph) as sess:
    # initialize variables
    init_graph = tf.initialize_all_variables()
    sess.run(init_graph)
    print("Initialized!")
    # training iterations
    for step in range(num_steps):
        #offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[0:batch_size, :]
        batch_labels = train_labels[0:batch_size, :]
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = sess.run([optimizer, loss, tf_train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(tf_valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(tf_test_prediction.eval(), test_labels))
    print("----------------------------------------")
Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation, otherwise your evaluation results would be stochastic as well. TensorFlow provides nn.dropout() for that, but you have to make sure it's only inserted during training.
What happens to our extreme overfitting case?
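The cell below keeps dropout out of evaluation by building separate, dropout-free expressions for the validation and test predictions. An alternative sketch (illustrative, not the approach used below) is to feed keep_prob through a placeholder so the same graph can run with dropout on or off:
keep_prob = tf.placeholder(tf.float32)  # 0.5 during training, 1.0 during evaluation
hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
hidden = tf.nn.dropout(hidden, keep_prob)  # kept activations are scaled by 1/keep_prob
logits = tf.matmul(hidden, weights2) + biases2
# training:   sess.run(optimizer, feed_dict={..., keep_prob: 0.5})
# evaluation: sess.run(prediction, feed_dict={..., keep_prob: 1.0})  # dropout becomes a no-op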
In [35]:
batch_size = 3000
num_hiddens = 50
keep_prob = 0.5
graph = tf.Graph()
with graph.as_default():
    # input
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    tf_valid_labels = tf.constant(valid_labels)  # unused
    tf_test_labels = tf.constant(test_labels)    # unused
    # variables
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hiddens]))
    biases1 = tf.Variable(tf.zeros([num_hiddens]))
    weights2 = tf.Variable(tf.truncated_normal([num_hiddens, num_labels]))
    biases2 = tf.Variable(tf.zeros([num_labels]))
    # training computation: dropout on the hidden layer, training path only
    hiddens1_input = tf.matmul(tf_train_dataset, weights1) + biases1
    hiddens1_output = tf.nn.dropout(tf.nn.relu(hiddens1_input), keep_prob)
    logits = tf.matmul(hiddens1_output, weights2) + biases2
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # optimizer
    optimizer = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    # predictions (no dropout at evaluation time)
    tf_train_prediction = tf.nn.softmax(logits)
    tf_valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1), weights2) + biases2)
    tf_test_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1), weights2) + biases2)

# training
num_steps = 6000
with tf.Session(graph=graph) as sess:
    # initialize variables
    init_graph = tf.initialize_all_variables()
    sess.run(init_graph)
    print("Initialized!")
    # training iterations
    for step in range(num_steps):
        #offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[0:batch_size, :]
        batch_labels = train_labels[0:batch_size, :]
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = sess.run([optimizer, loss, tf_train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(tf_valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(tf_test_prediction.eval(), test_labels))
    print("----------------------------------------")
Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is 97.1%.
One avenue you can explore is to add multiple layers.
Another one is to use learning rate decay:
global_step = tf.Variable(0) # count the number of steps taken.
learning_rate = tf.train.exponential_decay(0.5, global_step, ...)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
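With staircase=True the schedule is learning_rate = initial_rate * decay_rate ** floor(global_step / decay_steps). A minimal sketch using the values that the last cell of this notebook happens to pick (0.5 initial rate, decayed by 0.90 every 500 steps):
global_step = tf.Variable(0)  # incremented automatically by minimize(..., global_step=global_step)
learning_rate = tf.train.exponential_decay(
    0.5,              # initial learning rate
    global_step,      # current step counter
    500,              # decay_steps
    0.90,             # decay_rate
    staircase=True)   # decay in discrete jumps every decay_steps
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    loss, global_step=global_step)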
In [51]:
# example: how to operate on a tf.Variable
a = tf.Variable(0)
a = a + 2  # 'a' now refers to the add op's output tensor, not the variable itself
with tf.Session() as sess:
    init_graph = tf.initialize_all_variables()
    sess.run(init_graph)
    result = sess.run(a)
    print(result)
In [ ]:
batch_size = 128
num_hiddens1 = 2024
keep_prob = 0.5
graph = tf.Graph()
with graph.as_default():
    # input
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    tf_valid_labels = tf.constant(valid_labels)  # unused
    tf_test_labels = tf.constant(test_labels)    # unused
    # variables
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hiddens1]))
    biases1 = tf.Variable(tf.zeros([num_hiddens1]))
    weights2 = tf.Variable(tf.truncated_normal([num_hiddens1, num_labels]))
    biases2 = tf.Variable(tf.zeros([num_labels]))
    #weights3 = tf.Variable(tf.truncated_normal([num_hiddens2, num_labels]))
    #biases3 = tf.Variable(tf.zeros([num_labels]))
    #weights4 = tf.Variable(tf.truncated_normal([num_hiddens3, num_labels]))
    #biases4 = tf.Variable(tf.zeros([num_labels]))
    # training computation
    hiddens1_input = tf.matmul(tf_train_dataset, weights1) + biases1
    hiddens1_output = tf.nn.dropout(tf.nn.relu(hiddens1_input), keep_prob)
    hiddens2_input = tf.matmul(hiddens1_output, weights2) + biases2
    #hiddens2_output = tf.nn.relu(hiddens2_input)
    #hiddens3_input = tf.matmul(hiddens2_output, weights3) + biases3
    #hiddens3_output = tf.nn.relu(hiddens3_input)
    #hiddens4_input = tf.matmul(hiddens3_output, weights4) + biases4
    logits = hiddens2_input
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # optimizer with learning rate decay
    global_step = tf.Variable(0)  # counts the number of steps taken
    learning_rate = tf.train.exponential_decay(0.5, global_step, 500, 0.90, staircase=True)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # predictions (no dropout at evaluation time)
    tf_train_prediction = tf.nn.softmax(logits)
    valid_h1_in = tf.matmul(tf_valid_dataset, weights1) + biases1
    valid_h1_out = tf.nn.relu(valid_h1_in)
    valid_h2_in = tf.matmul(valid_h1_out, weights2) + biases2
    #valid_h2_out = tf.nn.relu(valid_h2_in)
    #valid_h3_in = tf.matmul(valid_h2_out, weights3) + biases3
    #valid_h3_out = tf.nn.relu(valid_h3_in)
    #valid_h4_in = tf.matmul(valid_h3_out, weights4) + biases4
    valid_logits = valid_h2_in
    tf_valid_prediction = tf.nn.softmax(valid_logits)
    test_h1_in = tf.matmul(tf_test_dataset, weights1) + biases1
    test_h1_out = tf.nn.relu(test_h1_in)
    test_h2_in = tf.matmul(test_h1_out, weights2) + biases2
    #test_h2_out = tf.nn.relu(test_h2_in)
    #test_h3_in = tf.matmul(test_h2_out, weights3) + biases3
    #test_h3_out = tf.nn.relu(test_h3_in)
    #test_h4_in = tf.matmul(test_h3_out, weights4) + biases4
    test_logits = test_h2_in
    tf_test_prediction = tf.nn.softmax(test_logits)

# training
num_steps = 12000
with tf.Session(graph=graph) as sess:
    # initialize variables
    init_graph = tf.initialize_all_variables()
    sess.run(init_graph)
    print("Initialized!")
    # training iterations
    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        lr, _, l, predictions = sess.run([learning_rate, optimizer, loss, tf_train_prediction], feed_dict=feed_dict)
        # global_step is incremented inside the graph by minimize(..., global_step=global_step)
        if (step % 500 == 0):
            print("Learning rate: %0.3f" % lr)
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(tf_valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(tf_test_prediction.eval(), test_labels))
    print("----------------------------------------")