Fully Connected Neural Networks - L2 Regularization - No Convolutions


In [1]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle

First reload the data we generated in notMNIST_nonTensorFlow_comparisons.ipynb.


In [2]:
pickle_file = 'notMNIST.pickle'

with open(pickle_file, 'rb') as f:
  save = pickle.load(f)
  train_dataset = save['train_dataset']
  train_labels = save['train_labels']
  valid_dataset = save['valid_dataset']
  valid_labels = save['valid_labels']
  test_dataset = save['test_dataset']
  test_labels = save['test_labels']
  del save  # hint to help gc free up memory
  print('Training set', train_dataset.shape, train_labels.shape)
  print('Validation set', valid_dataset.shape, valid_labels.shape)
  print('Test set', test_dataset.shape, test_labels.shape)


Training set (200000, 28, 28) (200000,)
Validation set (10000, 28, 28) (10000,)
Test set (10000, 28, 28) (10000,)

Reformat into a shape that's better suited to the models we're going to train:

  • data as a flat matrix,
  • labels as float 1-hot encodings.

In [3]:
image_size = 28
num_labels = 10

def reformat(dataset, labels):
  dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
  # Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]
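  # The comparison below broadcasts the labels column vector against
  # np.arange(num_labels), yielding a boolean one-hot matrix cast to float32.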
  labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
  return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)


Training set (200000, 784) (200000, 10)
Validation set (10000, 784) (10000, 10)
Test set (10000, 784) (10000, 10)

In [4]:
def accuracy(predictions, labels):
  # Percentage of samples whose predicted class (argmax of the model output)
  # matches the true class (argmax of the one-hot label).
  return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
          / predictions.shape[0])
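
For example (an illustrative check, not from the original notebook):

p = np.array([[0.1, 0.9], [0.8, 0.2]])
y = np.array([[0.0, 1.0], [0.0, 1.0]])
print(accuracy(p, y))  # 50.0 -- only the first row's argmax matches its label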

L2 Regularization

L2 regularization amounts to adding a penalty on the (squared) norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor t using tf.nn.l2_loss(t).
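
As a quick sanity check (a minimal sketch, not part of the original notebook, reusing the imports from the first cell), tf.nn.l2_loss(t) returns half the sum of the squared entries of t:

t = np.array([1.0, 2.0, 3.0], dtype=np.float32)
with tf.Session() as sess:
    # (1**2 + 2**2 + 3**2) / 2 = 7.0
    print(sess.run(tf.nn.l2_loss(tf.constant(t))))  # 7.0
print(np.sum(t ** 2) / 2)  # 7.0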

Logistic Model


In [5]:
image_size = 28
##### logistic model 
def create_log_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps,
                         num_labels=10, batch_size=128):
    
    with graph.as_default():
      # Input data. For the training data, we use a placeholder that will be fed
      # at run time with a training minibatch.
      tf_train_dataset = tf.placeholder(tf.float32,shape=(batch_size, image_size * image_size))
      tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
        
      tf_valid_dataset = tf.constant(valid_dataset)
      tf_test_dataset = tf.constant(test_dataset)

      # Variables.
      weights = tf.Variable(tf.truncated_normal([image_size * image_size, num_labels]))
      biases = tf.Variable(tf.zeros([num_labels]))

      # Training computation.
      logits = tf.matmul(tf_train_dataset, weights) + biases
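      # Loss: mean cross-entropy over the minibatch plus an L2 penalty on the
      # weights; the penalty beta * l2_loss(weights) is further scaled by 1/batch_size.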
      loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
              + (1.0 / batch_size) * beta * tf.nn.l2_loss(weights))

      # Optimizer.
      global_step = tf.Variable(0)  # count the number of steps taken.
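      # Exponential decay: lr = 0.5 * 0.96 ** floor(global_step / 100000) with
      # staircase=True; over the ~3000 steps run here it effectively stays at 0.5.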
      learning_rate = tf.train.exponential_decay(0.5, global_step, 100000, 0.96, staircase=True)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
      #optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

      # Predictions for the training, validation, and test data.
      train_prediction = tf.nn.softmax(logits)
      
      valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
      test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
        
    test_accuracy = 0
    with tf.Session(graph=graph) as session:
      tf.global_variables_initializer().run()
      print("Initialized")
      for step in range(num_steps):
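        # Pick an offset within the training data, which has been randomized;
        # the modulo wraps it around, cycling through the training set.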
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
          [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
          print("Minibatch loss at step %d: %f" % (step, l))
          print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
          print("Validation accuracy: %.1f%%" % accuracy(valid_prediction.eval(), valid_labels))
          test_accuracy = accuracy(test_prediction.eval(), test_labels)
          print("Test accuracy: %.1f%%" % test_accuracy)
    
    return test_accuracy

In [6]:
num_steps = 3001

betas = [0, 0.001, 0.01, 0.1, 1, 10]
test_accuracy = np.zeros(len(betas))
for i, beta in enumerate(betas):
  print("\n>>>>>>>>>> Beta: %f" % beta)
  graph = tf.Graph()
  test_accuracy[i] = create_log_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps)


>>>>>>>>>> Beta: 0.000000
Initialized
Minibatch loss at step 0: 18.741056
Minibatch accuracy: 10.2%
Validation accuracy: 10.7%
Test accuracy: 10.4%
Minibatch loss at step 500: 1.130287
Minibatch accuracy: 78.9%
Validation accuracy: 75.8%
Test accuracy: 83.2%
Minibatch loss at step 1000: 1.432582
Minibatch accuracy: 76.6%
Validation accuracy: 76.4%
Test accuracy: 84.4%
Minibatch loss at step 1500: 0.757141
Minibatch accuracy: 79.7%
Validation accuracy: 77.4%
Test accuracy: 85.2%
Minibatch loss at step 2000: 0.906352
Minibatch accuracy: 81.2%
Validation accuracy: 76.8%
Test accuracy: 85.2%
Minibatch loss at step 2500: 1.114663
Minibatch accuracy: 75.0%
Validation accuracy: 77.9%
Test accuracy: 86.0%
Minibatch loss at step 3000: 0.994610
Minibatch accuracy: 77.3%
Validation accuracy: 78.5%
Test accuracy: 86.3%

>>>>>>>>>> Beta: 0.001000
Initialized
Minibatch loss at step 0: 20.761656
Minibatch accuracy: 10.2%
Validation accuracy: 10.4%
Test accuracy: 11.3%
Minibatch loss at step 500: 1.287068
Minibatch accuracy: 82.0%
Validation accuracy: 75.7%
Test accuracy: 83.1%
Minibatch loss at step 1000: 1.530305
Minibatch accuracy: 78.1%
Validation accuracy: 76.5%
Test accuracy: 83.9%
Minibatch loss at step 1500: 0.679831
Minibatch accuracy: 85.9%
Validation accuracy: 76.4%
Test accuracy: 84.3%
Minibatch loss at step 2000: 0.923166
Minibatch accuracy: 81.2%
Validation accuracy: 77.4%
Test accuracy: 85.2%
Minibatch loss at step 2500: 0.939260
Minibatch accuracy: 81.2%
Validation accuracy: 78.6%
Test accuracy: 86.1%
Minibatch loss at step 3000: 1.153385
Minibatch accuracy: 73.4%
Validation accuracy: 78.7%
Test accuracy: 86.7%

>>>>>>>>>> Beta: 0.010000
Initialized
Minibatch loss at step 0: 20.829924
Minibatch accuracy: 6.2%
Validation accuracy: 8.6%
Test accuracy: 8.1%
Minibatch loss at step 500: 1.403413
Minibatch accuracy: 78.1%
Validation accuracy: 74.8%
Test accuracy: 82.3%
Minibatch loss at step 1000: 1.580261
Minibatch accuracy: 75.8%
Validation accuracy: 76.1%
Test accuracy: 84.1%
Minibatch loss at step 1500: 0.893486
Minibatch accuracy: 83.6%
Validation accuracy: 76.8%
Test accuracy: 84.8%
Minibatch loss at step 2000: 0.829704
Minibatch accuracy: 86.7%
Validation accuracy: 77.4%
Test accuracy: 85.4%
Minibatch loss at step 2500: 1.018498
Minibatch accuracy: 76.6%
Validation accuracy: 78.7%
Test accuracy: 86.4%
Minibatch loss at step 3000: 1.059865
Minibatch accuracy: 80.5%
Validation accuracy: 78.9%
Test accuracy: 86.6%

>>>>>>>>>> Beta: 0.100000
Initialized
Minibatch loss at step 0: 19.786070
Minibatch accuracy: 11.7%
Validation accuracy: 13.7%
Test accuracy: 14.2%
Minibatch loss at step 500: 2.447759
Minibatch accuracy: 81.2%
Validation accuracy: 76.0%
Test accuracy: 82.8%
Minibatch loss at step 1000: 1.811436
Minibatch accuracy: 79.7%
Validation accuracy: 77.9%
Test accuracy: 84.8%
Minibatch loss at step 1500: 1.084249
Minibatch accuracy: 84.4%
Validation accuracy: 79.1%
Test accuracy: 86.3%
Minibatch loss at step 2000: 0.912001
Minibatch accuracy: 85.9%
Validation accuracy: 80.6%
Test accuracy: 87.5%
Minibatch loss at step 2500: 0.914403
Minibatch accuracy: 78.9%
Validation accuracy: 81.3%
Test accuracy: 88.4%
Minibatch loss at step 3000: 0.809557
Minibatch accuracy: 82.8%
Validation accuracy: 81.8%
Test accuracy: 88.9%

>>>>>>>>>> Beta: 1.000000
Initialized
Minibatch loss at step 0: 43.276993
Minibatch accuracy: 12.5%
Validation accuracy: 9.9%
Test accuracy: 9.3%
Minibatch loss at step 500: 0.901729
Minibatch accuracy: 87.5%
Validation accuracy: 81.1%
Test accuracy: 87.8%
Minibatch loss at step 1000: 0.791433
Minibatch accuracy: 79.7%
Validation accuracy: 81.4%
Test accuracy: 88.5%
Minibatch loss at step 1500: 0.548994
Minibatch accuracy: 82.8%
Validation accuracy: 81.4%
Test accuracy: 88.1%
Minibatch loss at step 2000: 0.631377
Minibatch accuracy: 89.1%
Validation accuracy: 81.4%
Test accuracy: 88.1%
Minibatch loss at step 2500: 0.766485
Minibatch accuracy: 80.5%
Validation accuracy: 81.2%
Test accuracy: 88.0%
Minibatch loss at step 3000: 0.767297
Minibatch accuracy: 80.5%
Validation accuracy: 81.5%
Test accuracy: 88.7%

>>>>>>>>>> Beta: 10.000000
Initialized
Minibatch loss at step 0: 251.276367
Minibatch accuracy: 10.9%
Validation accuracy: 12.4%
Test accuracy: 13.1%
Minibatch loss at step 500: 0.879916
Minibatch accuracy: 85.2%
Validation accuracy: 79.1%
Test accuracy: 86.2%
Minibatch loss at step 1000: 1.056504
Minibatch accuracy: 75.8%
Validation accuracy: 78.9%
Test accuracy: 85.8%
Minibatch loss at step 1500: 0.836636
Minibatch accuracy: 82.8%
Validation accuracy: 78.1%
Test accuracy: 85.5%
Minibatch loss at step 2000: 0.950700
Minibatch accuracy: 82.0%
Validation accuracy: 76.0%
Test accuracy: 83.4%
Minibatch loss at step 2500: 1.039283
Minibatch accuracy: 74.2%
Validation accuracy: 77.6%
Test accuracy: 84.5%
Minibatch loss at step 3000: 1.049438
Minibatch accuracy: 78.1%
Validation accuracy: 78.7%
Test accuracy: 85.8%

In [7]:
print("*** Best beta:"+str(betas[np.argmax(test_accuracy)])+ " -- accuracy:" + str(test_accuracy[np.argmax(test_accuracy)]))


*** Best beta:0.1 -- accuracy:88.87

We got an improvement in test accuracy over the unregularized model (~86.3%).

Neural Network Model: 1 Hidden Layer
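
The network computes logits = relu(x·W1 + b1)·W2 + b2 and is trained with softmax cross-entropy; both weight matrices enter the L2 penalty.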


In [8]:
##### nn model 
import math

def create_nn1_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps,
                         hidden_size = 1024, 
                         num_labels=10, batch_size=128):
    
    # Uniform init range 1/sqrt(hidden_size): a common fan-based scaling that
    # keeps the initial pre-activations small.
    uniMax = 1 / math.sqrt(hidden_size)
    
    with graph.as_default():
      tf_train_dataset = tf.placeholder(tf.float32,shape=(batch_size, image_size * image_size))
      tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
        
      tf_valid_dataset = tf.constant(valid_dataset)
      tf_test_dataset = tf.constant(test_dataset)

      # Hidden 1
      weights_1 = tf.Variable(tf.random_uniform([image_size * image_size, hidden_size], minval=-uniMax, maxval=uniMax),
                             name='weights_1')
      biases_1 = tf.Variable(tf.random_uniform([hidden_size],minval=-uniMax, maxval=uniMax),name='biases_1')
      hidden_1 = tf.nn.relu(tf.matmul(tf_train_dataset, weights_1) + biases_1)

      # Softmax 
      weights_2 = tf.Variable(tf.random_uniform([hidden_size, num_labels],minval=-uniMax, maxval=uniMax), name='weights_2')
      biases_2 = tf.Variable(tf.random_uniform([num_labels],minval=-uniMax, maxval=uniMax),name='biases_2')
      logits = tf.matmul(hidden_1, weights_2) + biases_2

      # Loss: mean cross-entropy plus the scaled L2 penalty on both weight matrices.
      loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
              + (1.0 / batch_size) * beta * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2)))

      # Optimizer
      global_step = tf.Variable(0)  # count the number of steps taken.
      learning_rate = tf.train.exponential_decay(0.5, global_step, 100000, 0.96, staircase=True)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
      #optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

      # Predictions for the training, validation, and test data.
      train_prediction = tf.nn.softmax(logits)
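      # The validation/test graphs reuse the same weight Variables, so they
      # always evaluate with the current (trained) parameters.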
      valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights_1) + biases_1), weights_2) + biases_2)
      test_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights_1) + biases_1), weights_2) + biases_2)

    test_accuracy = 0
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        print("Initialized")
        for step in range(num_steps):
            # Pick an offset within the training data, which has been randomized.
            # Note: we could use better randomization across epochs.
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            # Prepare a dictionary telling the session where to feed the minibatch.
            # The key of the dictionary is the placeholder node of the graph to be fed,
            # and the value is the numpy array to feed to it.
            feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
            _, l, predictions = session.run(
              [optimizer, loss, train_prediction], feed_dict=feed_dict)
            if (step % 500 == 0):
              print("Minibatch loss at step %d: %f" % (step, l))
              print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
              print("Validation accuracy: %.1f%%" % accuracy(
              valid_prediction.eval(), valid_labels))
              test_accuracy = accuracy(test_prediction.eval(), test_labels)
              print("Test accuracy: %.1f%%" % test_accuracy)
    return test_accuracy

In [9]:
betas = [0, 0.001, 0.01, 0.1, 1, 10]
test_accuracy = np.zeros(len(betas))
for i, beta in enumerate(betas):
  print("\n>>>>>>>>>> Beta: %f" % beta)
  graph = tf.Graph()
  test_accuracy[i] = create_nn1_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps)


>>>>>>>>>> Beta: 0.000000
Initialized
Minibatch loss at step 0: 2.297858
Minibatch accuracy: 18.0%
Validation accuracy: 36.9%
Test accuracy: 40.3%
Minibatch loss at step 500: 0.367253
Minibatch accuracy: 90.6%
Validation accuracy: 85.6%
Test accuracy: 91.9%
Minibatch loss at step 1000: 0.526298
Minibatch accuracy: 85.2%
Validation accuracy: 86.4%
Test accuracy: 92.8%
Minibatch loss at step 1500: 0.276079
Minibatch accuracy: 93.8%
Validation accuracy: 87.8%
Test accuracy: 93.9%
Minibatch loss at step 2000: 0.284038
Minibatch accuracy: 93.8%
Validation accuracy: 87.8%
Test accuracy: 94.0%
Minibatch loss at step 2500: 0.351037
Minibatch accuracy: 89.8%
Validation accuracy: 88.0%
Test accuracy: 94.2%
Minibatch loss at step 3000: 0.346995
Minibatch accuracy: 89.1%
Validation accuracy: 88.3%
Test accuracy: 94.3%

>>>>>>>>>> Beta: 0.001000
Initialized
Minibatch loss at step 0: 2.324186
Minibatch accuracy: 5.5%
Validation accuracy: 40.0%
Test accuracy: 43.3%
Minibatch loss at step 500: 0.357890
Minibatch accuracy: 90.6%
Validation accuracy: 85.6%
Test accuracy: 91.9%
Minibatch loss at step 1000: 0.532684
Minibatch accuracy: 83.6%
Validation accuracy: 86.1%
Test accuracy: 92.9%
Minibatch loss at step 1500: 0.292951
Minibatch accuracy: 91.4%
Validation accuracy: 87.7%
Test accuracy: 93.8%
Minibatch loss at step 2000: 0.277259
Minibatch accuracy: 93.0%
Validation accuracy: 88.0%
Test accuracy: 94.0%
Minibatch loss at step 2500: 0.354774
Minibatch accuracy: 89.8%
Validation accuracy: 88.2%
Test accuracy: 94.2%
Minibatch loss at step 3000: 0.338070
Minibatch accuracy: 89.1%
Validation accuracy: 87.9%
Test accuracy: 94.3%

>>>>>>>>>> Beta: 0.010000
Initialized
Minibatch loss at step 0: 2.325355
Minibatch accuracy: 7.0%
Validation accuracy: 40.8%
Test accuracy: 44.6%
Minibatch loss at step 500: 0.371592
Minibatch accuracy: 90.6%
Validation accuracy: 85.4%
Test accuracy: 91.9%
Minibatch loss at step 1000: 0.547777
Minibatch accuracy: 84.4%
Validation accuracy: 86.5%
Test accuracy: 92.9%
Minibatch loss at step 1500: 0.315192
Minibatch accuracy: 93.0%
Validation accuracy: 87.6%
Test accuracy: 93.7%
Minibatch loss at step 2000: 0.300872
Minibatch accuracy: 93.0%
Validation accuracy: 87.8%
Test accuracy: 93.8%
Minibatch loss at step 2500: 0.366530
Minibatch accuracy: 89.8%
Validation accuracy: 87.9%
Test accuracy: 94.2%
Minibatch loss at step 3000: 0.382559
Minibatch accuracy: 89.1%
Validation accuracy: 88.2%
Test accuracy: 94.4%

>>>>>>>>>> Beta: 0.100000
Initialized
Minibatch loss at step 0: 2.411651
Minibatch accuracy: 6.2%
Validation accuracy: 40.2%
Test accuracy: 43.6%
Minibatch loss at step 500: 0.465705
Minibatch accuracy: 90.6%
Validation accuracy: 85.3%
Test accuracy: 91.7%
Minibatch loss at step 1000: 0.626549
Minibatch accuracy: 85.9%
Validation accuracy: 86.0%
Test accuracy: 92.5%
Minibatch loss at step 1500: 0.377946
Minibatch accuracy: 92.2%
Validation accuracy: 87.4%
Test accuracy: 93.8%
Minibatch loss at step 2000: 0.386094
Minibatch accuracy: 92.2%
Validation accuracy: 87.6%
Test accuracy: 93.8%
Minibatch loss at step 2500: 0.446255
Minibatch accuracy: 88.3%
Validation accuracy: 87.5%
Test accuracy: 93.8%
Minibatch loss at step 3000: 0.480406
Minibatch accuracy: 89.1%
Validation accuracy: 87.8%
Test accuracy: 94.0%

>>>>>>>>>> Beta: 1.000000
Initialized
Minibatch loss at step 0: 3.324699
Minibatch accuracy: 16.4%
Validation accuracy: 37.5%
Test accuracy: 40.8%
Minibatch loss at step 500: 0.578685
Minibatch accuracy: 87.5%
Validation accuracy: 83.3%
Test accuracy: 90.2%
Minibatch loss at step 1000: 0.765239
Minibatch accuracy: 83.6%
Validation accuracy: 83.5%
Test accuracy: 90.4%
Minibatch loss at step 1500: 0.542304
Minibatch accuracy: 89.1%
Validation accuracy: 84.0%
Test accuracy: 91.2%
Minibatch loss at step 2000: 0.566519
Minibatch accuracy: 91.4%
Validation accuracy: 84.0%
Test accuracy: 91.1%
Minibatch loss at step 2500: 0.680417
Minibatch accuracy: 85.2%
Validation accuracy: 83.6%
Test accuracy: 90.2%
Minibatch loss at step 3000: 0.720771
Minibatch accuracy: 84.4%
Validation accuracy: 84.0%
Test accuracy: 90.9%

>>>>>>>>>> Beta: 10.000000
Initialized
Minibatch loss at step 0: 12.637547
Minibatch accuracy: 9.4%
Validation accuracy: 41.0%
Test accuracy: 44.9%
Minibatch loss at step 500: 1.171596
Minibatch accuracy: 85.2%
Validation accuracy: 79.3%
Test accuracy: 86.0%
Minibatch loss at step 1000: 1.320983
Minibatch accuracy: 73.4%
Validation accuracy: 78.8%
Test accuracy: 86.0%
Minibatch loss at step 1500: 1.109461
Minibatch accuracy: 84.4%
Validation accuracy: 79.5%
Test accuracy: 86.9%
Minibatch loss at step 2000: 1.227175
Minibatch accuracy: 79.7%
Validation accuracy: 72.5%
Test accuracy: 79.5%
Minibatch loss at step 2500: 1.298574
Minibatch accuracy: 76.6%
Validation accuracy: 78.5%
Test accuracy: 85.8%
Minibatch loss at step 3000: 1.364912
Minibatch accuracy: 71.9%
Validation accuracy: 78.5%
Test accuracy: 85.6%

In [10]:
print("*** Best beta:"+str(betas[np.argmax(test_accuracy)])+ " -- accuracy:" + str(test_accuracy[np.argmax(test_accuracy)]))


*** Best beta:0.01 -- accuracy:94.42

We got only a marginal improvement in test accuracy over the unregularized model (~94.3% at step 3000).

Neural Network Model: 2 Hidden Layers


In [20]:
def create_nn2_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps,
                         hidden_size = 1024, 
                         num_labels=10, batch_size=128):
    
    uniMax = 1/math.sqrt(hidden_size)
    
    with graph.as_default():
      # Input data. For the training data, we use a placeholder that will be fed
      # at run time with a training minibatch.
      tf_train_dataset = tf.placeholder(tf.float32,shape=(batch_size, image_size * image_size))
      tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
        
      tf_valid_dataset = tf.constant(valid_dataset)
      tf_test_dataset = tf.constant(test_dataset)

      # Hidden 1
      weights_1 = tf.Variable(tf.random_uniform([image_size * image_size, hidden_size], minval=-uniMax, maxval=uniMax),
                             name='weights_1')
      biases_1 = tf.Variable(tf.random_uniform([hidden_size],minval=-uniMax, maxval=uniMax),name='biases_1')
      hidden_1 = tf.nn.relu(tf.matmul(tf_train_dataset, weights_1) + biases_1)
        
      # Hidden 2
      weights_2 = tf.Variable(tf.random_uniform([hidden_size, hidden_size], minval=-uniMax, maxval=uniMax),name='weights_2')
      biases_2 = tf.Variable(tf.random_uniform([hidden_size],minval=-uniMax, maxval=uniMax),name='biases_2')
      hidden_2 = tf.nn.relu(tf.matmul(hidden_1, weights_2) + biases_2)

      # Softmax 
      weights_3 = tf.Variable(tf.random_uniform([hidden_size, num_labels],minval=-uniMax, maxval=uniMax), name='weights_3')
      biases_3 = tf.Variable(tf.random_uniform([num_labels],minval=-uniMax, maxval=uniMax),name='biases_3')
      logits = tf.matmul(hidden_2, weights_3) + biases_3

      # Loss: mean cross-entropy plus the scaled L2 penalty on all three weight matrices.
      loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
              + (1.0 / batch_size) * beta
              * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2) + tf.nn.l2_loss(weights_3)))

      # Optimizer.
      global_step = tf.Variable(0)  # count the number of steps taken.
      learning_rate = tf.train.exponential_decay(0.5, global_step, 100000, 0.96, staircase=True)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
      #optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

      # Predictions for the training, validation, and test data.
      train_prediction = tf.nn.softmax(logits)
      
      valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights_1) + biases_1), weights_2) + biases_2),
              weights_3)+biases_3)
      test_prediction = tf.nn.softmax(
            tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights_1) + biases_1), weights_2) + biases_2),
              weights_3)+biases_3)

    test_accuracy = 0
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        print("Initialized")
        for step in range(num_steps):
    
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            
            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            
            feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
            _, l, predictions = session.run(
              [optimizer, loss, train_prediction], feed_dict=feed_dict)
            
            if (step % 500 == 0):
              print("Minibatch loss at step %d: %f" % (step, l))
              print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
              print("Validation accuracy: %.1f%%" % accuracy(
              valid_prediction.eval(), valid_labels))
              test_accuracy = accuracy(test_prediction.eval(), test_labels)
              print("Test accuracy: %.1f%%" % test_accuracy)
    return test_accuracy

In [21]:
betas = [0, 0.001, 0.01, 0.1, 1, 10]
test_accuracy = np.zeros(len(betas))
for i, beta in enumerate(betas):
  print("\n>>>>>>>>>> Beta: %f" % beta)
  graph = tf.Graph()
  test_accuracy[i] = create_nn2_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps)


>>>>>>>>>> Beta: 0.000000
Initialized
Minibatch loss at step 0: 2.297088
Minibatch accuracy: 14.1%
Validation accuracy: 35.2%
Test accuracy: 38.1%
Minibatch loss at step 500: 0.349590
Minibatch accuracy: 89.1%
Validation accuracy: 85.7%
Test accuracy: 92.3%
Minibatch loss at step 1000: 0.472860
Minibatch accuracy: 85.2%
Validation accuracy: 86.8%
Test accuracy: 93.0%
Minibatch loss at step 1500: 0.243931
Minibatch accuracy: 93.0%
Validation accuracy: 88.3%
Test accuracy: 94.1%
Minibatch loss at step 2000: 0.234967
Minibatch accuracy: 94.5%
Validation accuracy: 88.5%
Test accuracy: 94.6%
Minibatch loss at step 2500: 0.300683
Minibatch accuracy: 92.2%
Validation accuracy: 89.1%
Test accuracy: 94.9%
Minibatch loss at step 3000: 0.320041
Minibatch accuracy: 89.8%
Validation accuracy: 89.1%
Test accuracy: 94.9%

>>>>>>>>>> Beta: 0.001000
Initialized
Minibatch loss at step 0: 2.306846
Minibatch accuracy: 13.3%
Validation accuracy: 17.6%
Test accuracy: 18.9%
Minibatch loss at step 500: 0.355565
Minibatch accuracy: 89.1%
Validation accuracy: 85.8%
Test accuracy: 92.4%
Minibatch loss at step 1000: 0.476989
Minibatch accuracy: 85.9%
Validation accuracy: 86.7%
Test accuracy: 93.0%
Minibatch loss at step 1500: 0.245425
Minibatch accuracy: 92.2%
Validation accuracy: 88.3%
Test accuracy: 94.0%
Minibatch loss at step 2000: 0.248292
Minibatch accuracy: 94.5%
Validation accuracy: 88.8%
Test accuracy: 94.7%
Minibatch loss at step 2500: 0.301564
Minibatch accuracy: 89.8%
Validation accuracy: 89.0%
Test accuracy: 94.8%
Minibatch loss at step 3000: 0.335545
Minibatch accuracy: 89.1%
Validation accuracy: 89.0%
Test accuracy: 95.0%

>>>>>>>>>> Beta: 0.010000
Initialized
Minibatch loss at step 0: 2.323083
Minibatch accuracy: 10.9%
Validation accuracy: 29.6%
Test accuracy: 32.3%
Minibatch loss at step 500: 0.372032
Minibatch accuracy: 89.1%
Validation accuracy: 85.7%
Test accuracy: 92.3%
Minibatch loss at step 1000: 0.507049
Minibatch accuracy: 85.2%
Validation accuracy: 86.7%
Test accuracy: 93.1%
Minibatch loss at step 1500: 0.283297
Minibatch accuracy: 92.2%
Validation accuracy: 88.2%
Test accuracy: 94.0%
Minibatch loss at step 2000: 0.294270
Minibatch accuracy: 93.0%
Validation accuracy: 88.6%
Test accuracy: 94.7%
Minibatch loss at step 2500: 0.323566
Minibatch accuracy: 89.8%
Validation accuracy: 88.9%
Test accuracy: 95.0%
Minibatch loss at step 3000: 0.367163
Minibatch accuracy: 89.1%
Validation accuracy: 89.0%
Test accuracy: 95.0%

>>>>>>>>>> Beta: 0.100000
Initialized
Minibatch loss at step 0: 2.536520
Minibatch accuracy: 9.4%
Validation accuracy: 30.4%
Test accuracy: 32.7%
Minibatch loss at step 500: 0.550975
Minibatch accuracy: 89.1%
Validation accuracy: 85.4%
Test accuracy: 92.0%
Minibatch loss at step 1000: 0.658326
Minibatch accuracy: 85.2%
Validation accuracy: 86.4%
Test accuracy: 92.7%
Minibatch loss at step 1500: 0.400193
Minibatch accuracy: 91.4%
Validation accuracy: 87.8%
Test accuracy: 93.8%
Minibatch loss at step 2000: 0.394436
Minibatch accuracy: 94.5%
Validation accuracy: 88.0%
Test accuracy: 94.5%
Minibatch loss at step 2500: 0.450166
Minibatch accuracy: 89.1%
Validation accuracy: 88.1%
Test accuracy: 94.4%
Minibatch loss at step 3000: 0.492502
Minibatch accuracy: 88.3%
Validation accuracy: 88.3%
Test accuracy: 94.4%

>>>>>>>>>> Beta: 1.000000
Initialized
Minibatch loss at step 0: 4.673956
Minibatch accuracy: 7.0%
Validation accuracy: 30.0%
Test accuracy: 32.2%
Minibatch loss at step 500: 0.635985
Minibatch accuracy: 89.1%
Validation accuracy: 83.3%
Test accuracy: 90.2%
Minibatch loss at step 1000: 0.801995
Minibatch accuracy: 81.2%
Validation accuracy: 83.6%
Test accuracy: 90.4%
Minibatch loss at step 1500: 0.569250
Minibatch accuracy: 89.1%
Validation accuracy: 84.5%
Test accuracy: 91.3%
Minibatch loss at step 2000: 0.590984
Minibatch accuracy: 92.2%
Validation accuracy: 84.1%
Test accuracy: 91.0%
Minibatch loss at step 2500: 0.710768
Minibatch accuracy: 85.2%
Validation accuracy: 83.9%
Test accuracy: 90.3%
Minibatch loss at step 3000: 0.755620
Minibatch accuracy: 85.2%
Validation accuracy: 84.4%
Test accuracy: 91.1%

>>>>>>>>>> Beta: 10.000000
Initialized
Minibatch loss at step 0: 25.989532
Minibatch accuracy: 12.5%
Validation accuracy: 25.6%
Test accuracy: 27.4%
Minibatch loss at step 500: 1.477812
Minibatch accuracy: 79.7%
Validation accuracy: 76.5%
Test accuracy: 83.4%
Minibatch loss at step 1000: 1.582738
Minibatch accuracy: 74.2%
Validation accuracy: 74.9%
Test accuracy: 82.4%
Minibatch loss at step 1500: 1.398084
Minibatch accuracy: 75.8%
Validation accuracy: 74.0%
Test accuracy: 81.1%
Minibatch loss at step 2000: 1.602605
Minibatch accuracy: 71.1%
Validation accuracy: 65.9%
Test accuracy: 72.7%
Minibatch loss at step 2500: 1.586047
Minibatch accuracy: 75.8%
Validation accuracy: 69.5%
Test accuracy: 76.3%
Minibatch loss at step 3000: 1.854764
Minibatch accuracy: 63.3%
Validation accuracy: 65.7%
Test accuracy: 72.4%

In [13]:
print("*** Best beta:"+str(betas[np.argmax(test_accuracy)])+ " -- accuracy:" + str(test_accuracy[np.argmax(test_accuracy)]))


*** Best beta:0 -- accuracy:94.47

We did not get a meaningful improvement in test accuracy over the unregularized model: the best beta was 0, i.e. no regularization (~94.5%).

Neural Network Model: 3 Hidden Layers


In [26]:
def create_nn3_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps,
                         hidden_size = 1024, 
                         num_labels=10, batch_size=128):
    
    uniMax = 1/math.sqrt(hidden_size)
    
    with graph.as_default():
      # Input data. For the training data, we use a placeholder that will be fed
      # at run time with a training minibatch.
      tf_train_dataset = tf.placeholder(tf.float32,shape=(batch_size, image_size * image_size))
      tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
        
      tf_valid_dataset = tf.constant(valid_dataset)
      tf_test_dataset = tf.constant(test_dataset)

      # Hidden 1
      weights_1 = tf.Variable(tf.random_uniform([image_size * image_size, hidden_size], minval=-uniMax, maxval=uniMax),
                             name='weights_1')
      biases_1 = tf.Variable(tf.random_uniform([hidden_size],minval=-uniMax, maxval=uniMax),name='biases_1')
      hidden_1 = tf.nn.relu(tf.matmul(tf_train_dataset, weights_1) + biases_1)
        
      # Hidden 2
      weights_2 = tf.Variable(tf.random_uniform([hidden_size, hidden_size], minval=-uniMax, maxval=uniMax),name='weights_2')
      biases_2 = tf.Variable(tf.random_uniform([hidden_size],minval=-uniMax, maxval=uniMax),name='biases_2')
      hidden_2 = tf.nn.relu(tf.matmul(hidden_1, weights_2) + biases_2)
    
      # Hidden 3
      weights_3 = tf.Variable(tf.random_uniform([hidden_size, hidden_size], minval=-uniMax, maxval=uniMax),name='weights_3')
      biases_3 = tf.Variable(tf.random_uniform([hidden_size],minval=-uniMax, maxval=uniMax),name='biases_3')
      hidden_3 = tf.nn.relu(tf.matmul(hidden_2, weights_3) + biases_3)

      # Softmax 
      weights_4 = tf.Variable(tf.random_uniform([hidden_size, num_labels],minval=-uniMax, maxval=uniMax), name='weights_4')
      biases_4 = tf.Variable(tf.random_uniform([num_labels],minval=-uniMax, maxval=uniMax),name='biases_4')
      logits = tf.matmul(hidden_3, weights_4) + biases_4

      # Loss: mean cross-entropy plus the scaled L2 penalty on all four weight matrices.
      loss = (tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
              + (1.0 / batch_size) * beta
              * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2)
                 + tf.nn.l2_loss(weights_3) + tf.nn.l2_loss(weights_4)))

      # Optimizer.
      global_step = tf.Variable(0)  # count the number of steps taken.
      learning_rate = tf.train.exponential_decay(0.5, global_step, 100000, 0.96, staircase=True)
      optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
      #optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

      # Predictions for the training, validation, and test data.
      train_prediction = tf.nn.softmax(logits)
      
      valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights_1) + biases_1), weights_2) + biases_2),
              weights_3)+biases_3),weights_4)+biases_4)
      test_prediction = tf.nn.softmax(
            tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights_1) + biases_1), weights_2) + biases_2),
              weights_3)+biases_3),weights_4)+biases_4)

    test_accuracy = 0
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        print("Initialized")
        for step in range(num_steps):
    
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            
            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            
            feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
            _, l, predictions = session.run(
              [optimizer, loss, train_prediction], feed_dict=feed_dict)
            
            if (step % 500 == 0):
              print("Minibatch loss at step %d: %f" % (step, l))
              print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
              print("Validation accuracy: %.1f%%" % accuracy(
              valid_prediction.eval(), valid_labels))
              test_accuracy = accuracy(test_prediction.eval(), test_labels)
              print("Test accuracy: %.1f%%" % test_accuracy)
    return test_accuracy

In [27]:
betas = [0, 0.001, 0.01, 0.1, 1, 10]
test_accuracy = np.zeros(len(betas))
for i, beta in enumerate(betas):
  print("\n>>>>>>>>>> Beta: %f" % beta)
  graph = tf.Graph()
  test_accuracy[i] = create_nn3_model_and_run(graph,
                         train_dataset,
                         train_labels,
                         valid_dataset,
                         valid_labels,
                         test_dataset,
                         test_labels,
                         beta,
                         num_steps)


>>>>>>>>>> Beta: 0.000000
Initialized
Minibatch loss at step 0: 2.300773
Minibatch accuracy: 12.5%
Validation accuracy: 18.2%
Test accuracy: 19.5%
Minibatch loss at step 500: 0.354965
Minibatch accuracy: 88.3%
Validation accuracy: 85.5%
Test accuracy: 92.1%
Minibatch loss at step 1000: 0.463847
Minibatch accuracy: 84.4%
Validation accuracy: 86.4%
Test accuracy: 92.8%
Minibatch loss at step 1500: 0.258584
Minibatch accuracy: 91.4%
Validation accuracy: 88.2%
Test accuracy: 93.9%
Minibatch loss at step 2000: 0.250371
Minibatch accuracy: 93.8%
Validation accuracy: 88.4%
Test accuracy: 94.8%
Minibatch loss at step 2500: 0.302076
Minibatch accuracy: 90.6%
Validation accuracy: 88.9%
Test accuracy: 94.8%
Minibatch loss at step 3000: 0.355451
Minibatch accuracy: 88.3%
Validation accuracy: 88.9%
Test accuracy: 95.1%

>>>>>>>>>> Beta: 0.001000
Initialized
Minibatch loss at step 0: 2.306205
Minibatch accuracy: 10.9%
Validation accuracy: 19.2%
Test accuracy: 20.4%
Minibatch loss at step 500: 0.370236
Minibatch accuracy: 89.1%
Validation accuracy: 85.5%
Test accuracy: 92.1%
Minibatch loss at step 1000: 0.491403
Minibatch accuracy: 85.2%
Validation accuracy: 86.5%
Test accuracy: 92.9%
Minibatch loss at step 1500: 0.266748
Minibatch accuracy: 90.6%
Validation accuracy: 87.9%
Test accuracy: 93.8%
Minibatch loss at step 2000: 0.259631
Minibatch accuracy: 93.0%
Validation accuracy: 88.6%
Test accuracy: 94.7%
Minibatch loss at step 2500: 0.317300
Minibatch accuracy: 89.1%
Validation accuracy: 88.8%
Test accuracy: 94.8%
Minibatch loss at step 3000: 0.353470
Minibatch accuracy: 88.3%
Validation accuracy: 88.8%
Test accuracy: 94.8%

>>>>>>>>>> Beta: 0.010000
Initialized
Minibatch loss at step 0: 2.340016
Minibatch accuracy: 10.9%
Validation accuracy: 18.5%
Test accuracy: 19.8%
Minibatch loss at step 500: 0.401726
Minibatch accuracy: 88.3%
Validation accuracy: 85.4%
Test accuracy: 92.0%
Minibatch loss at step 1000: 0.512600
Minibatch accuracy: 85.2%
Validation accuracy: 86.4%
Test accuracy: 92.8%
Minibatch loss at step 1500: 0.300189
Minibatch accuracy: 92.2%
Validation accuracy: 88.2%
Test accuracy: 94.0%
Minibatch loss at step 2000: 0.298666
Minibatch accuracy: 93.8%
Validation accuracy: 88.3%
Test accuracy: 94.6%
Minibatch loss at step 2500: 0.355419
Minibatch accuracy: 90.6%
Validation accuracy: 88.8%
Test accuracy: 94.8%
Minibatch loss at step 3000: 0.394145
Minibatch accuracy: 88.3%
Validation accuracy: 88.9%
Test accuracy: 94.8%

>>>>>>>>>> Beta: 0.100000
Initialized
Minibatch loss at step 0: 2.674578
Minibatch accuracy: 10.9%
Validation accuracy: 18.0%
Test accuracy: 18.8%
Minibatch loss at step 500: 0.657184
Minibatch accuracy: 89.1%
Validation accuracy: 85.3%
Test accuracy: 91.8%
Minibatch loss at step 1000: 0.717529
Minibatch accuracy: 83.6%
Validation accuracy: 86.1%
Test accuracy: 92.5%
Minibatch loss at step 1500: 0.448350
Minibatch accuracy: 92.2%
Validation accuracy: 87.8%
Test accuracy: 93.8%
Minibatch loss at step 2000: 0.430762
Minibatch accuracy: 92.2%
Validation accuracy: 88.0%
Test accuracy: 94.3%
Minibatch loss at step 2500: 0.482252
Minibatch accuracy: 89.8%
Validation accuracy: 88.1%
Test accuracy: 94.2%
Minibatch loss at step 3000: 0.527152
Minibatch accuracy: 89.1%
Validation accuracy: 88.0%
Test accuracy: 94.1%

>>>>>>>>>> Beta: 1.000000
Initialized
Minibatch loss at step 0: 6.003622
Minibatch accuracy: 10.2%
Validation accuracy: 18.2%
Test accuracy: 19.7%
Minibatch loss at step 500: 0.703134
Minibatch accuracy: 88.3%
Validation accuracy: 83.2%
Test accuracy: 90.0%
Minibatch loss at step 1000: 0.845760
Minibatch accuracy: 79.7%
Validation accuracy: 83.4%
Test accuracy: 90.2%
Minibatch loss at step 1500: 0.603916
Minibatch accuracy: 86.7%
Validation accuracy: 84.3%
Test accuracy: 91.1%
Minibatch loss at step 2000: 0.627656
Minibatch accuracy: 91.4%
Validation accuracy: 83.9%
Test accuracy: 90.8%
Minibatch loss at step 2500: 0.752064
Minibatch accuracy: 84.4%
Validation accuracy: 82.7%
Test accuracy: 89.1%
Minibatch loss at step 3000: 0.808706
Minibatch accuracy: 84.4%
Validation accuracy: 84.2%
Test accuracy: 91.0%

>>>>>>>>>> Beta: 10.000000
Initialized
Minibatch loss at step 0: 39.291458
Minibatch accuracy: 8.6%
Validation accuracy: 10.1%
Test accuracy: 10.2%
Minibatch loss at step 500: 2.210603
Minibatch accuracy: 14.8%
Validation accuracy: 22.6%
Test accuracy: 23.8%
Minibatch loss at step 1000: 2.151734
Minibatch accuracy: 22.7%
Validation accuracy: 21.9%
Test accuracy: 22.8%
Minibatch loss at step 1500: 2.071776
Minibatch accuracy: 25.8%
Validation accuracy: 27.0%
Test accuracy: 29.3%
Minibatch loss at step 2000: 1.925046
Minibatch accuracy: 38.3%
Validation accuracy: 38.6%
Test accuracy: 41.1%
Minibatch loss at step 2500: 1.937038
Minibatch accuracy: 38.3%
Validation accuracy: 36.6%
Test accuracy: 39.1%
Minibatch loss at step 3000: 2.122524
Minibatch accuracy: 34.4%
Validation accuracy: 39.4%
Test accuracy: 42.2%

In [28]:
print("*** Best beta:"+str(betas[np.argmax(test_accuracy)])+ " -- accuracy:" + str(test_accuracy[np.argmax(test_accuracy)]))


*** Best beta:0 -- accuracy:95.1

We did not get an improvement in test accuracy over the unregularized model: the best beta was 0, i.e. no regularization (95.1%).

Conclusions

  • Logistic model: L2 regularization effective
    • unregularized: test accuracy 86.3%
    • regularized (best beta 0.1): test accuracy 88.87%
  • Neural network, 1 hidden layer: L2 regularization only marginally effective
    • unregularized: test accuracy 94.3%
    • regularized (best beta 0.01): test accuracy 94.42%
  • Neural network, 2 hidden layers: L2 regularization not effective
    • best beta 0 (i.e. unregularized): test accuracy 94.47%
  • Neural network, 3 hidden layers: L2 regularization not effective
    • best beta 0 (i.e. unregularized): test accuracy 95.1%

In general, L2 regularization was less effective for the architectures with more hidden layers, presumably because at this training budget the deeper models do not overfit enough for a weight penalty to help.