MNIST Example (tutorial)

The build/inference/train model mainly uses two files; this notebook merges them and serves as the tutorial.

mnist.py: the library that contains the basic inference()/loss()/training() functions (it cannot be run directly).
fully_connected_feed.py: the driver that uses the mnist.py interface to train the network with feed dictionaries.


In [12]:
import argparse
import os.path
import sys
import time

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist  # this is the mnist.py file (library)

In [13]:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Builds the MNIST network.
Implements the inference/loss/training pattern for model building.
1. inference() - Builds the model as far as is required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.
This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""


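To make the pattern concrete, the graph-building functions defined in the cells below are meant to be chained by a driver script such as fully_connected_feed.py. A minimal sketch of that wiring (the batch size, hidden-layer sizes, and learning rate here are illustrative assumptions matching the defaults used later):

    # Sketch only: inference(), loss(), training() and evaluation() are defined in the cells below.
    images_ph = tf.placeholder(tf.float32, shape=(100, IMAGE_PIXELS))
    labels_ph = tf.placeholder(tf.int32, shape=(100,))

    logits = inference(images_ph, 128, 32)    # forward pass up to the logits
    loss_op = loss(logits, labels_ph)         # attach the cross-entropy loss
    train_op = training(loss_op, 0.01)        # attach the SGD update; sess.run(train_op) trains
    eval_op = evaluation(logits, labels_ph)   # count of correct predictions in the batch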

In [14]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import tensorflow as tf

In [15]:
# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10

# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE

In [16]:
def inference(images, hidden1_units, hidden2_units):
    """Build the MNIST model up to where it may be used for inference.
    Args:
        images: Images placeholder, from inputs().
        hidden1_units: Size of the first hidden layer.
        hidden2_units: Size of the second hidden layer.
    Returns:
        softmax_linear: Output tensor with the computed logits.
    """
    # Hidden 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]),
                             name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)

    # Hidden 2
    with tf.name_scope('hidden2'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, hidden2_units],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden2_units]),
                             name='biases')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)

    # Linear
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([hidden2_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden2_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = tf.matmul(hidden2, weights) + biases
    return logits
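The truncated-normal initializer in each layer uses stddev 1/sqrt(fan_in), which keeps the activations at a roughly comparable scale at initialization. As a quick sanity check on the output shape (a sketch, not part of the tutorial; it only builds the graph without running it), the logits should have one row per image and NUM_CLASSES columns:

    # Hypothetical shape check: batch dimension left unspecified (None).
    images_check = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
    logits_check = inference(images_check, 128, 32)
    print(logits_check.get_shape())   # expected: (?, 10)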

In [17]:
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.
    Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size].
    Returns:
        loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')
    return tf.reduce_mean(cross_entropy, name='xentropy_mean')
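For intuition, sparse softmax cross-entropy for a single example is -log(softmax(logits)[label]), and the op above averages that quantity over the batch. A minimal NumPy check with made-up logits (hypothetical values, not from the tutorial):

    import numpy as np

    logits = np.array([2.0, 0.5, -1.0])              # hypothetical logits for 3 classes
    label = 0                                        # true class index
    probs = np.exp(logits) / np.sum(np.exp(logits))  # softmax
    print(-np.log(probs[label]))                     # per-example cross-entropy, ~0.241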

In [18]:
def training(loss, learning_rate):
    """Sets up the training Ops.
      Creates a summarizer to track the loss over time in TensorBoard.
      Creates an optimizer and applies the gradients to all trainable variables.
      The Op returned by this function is what must be passed to the
      `sess.run()` call to cause the model to train.
      Args:
            loss: Loss tensor, from loss().
            learning_rate: The learning rate to use for gradient descent.
      Returns:
            train_op: The Op for training.
      """
    # Add a scalar summary for the snapshot loss.
    tf.summary.scalar('loss', loss)
    # Create the gradient descent optimizer with the given learning rate.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Create a variable to track the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Use the optimizer to apply the gradients that minimize the loss
    # (and also increment the global step counter) as a single training step.
    train_op = optimizer.minimize(loss, global_step=global_step)
    
    return train_op

In [19]:
def evaluation(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
      Args:
            logits: Logits tensor, float - [batch_size, NUM_CLASSES].
            labels: Labels tensor, int32 - [batch_size], with values in the
            range [0, NUM_CLASSES).
      Returns:
            A scalar int32 tensor with the number of examples (out of batch_size)
            that were predicted correctly.
      """
    # For a classifier model, we can use the in_top_k Op.
    # It returns a bool tensor with shape [batch_size] that is true for
    # the examples where the label is in the top k (here k=1)
    # of all logits for that example.
    correct = tf.nn.in_top_k(logits, labels, 1)
    # Return the number of true entries.
    return tf.reduce_sum(tf.cast(correct, tf.int32))
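With k=1, in_top_k simply asks whether the arg-max of the logits matches the label, so the returned value is the number of correct predictions in the batch. A small NumPy sketch of the same count (hypothetical values, not from the tutorial):

    import numpy as np

    logits = np.array([[2.0, 0.1, -1.0],   # arg-max is class 0
                       [0.3, 0.2, 1.5]])   # arg-max is class 2
    labels = np.array([0, 1])              # second example is wrong
    print(np.sum(np.argmax(logits, axis=1) == labels))   # 1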

In [20]:
FLAGS = None

In [21]:
def placeholder_inputs(batch_size):
    """Generate placeholder variables to represent the input tensors.
        These placeholders are used as inputs by the rest of the model building
          code and will be fed from the downloaded data in the .run() loop, below.
      Args:
            batch_size: The batch size will be baked into both placeholders.
      Returns:
            images_placeholder: Images placeholder.
            labels_placeholder: Labels placeholder.
    """
    # Note that the shapes of the placeholders match the shapes of the full
    # image and label tensors, except the first dimension is now batch_size
    # rather than the full size of the train or test data sets.
    images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,mnist.IMAGE_PIXELS))
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
    return images_placeholder, labels_placeholder

In [22]:
def fill_feed_dict(data_set, images_pl, labels_pl):
    """Fills the feed_dict used to feed the placeholders for one step.
    Args:
        data_set: The set of images and labels, from input_data.read_data_sets().
        images_pl: The images placeholder, from placeholder_inputs().
        labels_pl: The labels placeholder, from placeholder_inputs().
    Returns:
        feed_dict: A dict mapping the placeholders to the next batch of values.
    """
    images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size, FLAGS.fake_data)
    feed_dict = {
        images_pl: images_feed,
        labels_pl: labels_feed,
    }
    return feed_dict

In [23]:
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
    """Runs one evaluation against the full epoch of data.
      Args:
        sess: The session in which the model has been trained.
        eval_correct: The Tensor that returns the number of correct predictions.
        images_placeholder: The images placeholder.
        labels_placeholder: The labels placeholder.
        data_set: The set of images and labels to evaluate, from
        input_data.read_data_sets().
      """
    # And run one epoch of eval.
    true_count = 0  # Counts the number of correct predictions.
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for step in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    
    
    precision = float(true_count) / num_examples
    print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' % (num_examples, true_count, precision))

In [24]:
def run_training():
    data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = mnist.inference(images_placeholder,FLAGS.hidden1,FLAGS.hidden2)

        # Add to the Graph the Ops for loss calculation.
        loss = mnist.loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = mnist.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = mnist.evaluation(logits, labels_placeholder)

        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        # And then after everything is built:

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train,images_placeholder,labels_placeholder)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss],feed_dict=feed_dict)

            duration = time.time() - start_time

            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                # Print status to stdout.
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess,
                    eval_correct,
                    images_placeholder,
                    labels_placeholder,
                    data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess,
                    eval_correct,
                    images_placeholder,
                    labels_placeholder,
                    data_sets.validation)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess,
                    eval_correct,
                    images_placeholder,
                    labels_placeholder,
                    data_sets.test)

Function Entry

The following code is the entry point of the script; it can be started from the command line or run from the notebook.

  • If no arguments are passed, the default parameters are used.
  • Otherwise, the passed arguments override the defaults.

For example, here I use the command to run only 1000 iterations (--max_steps):

    [cliu@ycao-hadoop2 mnist]$ python fully_connected_feed.py --max_steps=1000

The entry point is the if __name__ == '__main__' block, which parses the arguments; the sketch below shows one way to call the same code interactively from the notebook.
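When the cell is executed inside a notebook, tf.app.run() finishes by calling sys.exit(), which is why the output below ends with a SystemExit message. One way to avoid that when experimenting interactively (a sketch based on the defaults above, not part of the original script; argparse is already imported at the top of the notebook) is to build the FLAGS namespace by hand and call run_training() directly:

    # Hypothetical interactive use: set the module-level FLAGS, then train.
    FLAGS = argparse.Namespace(
        learning_rate=0.01,
        max_steps=1000,                    # e.g. only 1000 iterations
        hidden1=128,
        hidden2=32,
        batch_size=100,
        fake_data=False,
        input_data_dir='/tmp/tensorflow/mnist/input_data',
        log_dir='/tmp/tensorflow/mnist/logs/fully_connected_feed')

    run_training()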


In [26]:
def main(_):
    # The following FLAGS namespace is constructed by the if __name__ == '__main__' block below (using the default parameters):
    # Namespace(batch_size=100, fake_data=False, hidden1=128, hidden2=32, input_data_dir='/tmp/tensorflow/mnist/input_data',
    # learning_rate=0.01, log_dir='/tmp/tensorflow/mnist/logs/fully_connected_feed', max_steps=20000)
    #print(FLAGS)
    
    if tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.DeleteRecursively(FLAGS.log_dir)
    tf.gfile.MakeDirs(FLAGS.log_dir)
    run_training()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='Initial learning rate.')

    parser.add_argument(
      '--max_steps',
      type=int,
      default=20000,
      help='Number of steps to run trainer.')
    
    parser.add_argument(
      '--hidden1',
      type=int,
      default=128,
      help='Number of units in hidden layer 1.')
    
    parser.add_argument(
      '--hidden2',
      type=int,
      default=32,
      help='Number of units in hidden layer 2.')
    
    parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Batch size.  Must divide evenly into the dataset sizes.')
    
    parser.add_argument(
      '--input_data_dir',
      type=str,
      default='/tmp/tensorflow/mnist/input_data',
      help='Directory to put the input data.')
    
    parser.add_argument(
      '--log_dir',
      type=str,
      default='/tmp/tensorflow/mnist/logs/fully_connected_feed',
      help='Directory to put the log data.')
    
    parser.add_argument(
      '--fake_data',
      default=False,
      help='If true, uses fake data for unit testing.',
      action='store_true')

    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)


Namespace(batch_size=100, fake_data=False, hidden1=128, hidden2=32, input_data_dir='/tmp/tensorflow/mnist/input_data', learning_rate=0.01, log_dir='/tmp/tensorflow/mnist/logs/fully_connected_feed', max_steps=20000)
Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz
Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz
Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz
Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz
Step 0: loss = 2.32 (0.014 sec)
Step 100: loss = 2.13 (0.002 sec)
Step 200: loss = 1.86 (0.002 sec)
Step 300: loss = 1.57 (0.002 sec)
Step 400: loss = 1.24 (0.002 sec)
Step 500: loss = 0.90 (0.002 sec)
Step 600: loss = 0.70 (0.002 sec)
Step 700: loss = 0.72 (0.002 sec)
Step 800: loss = 0.60 (0.002 sec)
Step 900: loss = 0.47 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 47099  Precision @ 1: 0.8563
Validation Data Eval:
  Num examples: 5000  Num correct: 4308  Precision @ 1: 0.8616
Test Data Eval:
  Num examples: 10000  Num correct: 8628  Precision @ 1: 0.8628
Step 1000: loss = 0.42 (0.003 sec)
Step 1100: loss = 0.56 (0.090 sec)
Step 1200: loss = 0.50 (0.002 sec)
Step 1300: loss = 0.52 (0.002 sec)
Step 1400: loss = 0.37 (0.002 sec)
Step 1500: loss = 0.46 (0.002 sec)
Step 1600: loss = 0.46 (0.002 sec)
Step 1700: loss = 0.41 (0.002 sec)
Step 1800: loss = 0.39 (0.002 sec)
Step 1900: loss = 0.58 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 49219  Precision @ 1: 0.8949
Validation Data Eval:
  Num examples: 5000  Num correct: 4508  Precision @ 1: 0.9016
Test Data Eval:
  Num examples: 10000  Num correct: 9002  Precision @ 1: 0.9002
Step 2000: loss = 0.30 (0.003 sec)
Step 2100: loss = 0.33 (0.002 sec)
Step 2200: loss = 0.42 (0.087 sec)
Step 2300: loss = 0.26 (0.002 sec)
Step 2400: loss = 0.36 (0.002 sec)
Step 2500: loss = 0.33 (0.002 sec)
Step 2600: loss = 0.34 (0.002 sec)
Step 2700: loss = 0.32 (0.002 sec)
Step 2800: loss = 0.34 (0.002 sec)
Step 2900: loss = 0.28 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 49898  Precision @ 1: 0.9072
Validation Data Eval:
  Num examples: 5000  Num correct: 4580  Precision @ 1: 0.9160
Test Data Eval:
  Num examples: 10000  Num correct: 9140  Precision @ 1: 0.9140
Step 3000: loss = 0.33 (0.003 sec)
Step 3100: loss = 0.29 (0.002 sec)
Step 3200: loss = 0.32 (0.002 sec)
Step 3300: loss = 0.38 (0.087 sec)
Step 3400: loss = 0.26 (0.002 sec)
Step 3500: loss = 0.16 (0.002 sec)
Step 3600: loss = 0.29 (0.002 sec)
Step 3700: loss = 0.21 (0.002 sec)
Step 3800: loss = 0.39 (0.002 sec)
Step 3900: loss = 0.44 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 50508  Precision @ 1: 0.9183
Validation Data Eval:
  Num examples: 5000  Num correct: 4615  Precision @ 1: 0.9230
Test Data Eval:
  Num examples: 10000  Num correct: 9207  Precision @ 1: 0.9207
Step 4000: loss = 0.33 (0.003 sec)
Step 4100: loss = 0.31 (0.002 sec)
Step 4200: loss = 0.18 (0.002 sec)
Step 4300: loss = 0.21 (0.002 sec)
Step 4400: loss = 0.19 (0.087 sec)
Step 4500: loss = 0.23 (0.002 sec)
Step 4600: loss = 0.38 (0.002 sec)
Step 4700: loss = 0.30 (0.002 sec)
Step 4800: loss = 0.33 (0.002 sec)
Step 4900: loss = 0.14 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 50837  Precision @ 1: 0.9243
Validation Data Eval:
  Num examples: 5000  Num correct: 4653  Precision @ 1: 0.9306
Test Data Eval:
  Num examples: 10000  Num correct: 9250  Precision @ 1: 0.9250
Step 5000: loss = 0.35 (0.003 sec)
Step 5100: loss = 0.34 (0.002 sec)
Step 5200: loss = 0.40 (0.002 sec)
Step 5300: loss = 0.20 (0.002 sec)
Step 5400: loss = 0.12 (0.002 sec)
Step 5500: loss = 0.28 (0.091 sec)
Step 5600: loss = 0.22 (0.002 sec)
Step 5700: loss = 0.22 (0.002 sec)
Step 5800: loss = 0.21 (0.002 sec)
Step 5900: loss = 0.31 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51149  Precision @ 1: 0.9300
Validation Data Eval:
  Num examples: 5000  Num correct: 4677  Precision @ 1: 0.9354
Test Data Eval:
  Num examples: 10000  Num correct: 9330  Precision @ 1: 0.9330
Step 6000: loss = 0.18 (0.003 sec)
Step 6100: loss = 0.24 (0.002 sec)
Step 6200: loss = 0.28 (0.002 sec)
Step 6300: loss = 0.24 (0.002 sec)
Step 6400: loss = 0.20 (0.002 sec)
Step 6500: loss = 0.17 (0.002 sec)
Step 6600: loss = 0.35 (0.086 sec)
Step 6700: loss = 0.30 (0.002 sec)
Step 6800: loss = 0.27 (0.002 sec)
Step 6900: loss = 0.15 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51438  Precision @ 1: 0.9352
Validation Data Eval:
  Num examples: 5000  Num correct: 4700  Precision @ 1: 0.9400
Test Data Eval:
  Num examples: 10000  Num correct: 9356  Precision @ 1: 0.9356
Step 7000: loss = 0.18 (0.003 sec)
Step 7100: loss = 0.18 (0.002 sec)
Step 7200: loss = 0.19 (0.002 sec)
Step 7300: loss = 0.34 (0.002 sec)
Step 7400: loss = 0.20 (0.002 sec)
Step 7500: loss = 0.13 (0.002 sec)
Step 7600: loss = 0.18 (0.002 sec)
Step 7700: loss = 0.25 (0.087 sec)
Step 7800: loss = 0.07 (0.002 sec)
Step 7900: loss = 0.22 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51604  Precision @ 1: 0.9383
Validation Data Eval:
  Num examples: 5000  Num correct: 4723  Precision @ 1: 0.9446
Test Data Eval:
  Num examples: 10000  Num correct: 9378  Precision @ 1: 0.9378
Step 8000: loss = 0.13 (0.003 sec)
Step 8100: loss = 0.23 (0.002 sec)
Step 8200: loss = 0.36 (0.002 sec)
Step 8300: loss = 0.23 (0.002 sec)
Step 8400: loss = 0.14 (0.002 sec)
Step 8500: loss = 0.13 (0.002 sec)
Step 8600: loss = 0.21 (0.002 sec)
Step 8700: loss = 0.35 (0.002 sec)
Step 8800: loss = 0.34 (0.087 sec)
Step 8900: loss = 0.12 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51869  Precision @ 1: 0.9431
Validation Data Eval:
  Num examples: 5000  Num correct: 4741  Precision @ 1: 0.9482
Test Data Eval:
  Num examples: 10000  Num correct: 9409  Precision @ 1: 0.9409
Step 9000: loss = 0.16 (0.003 sec)
Step 9100: loss = 0.12 (0.002 sec)
Step 9200: loss = 0.16 (0.002 sec)
Step 9300: loss = 0.28 (0.002 sec)
Step 9400: loss = 0.25 (0.002 sec)
Step 9500: loss = 0.15 (0.002 sec)
Step 9600: loss = 0.17 (0.002 sec)
Step 9700: loss = 0.28 (0.002 sec)
Step 9800: loss = 0.16 (0.002 sec)
Step 9900: loss = 0.28 (0.091 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52013  Precision @ 1: 0.9457
Validation Data Eval:
  Num examples: 5000  Num correct: 4766  Precision @ 1: 0.9532
Test Data Eval:
  Num examples: 10000  Num correct: 9447  Precision @ 1: 0.9447
Step 10000: loss = 0.14 (0.003 sec)
Step 10100: loss = 0.20 (0.002 sec)
Step 10200: loss = 0.16 (0.002 sec)
Step 10300: loss = 0.19 (0.002 sec)
Step 10400: loss = 0.08 (0.002 sec)
Step 10500: loss = 0.17 (0.002 sec)
Step 10600: loss = 0.11 (0.002 sec)
Step 10700: loss = 0.10 (0.002 sec)
Step 10800: loss = 0.22 (0.002 sec)
Step 10900: loss = 0.26 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52260  Precision @ 1: 0.9502
Validation Data Eval:
  Num examples: 5000  Num correct: 4771  Precision @ 1: 0.9542
Test Data Eval:
  Num examples: 10000  Num correct: 9471  Precision @ 1: 0.9471
Step 11000: loss = 0.17 (0.089 sec)
Step 11100: loss = 0.12 (0.002 sec)
Step 11200: loss = 0.14 (0.002 sec)
Step 11300: loss = 0.25 (0.002 sec)
Step 11400: loss = 0.15 (0.002 sec)
Step 11500: loss = 0.20 (0.002 sec)
Step 11600: loss = 0.21 (0.002 sec)
Step 11700: loss = 0.09 (0.002 sec)
Step 11800: loss = 0.33 (0.002 sec)
Step 11900: loss = 0.11 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52457  Precision @ 1: 0.9538
Validation Data Eval:
  Num examples: 5000  Num correct: 4785  Precision @ 1: 0.9570
Test Data Eval:
  Num examples: 10000  Num correct: 9497  Precision @ 1: 0.9497
Step 12000: loss = 0.16 (0.004 sec)
Step 12100: loss = 0.16 (0.092 sec)
Step 12200: loss = 0.11 (0.002 sec)
Step 12300: loss = 0.06 (0.002 sec)
Step 12400: loss = 0.10 (0.002 sec)
Step 12500: loss = 0.15 (0.002 sec)
Step 12600: loss = 0.27 (0.002 sec)
Step 12700: loss = 0.21 (0.002 sec)
Step 12800: loss = 0.15 (0.002 sec)
Step 12900: loss = 0.17 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52628  Precision @ 1: 0.9569
Validation Data Eval:
  Num examples: 5000  Num correct: 4789  Precision @ 1: 0.9578
Test Data Eval:
  Num examples: 10000  Num correct: 9515  Precision @ 1: 0.9515
Step 13000: loss = 0.08 (0.003 sec)
Step 13100: loss = 0.15 (0.002 sec)
Step 13200: loss = 0.24 (0.090 sec)
Step 13300: loss = 0.12 (0.002 sec)
Step 13400: loss = 0.09 (0.002 sec)
Step 13500: loss = 0.15 (0.002 sec)
Step 13600: loss = 0.14 (0.002 sec)
Step 13700: loss = 0.14 (0.002 sec)
Step 13800: loss = 0.21 (0.002 sec)
Step 13900: loss = 0.19 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52690  Precision @ 1: 0.9580
Validation Data Eval:
  Num examples: 5000  Num correct: 4803  Precision @ 1: 0.9606
Test Data Eval:
  Num examples: 10000  Num correct: 9524  Precision @ 1: 0.9524
Step 14000: loss = 0.28 (0.003 sec)
Step 14100: loss = 0.13 (0.002 sec)
Step 14200: loss = 0.08 (0.002 sec)
Step 14300: loss = 0.13 (0.087 sec)
Step 14400: loss = 0.14 (0.002 sec)
Step 14500: loss = 0.11 (0.002 sec)
Step 14600: loss = 0.23 (0.002 sec)
Step 14700: loss = 0.18 (0.002 sec)
Step 14800: loss = 0.10 (0.002 sec)
Step 14900: loss = 0.15 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52753  Precision @ 1: 0.9591
Validation Data Eval:
  Num examples: 5000  Num correct: 4808  Precision @ 1: 0.9616
Test Data Eval:
  Num examples: 10000  Num correct: 9549  Precision @ 1: 0.9549
Step 15000: loss = 0.13 (0.003 sec)
Step 15100: loss = 0.16 (0.002 sec)
Step 15200: loss = 0.24 (0.002 sec)
Step 15300: loss = 0.19 (0.002 sec)
Step 15400: loss = 0.10 (0.088 sec)
Step 15500: loss = 0.05 (0.002 sec)
Step 15600: loss = 0.03 (0.002 sec)
Step 15700: loss = 0.15 (0.002 sec)
Step 15800: loss = 0.20 (0.002 sec)
Step 15900: loss = 0.19 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52901  Precision @ 1: 0.9618
Validation Data Eval:
  Num examples: 5000  Num correct: 4814  Precision @ 1: 0.9628
Test Data Eval:
  Num examples: 10000  Num correct: 9571  Precision @ 1: 0.9571
Step 16000: loss = 0.09 (0.004 sec)
Step 16100: loss = 0.21 (0.002 sec)
Step 16200: loss = 0.08 (0.002 sec)
Step 16300: loss = 0.07 (0.002 sec)
Step 16400: loss = 0.21 (0.002 sec)
Step 16500: loss = 0.10 (0.087 sec)
Step 16600: loss = 0.22 (0.002 sec)
Step 16700: loss = 0.11 (0.002 sec)
Step 16800: loss = 0.14 (0.002 sec)
Step 16900: loss = 0.17 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 53003  Precision @ 1: 0.9637
Validation Data Eval:
  Num examples: 5000  Num correct: 4821  Precision @ 1: 0.9642
Test Data Eval:
  Num examples: 10000  Num correct: 9591  Precision @ 1: 0.9591
Step 17000: loss = 0.09 (0.003 sec)
Step 17100: loss = 0.07 (0.002 sec)
Step 17200: loss = 0.15 (0.002 sec)
Step 17300: loss = 0.09 (0.002 sec)
Step 17400: loss = 0.16 (0.002 sec)
Step 17500: loss = 0.31 (0.002 sec)
Step 17600: loss = 0.06 (0.090 sec)
Step 17700: loss = 0.06 (0.002 sec)
Step 17800: loss = 0.05 (0.002 sec)
Step 17900: loss = 0.09 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 53082  Precision @ 1: 0.9651
Validation Data Eval:
  Num examples: 5000  Num correct: 4829  Precision @ 1: 0.9658
Test Data Eval:
  Num examples: 10000  Num correct: 9611  Precision @ 1: 0.9611
Step 18000: loss = 0.08 (0.003 sec)
Step 18100: loss = 0.19 (0.002 sec)
Step 18200: loss = 0.15 (0.002 sec)
Step 18300: loss = 0.08 (0.002 sec)
Step 18400: loss = 0.21 (0.002 sec)
Step 18500: loss = 0.10 (0.002 sec)
Step 18600: loss = 0.18 (0.002 sec)
Step 18700: loss = 0.20 (0.087 sec)
Step 18800: loss = 0.06 (0.002 sec)
Step 18900: loss = 0.12 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 53142  Precision @ 1: 0.9662
Validation Data Eval:
  Num examples: 5000  Num correct: 4830  Precision @ 1: 0.9660
Test Data Eval:
  Num examples: 10000  Num correct: 9628  Precision @ 1: 0.9628
Step 19000: loss = 0.06 (0.003 sec)
Step 19100: loss = 0.09 (0.002 sec)
Step 19200: loss = 0.07 (0.002 sec)
Step 19300: loss = 0.16 (0.002 sec)
Step 19400: loss = 0.11 (0.002 sec)
Step 19500: loss = 0.06 (0.002 sec)
Step 19600: loss = 0.07 (0.002 sec)
Step 19700: loss = 0.25 (0.002 sec)
Step 19800: loss = 0.07 (0.132 sec)
Step 19900: loss = 0.05 (0.002 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 53195  Precision @ 1: 0.9672
Validation Data Eval:
  Num examples: 5000  Num correct: 4832  Precision @ 1: 0.9664
Test Data Eval:
  Num examples: 10000  Num correct: 9637  Precision @ 1: 0.9637
An exception has occurred, use %tb to see the full traceback.

SystemExit
