Trains and Evaluates the IndianPines network using a feed dictionary


In [1]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time
import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import os
import IndianPinesMLP 
import patch_size
# import IndianPines_data_set as input_data
import Spatial_dataset as input_data

Declare model parameters as external flags


In [2]:
# TF1-style command-line flags. NOTE(review): the loose constants in the next
# cell duplicate most of these values; fill_feed_dict/do_eval read the
# constants while the graph-building code reads FLAGS — keep them in sync.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 50000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 500, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 350, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('hidden3', 150, 'Number of units in hidden layer 3.')
flags.DEFINE_integer('batch_size', 200, 'Batch size.  '
                     'Must divide evenly into the dataset sizes.')
# flags.DEFINE_string('train_dir', '1.mat', 'Directory to put the training data.')

In [3]:
# Training configuration (module-level globals read by the cells below).
learning_rate = 0.01  # SGD learning rate (duplicates FLAGS.learning_rate)
num_epochs = 20  # NOTE(review): defined but never used in this notebook
max_steps = 50000  # total training steps (duplicates FLAGS.max_steps)
IMAGE_SIZE = patch_size.patch_size  # spatial patch height/width in pixels
fc1 = 500  # units in hidden layer 1 (duplicates FLAGS.hidden1)
fc2 = 350  # units in hidden layer 2 (duplicates FLAGS.hidden2)
fc3 = 150  # units in hidden layer 3 (duplicates FLAGS.hidden3)
batch_size = 200  # examples per batch; read by fill_feed_dict/do_eval
TRAIN_FILES = 8  # number of Train_<patch>_<i>.mat shards on disk
TEST_FILES = 4  # number of Test_<patch>_<i>.mat shards on disk
DATA_PATH = os.path.join(os.getcwd(),"Data")  # directory holding the .mat shards

In [4]:
def placeholder_inputs(batch_size):
    """Generate placeholder variables to represent the input tensors.

    These placeholders are used as inputs by the rest of the model building
    code and will be fed from the loaded data in the .run() loop, below.

    Args:
      batch_size: The batch size will be baked into both placeholders.

    Returns:
      images_placeholder: float32 placeholder of shape
        (batch_size, IndianPinesMLP.IMAGE_PIXELS).
      labels_placeholder: int32 placeholder of shape (batch_size,).
    """
    # Note that the shapes of the placeholders match the shapes of the full
    # image and label tensors, except the first dimension is now batch_size
    # rather than the full size of the train or test data sets.
    images_placeholder = tf.placeholder(
        tf.float32, shape=(batch_size, IndianPinesMLP.IMAGE_PIXELS))
    # Fix: the original wrote shape=(batch_size), which is just the int
    # batch_size (missing comma), not a 1-tuple. Spell the rank-1 shape
    # explicitly so the placeholder's rank is unambiguous.
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size,))
    return images_placeholder, labels_placeholder

In [5]:
def fill_feed_dict(data_set, images_pl, labels_pl):
    """Build the feed_dict for one training or evaluation step.

    Pulls the next `batch_size` examples out of `data_set` and maps them
    onto the image and label placeholders, i.e.:
        {<placeholder>: <tensor of values to be fed>, ...}

    Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().
    Returns:
    feed_dict: Mapping from the two placeholders to the next batch of values.
    """
    # NOTE(review): reads the module-level `batch_size` constant, not
    # FLAGS.batch_size — the two are kept equal by convention.
    next_images, next_labels = data_set.next_batch(batch_size)
    return {images_pl: next_images, labels_pl: next_labels}

In [6]:
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
    """Runs one evaluation pass over (almost) the full data set.

    Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
    """
    # Only whole batches are evaluated, so up to batch_size - 1 trailing
    # examples are skipped each epoch.
    steps_per_epoch = data_set.num_examples // batch_size
    num_examples = steps_per_epoch * batch_size
    true_count = 0  # running tally of correct predictions
    for _ in xrange(steps_per_epoch):
        batch_feed = fill_feed_dict(data_set,
                                    images_placeholder,
                                    labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=batch_feed)
    precision = true_count / num_examples
    print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))

In [7]:
def add_DataSet(first, second, bands=220):
    """Concatenate two DataSet objects into a single DataSet.

    Args:
      first: input_data.DataSet whose .images/.labels arrays come first.
      second: input_data.DataSet to append to `first`.
      bands: number of spectral bands per pixel. Defaults to 220 (the
        Indian Pines sensor depth), which was previously hard-coded; the
        default keeps existing two-argument callers working unchanged.

    Returns:
      A new input_data.DataSet holding images reshaped to channel-first
      layout (N, bands, IMAGE_SIZE, IMAGE_SIZE) and the concatenated labels.
    """
    merged_images = np.concatenate((first.images, second.images), axis=0)
    merged_labels = np.concatenate((first.labels, second.labels), axis=0)
    # Restore the 4-D patch layout, then move channels first (N, C, H, W).
    merged_images = merged_images.reshape(
        merged_images.shape[0], IMAGE_SIZE, IMAGE_SIZE, bands)
    merged_images = np.transpose(merged_images, (0, 3, 1, 2))
    merged_labels = np.transpose(merged_labels)
    return input_data.DataSet(merged_images, merged_labels)

In [8]:
def run_training():
    """Train the IndianPines MLP for FLAGS.max_steps steps.

    Loads the sharded train/test .mat patch files from DATA_PATH, builds
    the TF1 graph (inference -> loss -> train op -> eval op), then runs the
    training loop: prints the loss every 50 steps and saves a checkpoint
    plus evaluates on both the training and test sets every 1000 steps.
    """
    # Get the sets of images and labels for training, validation, and
    # test on IndianPines.
    
    """Concatenating all the training and test mat files"""
    # Merge the TRAIN_FILES training shards into a single DataSet.
    for i in range(TRAIN_FILES):
        data_sets = input_data.read_data_sets(os.path.join(DATA_PATH, 'Train_'+str(IMAGE_SIZE)+'_'+str(i+1)+'.mat'), 'train')
        if(i==0):
            # First shard seeds the accumulator.
            Training_data = data_sets
            continue
        else:
            Training_data = add_DataSet(Training_data,data_sets)
            
    # Merge the TEST_FILES test shards the same way.
    for i in range(TEST_FILES):
        data_sets = input_data.read_data_sets(os.path.join(DATA_PATH, 'Test_'+str(IMAGE_SIZE)+'_'+str(i+1)+'.mat'),'test')
        if(i==0):
            Test_data = data_sets
            continue
        else:
            Test_data = add_DataSet(Test_data,data_sets)
        
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = IndianPinesMLP.inference(images_placeholder,
                                 FLAGS.hidden1,
                                 FLAGS.hidden2,
                                 FLAGS.hidden3)

        # Add to the Graph the Ops for loss calculation.
        loss = IndianPinesMLP.loss(logits, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = IndianPinesMLP.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = IndianPinesMLP.evaluation(logits, labels_placeholder)

        # Build the summary operation based on the TF collection of Summaries.
    #    summary_op = tf.merge_all_summaries()

        # Add the variable initializer Op (TF1 pre-r0.12 API).
        init = tf.initialize_all_variables()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
    #    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        # And then after everything is built:

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(Training_data,
                                     images_placeholder,
                                     labels_placeholder)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss],
                                   feed_dict=feed_dict)

            duration = time.time() - start_time

            # Write the summaries and print an overview fairly often.
            if step % 50 == 0:
            # Print status to stdout.
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
            # Update the events file.
    #             summary_str = sess.run(summary_op, feed_dict=feed_dict)
    #             summary_writer.add_summary(summary_str, step)
    #             summary_writer.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                # Checkpoint lands in the working directory, named by patch size.
                saver.save(sess, 'model-MLP-'+str(IMAGE_SIZE)+'X'+str(IMAGE_SIZE)+'.ckpt', global_step=step)

            # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        Training_data)
                # Evaluate against the held-out test set.
                print('Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        images_placeholder,
                        labels_placeholder,
                        Test_data)

In [9]:
run_training()


Tensor("Reshape:0", shape=(200, 220), dtype=float32)
Step 0: loss = 2.77 (0.172 sec)
Step 50: loss = 2.76 (0.002 sec)
Step 100: loss = 2.75 (0.003 sec)
Step 150: loss = 2.74 (0.002 sec)
Step 200: loss = 2.73 (0.002 sec)
Step 250: loss = 2.70 (0.002 sec)
Step 300: loss = 2.66 (0.002 sec)
Step 350: loss = 2.59 (0.003 sec)
Step 400: loss = 2.52 (0.004 sec)
Step 450: loss = 2.46 (0.002 sec)
Step 500: loss = 2.34 (0.002 sec)
Step 550: loss = 2.22 (0.002 sec)
Step 600: loss = 2.11 (0.002 sec)
Step 650: loss = 2.02 (0.003 sec)
Step 700: loss = 1.99 (0.002 sec)
Step 750: loss = 1.85 (0.002 sec)
Step 800: loss = 1.82 (0.004 sec)
Step 850: loss = 1.69 (0.003 sec)
Step 900: loss = 1.72 (0.002 sec)
Step 950: loss = 1.55 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 1655  Precision @ 1: 0.5172
Test Data Eval:
  Num examples: 1600  Num correct: 507  Precision @ 1: 0.3169
Step 1000: loss = 1.56 (0.002 sec)
Step 1050: loss = 1.43 (0.002 sec)
Step 1100: loss = 1.43 (0.002 sec)
Step 1150: loss = 1.44 (0.002 sec)
Step 1200: loss = 1.27 (0.004 sec)
Step 1250: loss = 1.33 (0.002 sec)
Step 1300: loss = 1.24 (0.002 sec)
Step 1350: loss = 1.24 (0.002 sec)
Step 1400: loss = 1.17 (0.002 sec)
Step 1450: loss = 1.41 (0.002 sec)
Step 1500: loss = 1.15 (0.002 sec)
Step 1550: loss = 1.19 (0.002 sec)
Step 1600: loss = 1.12 (0.004 sec)
Step 1650: loss = 1.27 (0.002 sec)
Step 1700: loss = 1.09 (0.002 sec)
Step 1750: loss = 1.25 (0.002 sec)
Step 1800: loss = 1.11 (0.002 sec)
Step 1850: loss = 1.15 (0.002 sec)
Step 1900: loss = 1.05 (0.002 sec)
Step 1950: loss = 1.07 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 1904  Precision @ 1: 0.5950
Test Data Eval:
  Num examples: 1600  Num correct: 706  Precision @ 1: 0.4412
Step 2000: loss = 1.12 (0.007 sec)
Step 2050: loss = 1.07 (0.002 sec)
Step 2100: loss = 1.12 (0.002 sec)
Step 2150: loss = 1.10 (0.002 sec)
Step 2200: loss = 1.13 (0.002 sec)
Step 2250: loss = 1.24 (0.002 sec)
Step 2300: loss = 1.15 (0.002 sec)
Step 2350: loss = 1.18 (0.002 sec)
Step 2400: loss = 0.94 (0.004 sec)
Step 2450: loss = 1.03 (0.002 sec)
Step 2500: loss = 0.99 (0.002 sec)
Step 2550: loss = 1.03 (0.002 sec)
Step 2600: loss = 1.03 (0.002 sec)
Step 2650: loss = 1.08 (0.002 sec)
Step 2700: loss = 0.96 (0.002 sec)
Step 2750: loss = 0.96 (0.002 sec)
Step 2800: loss = 1.03 (0.007 sec)
Step 2850: loss = 1.06 (0.002 sec)
Step 2900: loss = 1.06 (0.002 sec)
Step 2950: loss = 1.12 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2033  Precision @ 1: 0.6353
Test Data Eval:
  Num examples: 1600  Num correct: 793  Precision @ 1: 0.4956
Step 3000: loss = 0.94 (0.003 sec)
Step 3050: loss = 1.12 (0.003 sec)
Step 3100: loss = 0.92 (0.002 sec)
Step 3150: loss = 0.91 (0.002 sec)
Step 3200: loss = 0.99 (0.003 sec)
Step 3250: loss = 1.08 (0.002 sec)
Step 3300: loss = 0.93 (0.002 sec)
Step 3350: loss = 1.01 (0.002 sec)
Step 3400: loss = 1.02 (0.002 sec)
Step 3450: loss = 1.00 (0.003 sec)
Step 3500: loss = 0.84 (0.002 sec)
Step 3550: loss = 0.99 (0.002 sec)
Step 3600: loss = 0.97 (0.004 sec)
Step 3650: loss = 0.92 (0.002 sec)
Step 3700: loss = 1.05 (0.002 sec)
Step 3750: loss = 1.00 (0.002 sec)
Step 3800: loss = 1.00 (0.002 sec)
Step 3850: loss = 0.86 (0.002 sec)
Step 3900: loss = 1.02 (0.002 sec)
Step 3950: loss = 0.80 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2158  Precision @ 1: 0.6744
Test Data Eval:
  Num examples: 1600  Num correct: 860  Precision @ 1: 0.5375
Step 4000: loss = 0.98 (0.005 sec)
Step 4050: loss = 0.84 (0.003 sec)
Step 4100: loss = 0.83 (0.002 sec)
Step 4150: loss = 0.86 (0.002 sec)
Step 4200: loss = 0.86 (0.002 sec)
Step 4250: loss = 0.81 (0.002 sec)
Step 4300: loss = 0.84 (0.002 sec)
Step 4350: loss = 1.04 (0.002 sec)
Step 4400: loss = 1.00 (0.004 sec)
Step 4450: loss = 0.86 (0.002 sec)
Step 4500: loss = 1.00 (0.002 sec)
Step 4550: loss = 0.81 (0.002 sec)
Step 4600: loss = 0.92 (0.002 sec)
Step 4650: loss = 0.86 (0.002 sec)
Step 4700: loss = 0.91 (0.002 sec)
Step 4750: loss = 0.91 (0.002 sec)
Step 4800: loss = 0.97 (0.004 sec)
Step 4850: loss = 0.84 (0.002 sec)
Step 4900: loss = 0.85 (0.002 sec)
Step 4950: loss = 0.84 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2182  Precision @ 1: 0.6819
Test Data Eval:
  Num examples: 1600  Num correct: 882  Precision @ 1: 0.5513
Step 5000: loss = 0.82 (0.003 sec)
Step 5050: loss = 0.90 (0.002 sec)
Step 5100: loss = 0.79 (0.002 sec)
Step 5150: loss = 0.89 (0.002 sec)
Step 5200: loss = 0.67 (0.004 sec)
Step 5250: loss = 0.81 (0.002 sec)
Step 5300: loss = 0.77 (0.003 sec)
Step 5350: loss = 0.76 (0.002 sec)
Step 5400: loss = 0.76 (0.004 sec)
Step 5450: loss = 0.80 (0.002 sec)
Step 5500: loss = 0.82 (0.002 sec)
Step 5550: loss = 0.78 (0.002 sec)
Step 5600: loss = 0.85 (0.004 sec)
Step 5650: loss = 0.78 (0.002 sec)
Step 5700: loss = 0.77 (0.002 sec)
Step 5750: loss = 0.81 (0.002 sec)
Step 5800: loss = 0.78 (0.002 sec)
Step 5850: loss = 0.70 (0.002 sec)
Step 5900: loss = 0.79 (0.002 sec)
Step 5950: loss = 0.72 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2308  Precision @ 1: 0.7212
Test Data Eval:
  Num examples: 1600  Num correct: 949  Precision @ 1: 0.5931
Step 6000: loss = 0.79 (0.005 sec)
Step 6050: loss = 0.85 (0.002 sec)
Step 6100: loss = 0.69 (0.002 sec)
Step 6150: loss = 0.76 (0.002 sec)
Step 6200: loss = 0.68 (0.002 sec)
Step 6250: loss = 0.85 (0.002 sec)
Step 6300: loss = 0.79 (0.002 sec)
Step 6350: loss = 0.76 (0.002 sec)
Step 6400: loss = 0.77 (0.004 sec)
Step 6450: loss = 0.72 (0.003 sec)
Step 6500: loss = 0.74 (0.002 sec)
Step 6550: loss = 0.68 (0.002 sec)
Step 6600: loss = 0.69 (0.002 sec)
Step 6650: loss = 0.77 (0.002 sec)
Step 6700: loss = 0.74 (0.002 sec)
Step 6750: loss = 0.71 (0.002 sec)
Step 6800: loss = 0.64 (0.003 sec)
Step 6850: loss = 0.80 (0.002 sec)
Step 6900: loss = 0.61 (0.002 sec)
Step 6950: loss = 0.65 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2369  Precision @ 1: 0.7403
Test Data Eval:
  Num examples: 1600  Num correct: 973  Precision @ 1: 0.6081
Step 7000: loss = 0.65 (0.003 sec)
Step 7050: loss = 0.82 (0.002 sec)
Step 7100: loss = 0.63 (0.002 sec)
Step 7150: loss = 0.75 (0.002 sec)
Step 7200: loss = 0.76 (0.003 sec)
Step 7250: loss = 0.79 (0.002 sec)
Step 7300: loss = 0.66 (0.002 sec)
Step 7350: loss = 0.70 (0.002 sec)
Step 7400: loss = 0.62 (0.002 sec)
Step 7450: loss = 0.65 (0.002 sec)
Step 7500: loss = 0.60 (0.003 sec)
Step 7550: loss = 0.71 (0.003 sec)
Step 7600: loss = 0.62 (0.004 sec)
Step 7650: loss = 0.67 (0.002 sec)
Step 7700: loss = 0.61 (0.002 sec)
Step 7750: loss = 0.60 (0.002 sec)
Step 7800: loss = 0.61 (0.002 sec)
Step 7850: loss = 0.66 (0.002 sec)
Step 7900: loss = 0.75 (0.002 sec)
Step 7950: loss = 0.83 (0.003 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2436  Precision @ 1: 0.7612
Test Data Eval:
  Num examples: 1600  Num correct: 985  Precision @ 1: 0.6156
Step 8000: loss = 0.64 (0.005 sec)
Step 8050: loss = 0.63 (0.002 sec)
Step 8100: loss = 0.69 (0.002 sec)
Step 8150: loss = 0.78 (0.002 sec)
Step 8200: loss = 0.60 (0.002 sec)
Step 8250: loss = 0.60 (0.002 sec)
Step 8300: loss = 0.63 (0.002 sec)
Step 8350: loss = 0.61 (0.002 sec)
Step 8400: loss = 0.64 (0.004 sec)
Step 8450: loss = 0.63 (0.002 sec)
Step 8500: loss = 0.71 (0.002 sec)
Step 8550: loss = 0.58 (0.002 sec)
Step 8600: loss = 0.67 (0.002 sec)
Step 8650: loss = 0.55 (0.002 sec)
Step 8700: loss = 0.55 (0.002 sec)
Step 8750: loss = 0.66 (0.002 sec)
Step 8800: loss = 0.61 (0.004 sec)
Step 8850: loss = 0.59 (0.002 sec)
Step 8900: loss = 0.72 (0.002 sec)
Step 8950: loss = 0.63 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2486  Precision @ 1: 0.7769
Test Data Eval:
  Num examples: 1600  Num correct: 1005  Precision @ 1: 0.6281
Step 9000: loss = 0.54 (0.002 sec)
Step 9050: loss = 0.62 (0.003 sec)
Step 9100: loss = 0.57 (0.003 sec)
Step 9150: loss = 0.69 (0.002 sec)
Step 9200: loss = 0.65 (0.003 sec)
Step 9250: loss = 0.65 (0.002 sec)
Step 9300: loss = 0.59 (0.002 sec)
Step 9350: loss = 0.58 (0.002 sec)
Step 9400: loss = 0.64 (0.002 sec)
Step 9450: loss = 0.61 (0.003 sec)
Step 9500: loss = 0.63 (0.002 sec)
Step 9550: loss = 0.63 (0.002 sec)
Step 9600: loss = 0.60 (0.004 sec)
Step 9650: loss = 0.55 (0.002 sec)
Step 9700: loss = 0.66 (0.002 sec)
Step 9750: loss = 0.56 (0.002 sec)
Step 9800: loss = 0.56 (0.002 sec)
Step 9850: loss = 0.54 (0.002 sec)
Step 9900: loss = 0.61 (0.003 sec)
Step 9950: loss = 0.50 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2544  Precision @ 1: 0.7950
Test Data Eval:
  Num examples: 1600  Num correct: 1018  Precision @ 1: 0.6362
Step 10000: loss = 0.58 (0.006 sec)
Step 10050: loss = 0.65 (0.002 sec)
Step 10100: loss = 0.54 (0.002 sec)
Step 10150: loss = 0.54 (0.002 sec)
Step 10200: loss = 0.61 (0.003 sec)
Step 10250: loss = 0.62 (0.002 sec)
Step 10300: loss = 0.57 (0.002 sec)
Step 10350: loss = 0.68 (0.002 sec)
Step 10400: loss = 0.53 (0.004 sec)
Step 10450: loss = 0.60 (0.002 sec)
Step 10500: loss = 0.63 (0.002 sec)
Step 10550: loss = 0.52 (0.002 sec)
Step 10600: loss = 0.48 (0.002 sec)
Step 10650: loss = 0.57 (0.002 sec)
Step 10700: loss = 0.53 (0.002 sec)
Step 10750: loss = 0.58 (0.002 sec)
Step 10800: loss = 0.57 (0.003 sec)
Step 10850: loss = 0.46 (0.002 sec)
Step 10900: loss = 0.55 (0.003 sec)
Step 10950: loss = 0.54 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2579  Precision @ 1: 0.8059
Test Data Eval:
  Num examples: 1600  Num correct: 1049  Precision @ 1: 0.6556
Step 11000: loss = 0.49 (0.002 sec)
Step 11050: loss = 0.66 (0.002 sec)
Step 11100: loss = 0.50 (0.002 sec)
Step 11150: loss = 0.63 (0.002 sec)
Step 11200: loss = 0.52 (0.004 sec)
Step 11250: loss = 0.52 (0.002 sec)
Step 11300: loss = 0.45 (0.002 sec)
Step 11350: loss = 0.52 (0.002 sec)
Step 11400: loss = 0.59 (0.002 sec)
Step 11450: loss = 0.51 (0.002 sec)
Step 11500: loss = 0.48 (0.002 sec)
Step 11550: loss = 0.55 (0.002 sec)
Step 11600: loss = 0.50 (0.003 sec)
Step 11650: loss = 0.51 (0.002 sec)
Step 11700: loss = 0.53 (0.002 sec)
Step 11750: loss = 0.47 (0.002 sec)
Step 11800: loss = 0.55 (0.002 sec)
Step 11850: loss = 0.54 (0.002 sec)
Step 11900: loss = 0.52 (0.002 sec)
Step 11950: loss = 0.50 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2648  Precision @ 1: 0.8275
Test Data Eval:
  Num examples: 1600  Num correct: 1072  Precision @ 1: 0.6700
Step 12000: loss = 0.58 (0.007 sec)
Step 12050: loss = 0.49 (0.002 sec)
Step 12100: loss = 0.49 (0.002 sec)
Step 12150: loss = 0.51 (0.002 sec)
Step 12200: loss = 0.43 (0.002 sec)
Step 12250: loss = 0.55 (0.002 sec)
Step 12300: loss = 0.47 (0.002 sec)
Step 12350: loss = 0.41 (0.002 sec)
Step 12400: loss = 0.49 (0.004 sec)
Step 12450: loss = 0.49 (0.002 sec)
Step 12500: loss = 0.50 (0.002 sec)
Step 12550: loss = 0.48 (0.002 sec)
Step 12600: loss = 0.48 (0.002 sec)
Step 12650: loss = 0.46 (0.003 sec)
Step 12700: loss = 0.53 (0.002 sec)
Step 12750: loss = 0.45 (0.002 sec)
Step 12800: loss = 0.51 (0.003 sec)
Step 12850: loss = 0.50 (0.002 sec)
Step 12900: loss = 0.42 (0.002 sec)
Step 12950: loss = 0.47 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2653  Precision @ 1: 0.8291
Test Data Eval:
  Num examples: 1600  Num correct: 1097  Precision @ 1: 0.6856
Step 13000: loss = 0.45 (0.003 sec)
Step 13050: loss = 0.44 (0.002 sec)
Step 13100: loss = 0.35 (0.002 sec)
Step 13150: loss = 0.45 (0.002 sec)
Step 13200: loss = 0.37 (0.004 sec)
Step 13250: loss = 0.50 (0.002 sec)
Step 13300: loss = 0.56 (0.002 sec)
Step 13350: loss = 0.47 (0.002 sec)
Step 13400: loss = 0.52 (0.002 sec)
Step 13450: loss = 0.53 (0.002 sec)
Step 13500: loss = 0.44 (0.002 sec)
Step 13550: loss = 0.49 (0.002 sec)
Step 13600: loss = 0.43 (0.003 sec)
Step 13650: loss = 0.44 (0.002 sec)
Step 13700: loss = 0.45 (0.002 sec)
Step 13750: loss = 0.48 (0.002 sec)
Step 13800: loss = 0.52 (0.002 sec)
Step 13850: loss = 0.45 (0.002 sec)
Step 13900: loss = 0.49 (0.002 sec)
Step 13950: loss = 0.51 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2715  Precision @ 1: 0.8484
Test Data Eval:
  Num examples: 1600  Num correct: 1118  Precision @ 1: 0.6987
Step 14000: loss = 0.41 (0.005 sec)
Step 14050: loss = 0.46 (0.002 sec)
Step 14100: loss = 0.41 (0.002 sec)
Step 14150: loss = 0.42 (0.002 sec)
Step 14200: loss = 0.56 (0.002 sec)
Step 14250: loss = 0.48 (0.002 sec)
Step 14300: loss = 0.39 (0.002 sec)
Step 14350: loss = 0.44 (0.002 sec)
Step 14400: loss = 0.46 (0.004 sec)
Step 14450: loss = 0.33 (0.002 sec)
Step 14500: loss = 0.37 (0.002 sec)
Step 14550: loss = 0.48 (0.003 sec)
Step 14600: loss = 0.37 (0.002 sec)
Step 14650: loss = 0.49 (0.003 sec)
Step 14700: loss = 0.34 (0.002 sec)
Step 14750: loss = 0.53 (0.002 sec)
Step 14800: loss = 0.42 (0.004 sec)
Step 14850: loss = 0.40 (0.002 sec)
Step 14900: loss = 0.42 (0.002 sec)
Step 14950: loss = 0.45 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2748  Precision @ 1: 0.8588
Test Data Eval:
  Num examples: 1600  Num correct: 1132  Precision @ 1: 0.7075
Step 15000: loss = 0.37 (0.002 sec)
Step 15050: loss = 0.44 (0.002 sec)
Step 15100: loss = 0.35 (0.002 sec)
Step 15150: loss = 0.42 (0.002 sec)
Step 15200: loss = 0.38 (0.003 sec)
Step 15250: loss = 0.32 (0.002 sec)
Step 15300: loss = 0.38 (0.002 sec)
Step 15350: loss = 0.35 (0.002 sec)
Step 15400: loss = 0.38 (0.002 sec)
Step 15450: loss = 0.35 (0.002 sec)
Step 15500: loss = 0.43 (0.002 sec)
Step 15550: loss = 0.42 (0.002 sec)
Step 15600: loss = 0.49 (0.004 sec)
Step 15650: loss = 0.44 (0.002 sec)
Step 15700: loss = 0.38 (0.002 sec)
Step 15750: loss = 0.39 (0.002 sec)
Step 15800: loss = 0.46 (0.002 sec)
Step 15850: loss = 0.39 (0.002 sec)
Step 15900: loss = 0.47 (0.002 sec)
Step 15950: loss = 0.48 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2765  Precision @ 1: 0.8641
Test Data Eval:
  Num examples: 1600  Num correct: 1165  Precision @ 1: 0.7281
Step 16000: loss = 0.30 (0.006 sec)
Step 16050: loss = 0.48 (0.002 sec)
Step 16100: loss = 0.37 (0.002 sec)
Step 16150: loss = 0.37 (0.002 sec)
Step 16200: loss = 0.41 (0.002 sec)
Step 16250: loss = 0.34 (0.002 sec)
Step 16300: loss = 0.39 (0.002 sec)
Step 16350: loss = 0.42 (0.002 sec)
Step 16400: loss = 0.37 (0.004 sec)
Step 16450: loss = 0.38 (0.002 sec)
Step 16500: loss = 0.46 (0.002 sec)
Step 16550: loss = 0.38 (0.002 sec)
Step 16600: loss = 0.34 (0.002 sec)
Step 16650: loss = 0.36 (0.002 sec)
Step 16700: loss = 0.41 (0.002 sec)
Step 16750: loss = 0.33 (0.002 sec)
Step 16800: loss = 0.30 (0.003 sec)
Step 16850: loss = 0.36 (0.002 sec)
Step 16900: loss = 0.34 (0.002 sec)
Step 16950: loss = 0.41 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2760  Precision @ 1: 0.8625
Test Data Eval:
  Num examples: 1600  Num correct: 1158  Precision @ 1: 0.7238
Step 17000: loss = 0.42 (0.003 sec)
Step 17050: loss = 0.36 (0.002 sec)
Step 17100: loss = 0.37 (0.002 sec)
Step 17150: loss = 0.51 (0.002 sec)
Step 17200: loss = 0.40 (0.004 sec)
Step 17250: loss = 0.35 (0.002 sec)
Step 17300: loss = 0.34 (0.002 sec)
Step 17350: loss = 0.35 (0.002 sec)
Step 17400: loss = 0.34 (0.002 sec)
Step 17450: loss = 0.26 (0.002 sec)
Step 17500: loss = 0.35 (0.002 sec)
Step 17550: loss = 0.39 (0.002 sec)
Step 17600: loss = 0.36 (0.004 sec)
Step 17650: loss = 0.42 (0.002 sec)
Step 17700: loss = 0.29 (0.002 sec)
Step 17750: loss = 0.41 (0.002 sec)
Step 17800: loss = 0.29 (0.002 sec)
Step 17850: loss = 0.34 (0.002 sec)
Step 17900: loss = 0.33 (0.002 sec)
Step 17950: loss = 0.43 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2788  Precision @ 1: 0.8712
Test Data Eval:
  Num examples: 1600  Num correct: 1161  Precision @ 1: 0.7256
Step 18000: loss = 0.39 (0.006 sec)
Step 18050: loss = 0.40 (0.002 sec)
Step 18100: loss = 0.36 (0.002 sec)
Step 18150: loss = 0.31 (0.002 sec)
Step 18200: loss = 0.29 (0.002 sec)
Step 18250: loss = 0.38 (0.002 sec)
Step 18300: loss = 0.36 (0.002 sec)
Step 18350: loss = 0.36 (0.002 sec)
Step 18400: loss = 0.33 (0.004 sec)
Step 18450: loss = 0.39 (0.002 sec)
Step 18500: loss = 0.36 (0.002 sec)
Step 18550: loss = 0.36 (0.002 sec)
Step 18600: loss = 0.35 (0.002 sec)
Step 18650: loss = 0.51 (0.002 sec)
Step 18700: loss = 0.28 (0.002 sec)
Step 18750: loss = 0.38 (0.002 sec)
Step 18800: loss = 0.36 (0.004 sec)
Step 18850: loss = 0.30 (0.002 sec)
Step 18900: loss = 0.39 (0.002 sec)
Step 18950: loss = 0.31 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2858  Precision @ 1: 0.8931
Test Data Eval:
  Num examples: 1600  Num correct: 1189  Precision @ 1: 0.7431
Step 19000: loss = 0.29 (0.003 sec)
Step 19050: loss = 0.33 (0.003 sec)
Step 19100: loss = 0.29 (0.002 sec)
Step 19150: loss = 0.35 (0.002 sec)
Step 19200: loss = 0.30 (0.003 sec)
Step 19250: loss = 0.33 (0.003 sec)
Step 19300: loss = 0.37 (0.002 sec)
Step 19350: loss = 0.33 (0.002 sec)
Step 19400: loss = 0.35 (0.002 sec)
Step 19450: loss = 0.35 (0.002 sec)
Step 19500: loss = 0.36 (0.002 sec)
Step 19550: loss = 0.34 (0.002 sec)
Step 19600: loss = 0.34 (0.004 sec)
Step 19650: loss = 0.35 (0.003 sec)
Step 19700: loss = 0.35 (0.002 sec)
Step 19750: loss = 0.32 (0.002 sec)
Step 19800: loss = 0.26 (0.002 sec)
Step 19850: loss = 0.41 (0.002 sec)
Step 19900: loss = 0.31 (0.002 sec)
Step 19950: loss = 0.28 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2839  Precision @ 1: 0.8872
Test Data Eval:
  Num examples: 1600  Num correct: 1195  Precision @ 1: 0.7469
Step 20000: loss = 0.32 (0.005 sec)
Step 20050: loss = 0.28 (0.002 sec)
Step 20100: loss = 0.31 (0.002 sec)
Step 20150: loss = 0.28 (0.002 sec)
Step 20200: loss = 0.31 (0.002 sec)
Step 20250: loss = 0.32 (0.002 sec)
Step 20300: loss = 0.34 (0.002 sec)
Step 20350: loss = 0.36 (0.002 sec)
Step 20400: loss = 0.44 (0.005 sec)
Step 20450: loss = 0.35 (0.002 sec)
Step 20500: loss = 0.28 (0.002 sec)
Step 20550: loss = 0.29 (0.002 sec)
Step 20600: loss = 0.35 (0.003 sec)
Step 20650: loss = 0.32 (0.002 sec)
Step 20700: loss = 0.29 (0.002 sec)
Step 20750: loss = 0.27 (0.002 sec)
Step 20800: loss = 0.30 (0.003 sec)
Step 20850: loss = 0.30 (0.002 sec)
Step 20900: loss = 0.34 (0.002 sec)
Step 20950: loss = 0.32 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2872  Precision @ 1: 0.8975
Test Data Eval:
  Num examples: 1600  Num correct: 1210  Precision @ 1: 0.7562
Step 21000: loss = 0.29 (0.003 sec)
Step 21050: loss = 0.28 (0.002 sec)
Step 21100: loss = 0.23 (0.002 sec)
Step 21150: loss = 0.29 (0.002 sec)
Step 21200: loss = 0.30 (0.004 sec)
Step 21250: loss = 0.26 (0.002 sec)
Step 21300: loss = 0.33 (0.002 sec)
Step 21350: loss = 0.29 (0.002 sec)
Step 21400: loss = 0.23 (0.002 sec)
Step 21450: loss = 0.28 (0.002 sec)
Step 21500: loss = 0.31 (0.003 sec)
Step 21550: loss = 0.30 (0.002 sec)
Step 21600: loss = 0.34 (0.004 sec)
Step 21650: loss = 0.30 (0.002 sec)
Step 21700: loss = 0.28 (0.003 sec)
Step 21750: loss = 0.22 (0.002 sec)
Step 21800: loss = 0.34 (0.002 sec)
Step 21850: loss = 0.33 (0.002 sec)
Step 21900: loss = 0.28 (0.002 sec)
Step 21950: loss = 0.38 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2868  Precision @ 1: 0.8962
Test Data Eval:
  Num examples: 1600  Num correct: 1214  Precision @ 1: 0.7588
Step 22000: loss = 0.27 (0.005 sec)
Step 22050: loss = 0.28 (0.002 sec)
Step 22100: loss = 0.29 (0.002 sec)
Step 22150: loss = 0.26 (0.002 sec)
Step 22200: loss = 0.24 (0.003 sec)
Step 22250: loss = 0.34 (0.002 sec)
Step 22300: loss = 0.26 (0.002 sec)
Step 22350: loss = 0.32 (0.002 sec)
Step 22400: loss = 0.22 (0.004 sec)
Step 22450: loss = 0.29 (0.002 sec)
Step 22500: loss = 0.34 (0.002 sec)
Step 22550: loss = 0.28 (0.002 sec)
Step 22600: loss = 0.30 (0.002 sec)
Step 22650: loss = 0.32 (0.002 sec)
Step 22700: loss = 0.32 (0.002 sec)
Step 22750: loss = 0.32 (0.002 sec)
Step 22800: loss = 0.30 (0.003 sec)
Step 22850: loss = 0.25 (0.002 sec)
Step 22900: loss = 0.32 (0.002 sec)
Step 22950: loss = 0.19 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2883  Precision @ 1: 0.9009
Test Data Eval:
  Num examples: 1600  Num correct: 1206  Precision @ 1: 0.7538
Step 23000: loss = 0.27 (0.003 sec)
Step 23050: loss = 0.24 (0.002 sec)
Step 23100: loss = 0.28 (0.002 sec)
Step 23150: loss = 0.25 (0.002 sec)
Step 23200: loss = 0.25 (0.003 sec)
Step 23250: loss = 0.29 (0.002 sec)
Step 23300: loss = 0.25 (0.002 sec)
Step 23350: loss = 0.28 (0.002 sec)
Step 23400: loss = 0.41 (0.002 sec)
Step 23450: loss = 0.28 (0.002 sec)
Step 23500: loss = 0.25 (0.002 sec)
Step 23550: loss = 0.28 (0.002 sec)
Step 23600: loss = 0.34 (0.003 sec)
Step 23650: loss = 0.29 (0.002 sec)
Step 23700: loss = 0.24 (0.002 sec)
Step 23750: loss = 0.27 (0.002 sec)
Step 23800: loss = 0.23 (0.002 sec)
Step 23850: loss = 0.23 (0.002 sec)
Step 23900: loss = 0.22 (0.003 sec)
Step 23950: loss = 0.24 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2919  Precision @ 1: 0.9122
Test Data Eval:
  Num examples: 1600  Num correct: 1249  Precision @ 1: 0.7806
Step 24000: loss = 0.26 (0.005 sec)
Step 24050: loss = 0.28 (0.002 sec)
Step 24100: loss = 0.30 (0.002 sec)
Step 24150: loss = 0.26 (0.003 sec)
Step 24200: loss = 0.23 (0.002 sec)
Step 24250: loss = 0.33 (0.002 sec)
Step 24300: loss = 0.32 (0.002 sec)
Step 24350: loss = 0.25 (0.002 sec)
Step 24400: loss = 0.25 (0.003 sec)
Step 24450: loss = 0.25 (0.002 sec)
Step 24500: loss = 0.27 (0.002 sec)
Step 24550: loss = 0.31 (0.002 sec)
Step 24600: loss = 0.26 (0.002 sec)
Step 24650: loss = 0.25 (0.002 sec)
Step 24700: loss = 0.22 (0.002 sec)
Step 24750: loss = 0.30 (0.002 sec)
Step 24800: loss = 0.26 (0.003 sec)
Step 24850: loss = 0.20 (0.002 sec)
Step 24900: loss = 0.20 (0.002 sec)
Step 24950: loss = 0.29 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2916  Precision @ 1: 0.9113
Test Data Eval:
  Num examples: 1600  Num correct: 1236  Precision @ 1: 0.7725
Step 25000: loss = 0.26 (0.003 sec)
Step 25050: loss = 0.28 (0.002 sec)
Step 25100: loss = 0.22 (0.002 sec)
Step 25150: loss = 0.28 (0.002 sec)
Step 25200: loss = 0.21 (0.003 sec)
Step 25250: loss = 0.23 (0.002 sec)
Step 25300: loss = 0.27 (0.002 sec)
Step 25350: loss = 0.25 (0.002 sec)
Step 25400: loss = 0.26 (0.002 sec)
Step 25450: loss = 0.23 (0.002 sec)
Step 25500: loss = 0.22 (0.002 sec)
Step 25550: loss = 0.22 (0.002 sec)
Step 25600: loss = 0.25 (0.004 sec)
Step 25650: loss = 0.24 (0.002 sec)
Step 25700: loss = 0.29 (0.002 sec)
Step 25750: loss = 0.29 (0.002 sec)
Step 25800: loss = 0.26 (0.002 sec)
Step 25850: loss = 0.25 (0.002 sec)
Step 25900: loss = 0.23 (0.002 sec)
Step 25950: loss = 0.25 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2919  Precision @ 1: 0.9122
Test Data Eval:
  Num examples: 1600  Num correct: 1259  Precision @ 1: 0.7869
Step 26000: loss = 0.22 (0.005 sec)
Step 26050: loss = 0.29 (0.002 sec)
Step 26100: loss = 0.26 (0.003 sec)
Step 26150: loss = 0.31 (0.002 sec)
Step 26200: loss = 0.25 (0.003 sec)
Step 26250: loss = 0.21 (0.002 sec)
Step 26300: loss = 0.26 (0.002 sec)
Step 26350: loss = 0.20 (0.002 sec)
Step 26400: loss = 0.26 (0.003 sec)
Step 26450: loss = 0.19 (0.002 sec)
Step 26500: loss = 0.27 (0.003 sec)
Step 26550: loss = 0.34 (0.002 sec)
Step 26600: loss = 0.26 (0.003 sec)
Step 26650: loss = 0.23 (0.002 sec)
Step 26700: loss = 0.22 (0.002 sec)
Step 26750: loss = 0.23 (0.002 sec)
Step 26800: loss = 0.26 (0.004 sec)
Step 26850: loss = 0.25 (0.002 sec)
Step 26900: loss = 0.22 (0.002 sec)
Step 26950: loss = 0.21 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2930  Precision @ 1: 0.9156
Test Data Eval:
  Num examples: 1600  Num correct: 1256  Precision @ 1: 0.7850
Step 27000: loss = 0.21 (0.003 sec)
Step 27050: loss = 0.23 (0.002 sec)
Step 27100: loss = 0.26 (0.002 sec)
Step 27150: loss = 0.22 (0.003 sec)
Step 27200: loss = 0.19 (0.004 sec)
Step 27250: loss = 0.27 (0.002 sec)
Step 27300: loss = 0.20 (0.004 sec)
Step 27350: loss = 0.28 (0.002 sec)
Step 27400: loss = 0.21 (0.002 sec)
Step 27450: loss = 0.23 (0.003 sec)
Step 27500: loss = 0.29 (0.002 sec)
Step 27550: loss = 0.16 (0.002 sec)
Step 27600: loss = 0.14 (0.004 sec)
Step 27650: loss = 0.20 (0.002 sec)
Step 27700: loss = 0.27 (0.002 sec)
Step 27750: loss = 0.20 (0.002 sec)
Step 27800: loss = 0.27 (0.002 sec)
Step 27850: loss = 0.22 (0.002 sec)
Step 27900: loss = 0.20 (0.002 sec)
Step 27950: loss = 0.23 (0.003 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2924  Precision @ 1: 0.9137
Test Data Eval:
  Num examples: 1600  Num correct: 1245  Precision @ 1: 0.7781
Step 28000: loss = 0.29 (0.005 sec)
Step 28050: loss = 0.32 (0.002 sec)
Step 28100: loss = 0.25 (0.002 sec)
Step 28150: loss = 0.23 (0.002 sec)
Step 28200: loss = 0.27 (0.002 sec)
Step 28250: loss = 0.23 (0.002 sec)
Step 28300: loss = 0.28 (0.002 sec)
Step 28350: loss = 0.21 (0.002 sec)
Step 28400: loss = 0.21 (0.003 sec)
Step 28450: loss = 0.29 (0.002 sec)
Step 28500: loss = 0.29 (0.003 sec)
Step 28550: loss = 0.23 (0.002 sec)
Step 28600: loss = 0.17 (0.002 sec)
Step 28650: loss = 0.21 (0.002 sec)
Step 28700: loss = 0.19 (0.002 sec)
Step 28750: loss = 0.21 (0.002 sec)
Step 28800: loss = 0.21 (0.003 sec)
Step 28850: loss = 0.20 (0.002 sec)
Step 28900: loss = 0.22 (0.002 sec)
Step 28950: loss = 0.20 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2955  Precision @ 1: 0.9234
Test Data Eval:
  Num examples: 1600  Num correct: 1273  Precision @ 1: 0.7956
Step 29000: loss = 0.15 (0.003 sec)
Step 29050: loss = 0.23 (0.002 sec)
Step 29100: loss = 0.21 (0.002 sec)
Step 29150: loss = 0.29 (0.002 sec)
Step 29200: loss = 0.31 (0.004 sec)
Step 29250: loss = 0.25 (0.002 sec)
Step 29300: loss = 0.21 (0.002 sec)
Step 29350: loss = 0.26 (0.002 sec)
Step 29400: loss = 0.22 (0.002 sec)
Step 29450: loss = 0.20 (0.002 sec)
Step 29500: loss = 0.19 (0.002 sec)
Step 29550: loss = 0.25 (0.002 sec)
Step 29600: loss = 0.25 (0.004 sec)
Step 29650: loss = 0.25 (0.002 sec)
Step 29700: loss = 0.22 (0.002 sec)
Step 29750: loss = 0.21 (0.002 sec)
Step 29800: loss = 0.23 (0.003 sec)
Step 29850: loss = 0.29 (0.002 sec)
Step 29900: loss = 0.22 (0.002 sec)
Step 29950: loss = 0.16 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2925  Precision @ 1: 0.9141
Test Data Eval:
  Num examples: 1600  Num correct: 1246  Precision @ 1: 0.7788
Step 30000: loss = 0.25 (0.006 sec)
Step 30050: loss = 0.24 (0.002 sec)
Step 30100: loss = 0.24 (0.002 sec)
Step 30150: loss = 0.19 (0.003 sec)
Step 30200: loss = 0.18 (0.002 sec)
Step 30250: loss = 0.19 (0.002 sec)
Step 30300: loss = 0.23 (0.002 sec)
Step 30350: loss = 0.16 (0.002 sec)
Step 30400: loss = 0.20 (0.003 sec)
Step 30450: loss = 0.26 (0.002 sec)
Step 30500: loss = 0.24 (0.002 sec)
Step 30550: loss = 0.23 (0.002 sec)
Step 30600: loss = 0.24 (0.002 sec)
Step 30650: loss = 0.25 (0.002 sec)
Step 30700: loss = 0.25 (0.002 sec)
Step 30750: loss = 0.22 (0.002 sec)
Step 30800: loss = 0.18 (0.003 sec)
Step 30850: loss = 0.16 (0.002 sec)
Step 30900: loss = 0.26 (0.002 sec)
Step 30950: loss = 0.21 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2959  Precision @ 1: 0.9247
Test Data Eval:
  Num examples: 1600  Num correct: 1281  Precision @ 1: 0.8006
Step 31000: loss = 0.25 (0.003 sec)
Step 31050: loss = 0.21 (0.002 sec)
Step 31100: loss = 0.14 (0.002 sec)
Step 31150: loss = 0.19 (0.002 sec)
Step 31200: loss = 0.22 (0.004 sec)
Step 31250: loss = 0.19 (0.002 sec)
Step 31300: loss = 0.21 (0.002 sec)
Step 31350: loss = 0.26 (0.002 sec)
Step 31400: loss = 0.15 (0.002 sec)
Step 31450: loss = 0.18 (0.002 sec)
Step 31500: loss = 0.18 (0.002 sec)
Step 31550: loss = 0.19 (0.002 sec)
Step 31600: loss = 0.23 (0.004 sec)
Step 31650: loss = 0.18 (0.002 sec)
Step 31700: loss = 0.22 (0.002 sec)
Step 31750: loss = 0.17 (0.002 sec)
Step 31800: loss = 0.25 (0.002 sec)
Step 31850: loss = 0.26 (0.002 sec)
Step 31900: loss = 0.17 (0.003 sec)
Step 31950: loss = 0.18 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2969  Precision @ 1: 0.9278
Test Data Eval:
  Num examples: 1600  Num correct: 1279  Precision @ 1: 0.7994
Step 32000: loss = 0.23 (0.006 sec)
Step 32050: loss = 0.19 (0.002 sec)
Step 32100: loss = 0.21 (0.002 sec)
Step 32150: loss = 0.20 (0.002 sec)
Step 32200: loss = 0.17 (0.002 sec)
Step 32250: loss = 0.17 (0.002 sec)
Step 32300: loss = 0.20 (0.002 sec)
Step 32350: loss = 0.24 (0.002 sec)
Step 32400: loss = 0.21 (0.004 sec)
Step 32450: loss = 0.19 (0.002 sec)
Step 32500: loss = 0.23 (0.002 sec)
Step 32550: loss = 0.16 (0.002 sec)
Step 32600: loss = 0.16 (0.002 sec)
Step 32650: loss = 0.17 (0.002 sec)
Step 32700: loss = 0.23 (0.002 sec)
Step 32750: loss = 0.22 (0.003 sec)
Step 32800: loss = 0.17 (0.003 sec)
Step 32850: loss = 0.26 (0.002 sec)
Step 32900: loss = 0.18 (0.002 sec)
Step 32950: loss = 0.14 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2968  Precision @ 1: 0.9275
Test Data Eval:
  Num examples: 1600  Num correct: 1286  Precision @ 1: 0.8037
Step 33000: loss = 0.21 (0.003 sec)
Step 33050: loss = 0.21 (0.002 sec)
Step 33100: loss = 0.23 (0.002 sec)
Step 33150: loss = 0.21 (0.002 sec)
Step 33200: loss = 0.16 (0.004 sec)
Step 33250: loss = 0.25 (0.002 sec)
Step 33300: loss = 0.22 (0.002 sec)
Step 33350: loss = 0.20 (0.003 sec)
Step 33400: loss = 0.22 (0.002 sec)
Step 33450: loss = 0.20 (0.002 sec)
Step 33500: loss = 0.23 (0.002 sec)
Step 33550: loss = 0.22 (0.002 sec)
Step 33600: loss = 0.15 (0.004 sec)
Step 33650: loss = 0.22 (0.002 sec)
Step 33700: loss = 0.20 (0.002 sec)
Step 33750: loss = 0.22 (0.002 sec)
Step 33800: loss = 0.23 (0.002 sec)
Step 33850: loss = 0.22 (0.003 sec)
Step 33900: loss = 0.22 (0.002 sec)
Step 33950: loss = 0.17 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2982  Precision @ 1: 0.9319
Test Data Eval:
  Num examples: 1600  Num correct: 1291  Precision @ 1: 0.8069
Step 34000: loss = 0.16 (0.006 sec)
Step 34050: loss = 0.19 (0.002 sec)
Step 34100: loss = 0.24 (0.002 sec)
Step 34150: loss = 0.19 (0.002 sec)
Step 34200: loss = 0.23 (0.002 sec)
Step 34250: loss = 0.20 (0.002 sec)
Step 34300: loss = 0.23 (0.002 sec)
Step 34350: loss = 0.14 (0.002 sec)
Step 34400: loss = 0.15 (0.004 sec)
Step 34450: loss = 0.19 (0.003 sec)
Step 34500: loss = 0.19 (0.002 sec)
Step 34550: loss = 0.17 (0.002 sec)
Step 34600: loss = 0.17 (0.002 sec)
Step 34650: loss = 0.20 (0.002 sec)
Step 34700: loss = 0.18 (0.002 sec)
Step 34750: loss = 0.21 (0.002 sec)
Step 34800: loss = 0.27 (0.003 sec)
Step 34850: loss = 0.18 (0.002 sec)
Step 34900: loss = 0.22 (0.002 sec)
Step 34950: loss = 0.18 (0.003 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2985  Precision @ 1: 0.9328
Test Data Eval:
  Num examples: 1600  Num correct: 1295  Precision @ 1: 0.8094
Step 35000: loss = 0.18 (0.003 sec)
Step 35050: loss = 0.14 (0.002 sec)
Step 35100: loss = 0.22 (0.002 sec)
Step 35150: loss = 0.17 (0.002 sec)
Step 35200: loss = 0.12 (0.003 sec)
Step 35250: loss = 0.15 (0.002 sec)
Step 35300: loss = 0.17 (0.002 sec)
Step 35350: loss = 0.16 (0.002 sec)
Step 35400: loss = 0.23 (0.002 sec)
Step 35450: loss = 0.21 (0.002 sec)
Step 35500: loss = 0.16 (0.002 sec)
Step 35550: loss = 0.14 (0.003 sec)
Step 35600: loss = 0.13 (0.004 sec)
Step 35650: loss = 0.21 (0.002 sec)
Step 35700: loss = 0.20 (0.002 sec)
Step 35750: loss = 0.13 (0.002 sec)
Step 35800: loss = 0.19 (0.002 sec)
Step 35850: loss = 0.28 (0.003 sec)
Step 35900: loss = 0.23 (0.002 sec)
Step 35950: loss = 0.17 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2969  Precision @ 1: 0.9278
Test Data Eval:
  Num examples: 1600  Num correct: 1270  Precision @ 1: 0.7937
Step 36000: loss = 0.15 (0.007 sec)
Step 36050: loss = 0.19 (0.002 sec)
Step 36100: loss = 0.16 (0.002 sec)
Step 36150: loss = 0.16 (0.002 sec)
Step 36200: loss = 0.16 (0.002 sec)
Step 36250: loss = 0.24 (0.002 sec)
Step 36300: loss = 0.23 (0.002 sec)
Step 36350: loss = 0.17 (0.002 sec)
Step 36400: loss = 0.23 (0.005 sec)
Step 36450: loss = 0.14 (0.002 sec)
Step 36500: loss = 0.23 (0.003 sec)
Step 36550: loss = 0.19 (0.003 sec)
Step 36600: loss = 0.18 (0.002 sec)
Step 36650: loss = 0.23 (0.002 sec)
Step 36700: loss = 0.13 (0.002 sec)
Step 36750: loss = 0.18 (0.002 sec)
Step 36800: loss = 0.21 (0.003 sec)
Step 36850: loss = 0.18 (0.002 sec)
Step 36900: loss = 0.19 (0.002 sec)
Step 36950: loss = 0.16 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 2980  Precision @ 1: 0.9313
Test Data Eval:
  Num examples: 1600  Num correct: 1290  Precision @ 1: 0.8063
Step 37000: loss = 0.11 (0.003 sec)
Step 37050: loss = 0.25 (0.003 sec)
Step 37100: loss = 0.17 (0.002 sec)
Step 37150: loss = 0.22 (0.002 sec)
Step 37200: loss = 0.16 (0.004 sec)
Step 37250: loss = 0.25 (0.002 sec)
Step 37300: loss = 0.18 (0.002 sec)
Step 37350: loss = 0.18 (0.002 sec)
Step 37400: loss = 0.19 (0.002 sec)
Step 37450: loss = 0.19 (0.002 sec)
Step 37500: loss = 0.19 (0.003 sec)
Step 37550: loss = 0.15 (0.003 sec)
Step 37600: loss = 0.16 (0.003 sec)
Step 37650: loss = 0.20 (0.002 sec)
Step 37700: loss = 0.15 (0.002 sec)
Step 37750: loss = 0.14 (0.002 sec)
Step 37800: loss = 0.22 (0.002 sec)
Step 37850: loss = 0.16 (0.002 sec)
Step 37900: loss = 0.21 (0.002 sec)
Step 37950: loss = 0.16 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3010  Precision @ 1: 0.9406
Test Data Eval:
  Num examples: 1600  Num correct: 1300  Precision @ 1: 0.8125
Step 38000: loss = 0.16 (0.005 sec)
Step 38050: loss = 0.22 (0.002 sec)
Step 38100: loss = 0.20 (0.002 sec)
Step 38150: loss = 0.18 (0.002 sec)
Step 38200: loss = 0.20 (0.002 sec)
Step 38250: loss = 0.17 (0.002 sec)
Step 38300: loss = 0.17 (0.003 sec)
Step 38350: loss = 0.15 (0.002 sec)
Step 38400: loss = 0.16 (0.003 sec)
Step 38450: loss = 0.16 (0.003 sec)
Step 38500: loss = 0.17 (0.002 sec)
Step 38550: loss = 0.14 (0.002 sec)
Step 38600: loss = 0.15 (0.002 sec)
Step 38650: loss = 0.18 (0.002 sec)
Step 38700: loss = 0.20 (0.002 sec)
Step 38750: loss = 0.17 (0.002 sec)
Step 38800: loss = 0.15 (0.003 sec)
Step 38850: loss = 0.19 (0.002 sec)
Step 38900: loss = 0.21 (0.002 sec)
Step 38950: loss = 0.22 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3002  Precision @ 1: 0.9381
Test Data Eval:
  Num examples: 1600  Num correct: 1302  Precision @ 1: 0.8137
Step 39000: loss = 0.15 (0.003 sec)
Step 39050: loss = 0.16 (0.002 sec)
Step 39100: loss = 0.14 (0.002 sec)
Step 39150: loss = 0.19 (0.002 sec)
Step 39200: loss = 0.26 (0.003 sec)
Step 39250: loss = 0.19 (0.002 sec)
Step 39300: loss = 0.17 (0.002 sec)
Step 39350: loss = 0.16 (0.003 sec)
Step 39400: loss = 0.17 (0.002 sec)
Step 39450: loss = 0.17 (0.002 sec)
Step 39500: loss = 0.17 (0.002 sec)
Step 39550: loss = 0.24 (0.002 sec)
Step 39600: loss = 0.23 (0.003 sec)
Step 39650: loss = 0.13 (0.002 sec)
Step 39700: loss = 0.17 (0.003 sec)
Step 39750: loss = 0.15 (0.002 sec)
Step 39800: loss = 0.14 (0.002 sec)
Step 39850: loss = 0.18 (0.002 sec)
Step 39900: loss = 0.14 (0.002 sec)
Step 39950: loss = 0.17 (0.003 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3019  Precision @ 1: 0.9434
Test Data Eval:
  Num examples: 1600  Num correct: 1303  Precision @ 1: 0.8144
Step 40000: loss = 0.14 (0.008 sec)
Step 40050: loss = 0.24 (0.003 sec)
Step 40100: loss = 0.18 (0.002 sec)
Step 40150: loss = 0.13 (0.002 sec)
Step 40200: loss = 0.14 (0.002 sec)
Step 40250: loss = 0.22 (0.002 sec)
Step 40300: loss = 0.20 (0.002 sec)
Step 40350: loss = 0.20 (0.002 sec)
Step 40400: loss = 0.12 (0.004 sec)
Step 40450: loss = 0.18 (0.002 sec)
Step 40500: loss = 0.19 (0.002 sec)
Step 40550: loss = 0.12 (0.002 sec)
Step 40600: loss = 0.15 (0.002 sec)
Step 40650: loss = 0.13 (0.002 sec)
Step 40700: loss = 0.16 (0.002 sec)
Step 40750: loss = 0.17 (0.002 sec)
Step 40800: loss = 0.16 (0.004 sec)
Step 40850: loss = 0.19 (0.002 sec)
Step 40900: loss = 0.19 (0.002 sec)
Step 40950: loss = 0.12 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3018  Precision @ 1: 0.9431
Test Data Eval:
  Num examples: 1600  Num correct: 1288  Precision @ 1: 0.8050
Step 41000: loss = 0.18 (0.003 sec)
Step 41050: loss = 0.13 (0.002 sec)
Step 41100: loss = 0.18 (0.002 sec)
Step 41150: loss = 0.19 (0.002 sec)
Step 41200: loss = 0.14 (0.003 sec)
Step 41250: loss = 0.13 (0.002 sec)
Step 41300: loss = 0.14 (0.002 sec)
Step 41350: loss = 0.14 (0.002 sec)
Step 41400: loss = 0.17 (0.002 sec)
Step 41450: loss = 0.12 (0.002 sec)
Step 41500: loss = 0.18 (0.002 sec)
Step 41550: loss = 0.15 (0.002 sec)
Step 41600: loss = 0.15 (0.004 sec)
Step 41650: loss = 0.21 (0.002 sec)
Step 41700: loss = 0.18 (0.002 sec)
Step 41750: loss = 0.22 (0.002 sec)
Step 41800: loss = 0.12 (0.002 sec)
Step 41850: loss = 0.12 (0.003 sec)
Step 41900: loss = 0.21 (0.002 sec)
Step 41950: loss = 0.13 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3029  Precision @ 1: 0.9466
Test Data Eval:
  Num examples: 1600  Num correct: 1307  Precision @ 1: 0.8169
Step 42000: loss = 0.14 (0.005 sec)
Step 42050: loss = 0.16 (0.003 sec)
Step 42100: loss = 0.12 (0.002 sec)
Step 42150: loss = 0.15 (0.002 sec)
Step 42200: loss = 0.18 (0.002 sec)
Step 42250: loss = 0.12 (0.002 sec)
Step 42300: loss = 0.16 (0.002 sec)
Step 42350: loss = 0.17 (0.002 sec)
Step 42400: loss = 0.13 (0.004 sec)
Step 42450: loss = 0.17 (0.003 sec)
Step 42500: loss = 0.21 (0.002 sec)
Step 42550: loss = 0.13 (0.002 sec)
Step 42600: loss = 0.16 (0.002 sec)
Step 42650: loss = 0.15 (0.002 sec)
Step 42700: loss = 0.14 (0.002 sec)
Step 42750: loss = 0.14 (0.002 sec)
Step 42800: loss = 0.14 (0.004 sec)
Step 42850: loss = 0.14 (0.002 sec)
Step 42900: loss = 0.16 (0.002 sec)
Step 42950: loss = 0.11 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3018  Precision @ 1: 0.9431
Test Data Eval:
  Num examples: 1600  Num correct: 1309  Precision @ 1: 0.8181
Step 43000: loss = 0.16 (0.003 sec)
Step 43050: loss = 0.16 (0.002 sec)
Step 43100: loss = 0.13 (0.002 sec)
Step 43150: loss = 0.19 (0.002 sec)
Step 43200: loss = 0.16 (0.004 sec)
Step 43250: loss = 0.15 (0.003 sec)
Step 43300: loss = 0.17 (0.003 sec)
Step 43350: loss = 0.15 (0.002 sec)
Step 43400: loss = 0.17 (0.002 sec)
Step 43450: loss = 0.15 (0.002 sec)
Step 43500: loss = 0.17 (0.002 sec)
Step 43550: loss = 0.13 (0.002 sec)
Step 43600: loss = 0.14 (0.003 sec)
Step 43650: loss = 0.09 (0.002 sec)
Step 43700: loss = 0.17 (0.002 sec)
Step 43750: loss = 0.10 (0.002 sec)
Step 43800: loss = 0.14 (0.002 sec)
Step 43850: loss = 0.18 (0.002 sec)
Step 43900: loss = 0.10 (0.002 sec)
Step 43950: loss = 0.13 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3039  Precision @ 1: 0.9497
Test Data Eval:
  Num examples: 1600  Num correct: 1295  Precision @ 1: 0.8094
Step 44000: loss = 0.17 (0.008 sec)
Step 44050: loss = 0.13 (0.002 sec)
Step 44100: loss = 0.13 (0.002 sec)
Step 44150: loss = 0.16 (0.002 sec)
Step 44200: loss = 0.16 (0.002 sec)
Step 44250: loss = 0.14 (0.002 sec)
Step 44300: loss = 0.10 (0.002 sec)
Step 44350: loss = 0.13 (0.003 sec)
Step 44400: loss = 0.21 (0.003 sec)
Step 44450: loss = 0.11 (0.002 sec)
Step 44500: loss = 0.13 (0.002 sec)
Step 44550: loss = 0.16 (0.003 sec)
Step 44600: loss = 0.21 (0.002 sec)
Step 44650: loss = 0.10 (0.002 sec)
Step 44700: loss = 0.13 (0.002 sec)
Step 44750: loss = 0.16 (0.002 sec)
Step 44800: loss = 0.14 (0.004 sec)
Step 44850: loss = 0.12 (0.002 sec)
Step 44900: loss = 0.15 (0.002 sec)
Step 44950: loss = 0.14 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3037  Precision @ 1: 0.9491
Test Data Eval:
  Num examples: 1600  Num correct: 1319  Precision @ 1: 0.8244
Step 45000: loss = 0.15 (0.003 sec)
Step 45050: loss = 0.15 (0.002 sec)
Step 45100: loss = 0.14 (0.003 sec)
Step 45150: loss = 0.20 (0.002 sec)
Step 45200: loss = 0.16 (0.004 sec)
Step 45250: loss = 0.12 (0.002 sec)
Step 45300: loss = 0.11 (0.002 sec)
Step 45350: loss = 0.14 (0.002 sec)
Step 45400: loss = 0.18 (0.002 sec)
Step 45450: loss = 0.15 (0.002 sec)
Step 45500: loss = 0.16 (0.002 sec)
Step 45550: loss = 0.11 (0.002 sec)
Step 45600: loss = 0.14 (0.004 sec)
Step 45650: loss = 0.12 (0.002 sec)
Step 45700: loss = 0.16 (0.002 sec)
Step 45750: loss = 0.18 (0.002 sec)
Step 45800: loss = 0.12 (0.002 sec)
Step 45850: loss = 0.13 (0.003 sec)
Step 45900: loss = 0.17 (0.002 sec)
Step 45950: loss = 0.13 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3044  Precision @ 1: 0.9513
Test Data Eval:
  Num examples: 1600  Num correct: 1315  Precision @ 1: 0.8219
Step 46000: loss = 0.13 (0.006 sec)
Step 46050: loss = 0.15 (0.002 sec)
Step 46100: loss = 0.17 (0.002 sec)
Step 46150: loss = 0.14 (0.002 sec)
Step 46200: loss = 0.17 (0.002 sec)
Step 46250: loss = 0.12 (0.002 sec)
Step 46300: loss = 0.20 (0.002 sec)
Step 46350: loss = 0.16 (0.002 sec)
Step 46400: loss = 0.18 (0.004 sec)
Step 46450: loss = 0.13 (0.003 sec)
Step 46500: loss = 0.18 (0.002 sec)
Step 46550: loss = 0.11 (0.003 sec)
Step 46600: loss = 0.12 (0.003 sec)
Step 46650: loss = 0.10 (0.002 sec)
Step 46700: loss = 0.14 (0.002 sec)
Step 46750: loss = 0.13 (0.002 sec)
Step 46800: loss = 0.10 (0.004 sec)
Step 46850: loss = 0.17 (0.002 sec)
Step 46900: loss = 0.14 (0.002 sec)
Step 46950: loss = 0.15 (0.003 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3063  Precision @ 1: 0.9572
Test Data Eval:
  Num examples: 1600  Num correct: 1307  Precision @ 1: 0.8169
Step 47000: loss = 0.13 (0.002 sec)
Step 47050: loss = 0.16 (0.002 sec)
Step 47100: loss = 0.15 (0.003 sec)
Step 47150: loss = 0.16 (0.002 sec)
Step 47200: loss = 0.12 (0.003 sec)
Step 47250: loss = 0.16 (0.002 sec)
Step 47300: loss = 0.16 (0.002 sec)
Step 47350: loss = 0.14 (0.003 sec)
Step 47400: loss = 0.11 (0.002 sec)
Step 47450: loss = 0.13 (0.002 sec)
Step 47500: loss = 0.13 (0.002 sec)
Step 47550: loss = 0.16 (0.003 sec)
Step 47600: loss = 0.20 (0.003 sec)
Step 47650: loss = 0.16 (0.002 sec)
Step 47700: loss = 0.22 (0.002 sec)
Step 47750: loss = 0.15 (0.002 sec)
Step 47800: loss = 0.13 (0.002 sec)
Step 47850: loss = 0.13 (0.002 sec)
Step 47900: loss = 0.13 (0.002 sec)
Step 47950: loss = 0.12 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3064  Precision @ 1: 0.9575
Test Data Eval:
  Num examples: 1600  Num correct: 1317  Precision @ 1: 0.8231
Step 48000: loss = 0.11 (0.006 sec)
Step 48050: loss = 0.14 (0.002 sec)
Step 48100: loss = 0.11 (0.002 sec)
Step 48150: loss = 0.12 (0.002 sec)
Step 48200: loss = 0.14 (0.002 sec)
Step 48250: loss = 0.20 (0.002 sec)
Step 48300: loss = 0.13 (0.002 sec)
Step 48350: loss = 0.16 (0.002 sec)
Step 48400: loss = 0.17 (0.004 sec)
Step 48450: loss = 0.12 (0.002 sec)
Step 48500: loss = 0.16 (0.002 sec)
Step 48550: loss = 0.18 (0.002 sec)
Step 48600: loss = 0.17 (0.002 sec)
Step 48650: loss = 0.15 (0.002 sec)
Step 48700: loss = 0.14 (0.002 sec)
Step 48750: loss = 0.16 (0.002 sec)
Step 48800: loss = 0.10 (0.003 sec)
Step 48850: loss = 0.17 (0.002 sec)
Step 48900: loss = 0.13 (0.003 sec)
Step 48950: loss = 0.12 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3070  Precision @ 1: 0.9594
Test Data Eval:
  Num examples: 1600  Num correct: 1321  Precision @ 1: 0.8256
Step 49000: loss = 0.16 (0.003 sec)
Step 49050: loss = 0.15 (0.002 sec)
Step 49100: loss = 0.13 (0.002 sec)
Step 49150: loss = 0.09 (0.002 sec)
Step 49200: loss = 0.09 (0.003 sec)
Step 49250: loss = 0.09 (0.002 sec)
Step 49300: loss = 0.13 (0.002 sec)
Step 49350: loss = 0.14 (0.002 sec)
Step 49400: loss = 0.08 (0.002 sec)
Step 49450: loss = 0.16 (0.002 sec)
Step 49500: loss = 0.13 (0.002 sec)
Step 49550: loss = 0.09 (0.002 sec)
Step 49600: loss = 0.13 (0.003 sec)
Step 49650: loss = 0.11 (0.002 sec)
Step 49700: loss = 0.12 (0.002 sec)
Step 49750: loss = 0.11 (0.002 sec)
Step 49800: loss = 0.14 (0.002 sec)
Step 49850: loss = 0.15 (0.002 sec)
Step 49900: loss = 0.10 (0.002 sec)
Step 49950: loss = 0.14 (0.002 sec)
Training Data Eval:
  Num examples: 3200  Num correct: 3060  Precision @ 1: 0.9563
Test Data Eval:
  Num examples: 1600  Num correct: 1312  Precision @ 1: 0.8200

In [ ]: