https://github.com/tensorflow/tensorflow

# For Mac CPU only: Python 2
wget http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_CONTAINER_TYPE=CPU,TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=mac-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.8.0-py2-none-any.whl
pip install ./tensorflow-0.8.0-py2-none-any.whl
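
After installing, a quick way to confirm the expected build is on the path (an aside, not part of the original session):

import tensorflow as tf
print(tf.__version__)  # expected: 0.8.0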

In [68]:
import numpy as np
import tensorflow as tf

In [69]:
hello = tf.constant('Hello, TensorFlow!')

In [70]:
sess = tf.Session()
sess.run(hello)


Out[70]:
'Hello, TensorFlow!'

In [71]:
a, b = tf.constant(10), tf.constant(32)
sess.run(a+b)


Out[71]:
42

In [72]:
def binary_encode(i, num_digits):
    return np.array([i >> d & 1 for d in range(num_digits)])

In [73]:
binary_encode(128, 10)


Out[73]:
array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
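
binary_encode returns the bits least-significant first, which is why the 1 for 128 (2**7) sits at index 7 above. A quick sanity check, using binary_decode as a throwaway helper introduced here only for illustration:

def binary_decode(bits):
    # a dot product with powers of two inverts the little-endian bit encoding
    return int(np.dot(bits, 2 ** np.arange(len(bits))))

assert binary_decode(binary_encode(128, 10)) == 128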

In [74]:
def fizz_buzz_encode(i):
    if   i % 15 == 0: return np.array([0, 0, 0, 1])
    elif i % 5  == 0: return np.array([0, 0, 1, 0])
    elif i % 3  == 0: return np.array([0, 1, 0, 0])
    else:             return np.array([1, 0, 0, 0])
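
The one-hot classes are ordered [number, fizz, buzz, fizzbuzz], and the fizz_buzz() decoder defined further down relies on exactly this ordering. Two quick checks (an aside, not part of the original session):

assert (fizz_buzz_encode(15) == np.array([0, 0, 0, 1])).all()  # multiple of 15 -> 'fizzbuzz'
assert (fizz_buzz_encode(7)  == np.array([1, 0, 0, 0])).all()  # anything else -> the number itself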

In [75]:
NUM_DIGITS = 10
trX = np.array([binary_encode(i, NUM_DIGITS) for i in range(101, 2 ** NUM_DIGITS)])
trY = np.array([fizz_buzz_encode(i)          for i in range(101, 2 ** NUM_DIGITS)])

In [76]:
(trX, trY)


Out[76]:
(array([[1, 0, 1, ..., 0, 0, 0],
        [0, 1, 1, ..., 0, 0, 0],
        [1, 1, 1, ..., 0, 0, 0],
        ..., 
        [1, 0, 1, ..., 1, 1, 1],
        [0, 1, 1, ..., 1, 1, 1],
        [1, 1, 1, ..., 1, 1, 1]]), array([[1, 0, 0, 0],
        [0, 1, 0, 0],
        [1, 0, 0, 0],
        ..., 
        [1, 0, 0, 0],
        [1, 0, 0, 0],
        [0, 1, 0, 0]]))

In [77]:
NUM_HIDDEN = 10000

In [78]:
X = tf.placeholder("float", [None, NUM_DIGITS])
Y = tf.placeholder("float", [None, 4])

In [79]:
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

w_h = init_weights([NUM_DIGITS, NUM_HIDDEN])
w_o = init_weights([NUM_HIDDEN, 4])

In [80]:
def model(X, w_h, w_o):
    h = tf.nn.relu(tf.matmul(X, w_h))
    return tf.matmul(h, w_o)

In [81]:
py_x = model(X, w_h, w_o)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
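
The positional-argument call to softmax_cross_entropy_with_logits matches the TensorFlow 0.8 API used here; later releases require keyword arguments instead. A rough equivalent under that newer API (an untested sketch, not what was run above):

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))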

In [82]:
predict_op = tf.argmax(py_x, 1)

In [83]:
def fizz_buzz(i, prediction):
    return [str(i), "fizz", "buzz", "fizzbuzz"][prediction]

In [84]:
BATCH_SIZE = 128
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    for epoch in range(10000):
        p = np.random.permutation(range(len(trX)))
        trX, trY = trX[p], trY[p]
        for start in range(0, len(trX), BATCH_SIZE):
            end = start + BATCH_SIZE
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
        if epoch % 1000 == 0:
            print(epoch, np.mean(np.argmax(trY, axis=1) ==
                             sess.run(predict_op, feed_dict={X: trX, Y: trY})))
    numbers = np.arange(1, 101)
    teX = np.transpose(binary_encode(numbers, NUM_DIGITS))
    teY = sess.run(predict_op, feed_dict={X: teX})
    output = np.vectorize(fizz_buzz)(numbers, teY)

    print(output)


(0, 0.53412784398699886)
(1000, 0.96749729144095342)
(2000, 1.0)
(3000, 1.0)
(4000, 1.0)
(5000, 1.0)
(6000, 1.0)
(7000, 1.0)
(8000, 1.0)
(9000, 1.0)
['1' '2' 'fizz' '4' 'buzz' 'fizz' '7' '8' 'fizz' 'buzz' '11' 'fizz' '13'
 '14' 'fizzbuzz' '16' '17' 'fizz' '19' 'buzz' 'fizz' '22' '23' 'fizz'
 'buzz' '26' 'fizz' '28' '29' 'fizzbuzz' '31' '32' 'fizz' '34' 'buzz'
 'fizz' '37' '38' 'fizz' 'buzz' '41' 'fizz' '43' '44' 'fizzbuzz' '46' '47'
 'fizz' '49' 'buzz' 'fizz' '52' '53' 'fizz' 'buzz' '56' 'fizz' '58' '59'
 'fizzbuzz' '61' '62' 'fizz' '64' 'buzz' 'fizz' '67' '68' 'fizz' 'buzz'
 '71' 'fizz' '73' '74' 'fizzbuzz' '76' '77' 'fizz' '79' 'buzz' 'fizz' '82'
 '83' 'fizz' 'buzz' '86' 'fizz' '88' '89' 'fizzbuzz' '91' '92' 'fizz' '94'
 'buzz' 'fizz' '97' '98' 'fizz' 'buzz']

In [88]:
def direct_fizz_buzz(i):
    if   i % 15 == 0: return 'fizzbuzz'
    elif i % 5  == 0: return 'buzz'
    elif i % 3  == 0: return 'fizz'
    else:             return str(i)

In [89]:
correct_fizz_buzz = [direct_fizz_buzz(i) for i in range(1, 101)]

In [90]:
for i in range(100):
    if output[i] != correct_fizz_buzz[i]:
        print(i+1, output[i], correct_fizz_buzz[i])

In [91]:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting MNIST_data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

In [94]:
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(100000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

In [93]:
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))


0.9121
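
Computing the loss as -sum(y_ * log(y)) can produce NaNs if any softmax output underflows to zero. A more numerically robust alternative (a sketch, not what was run above) keeps the raw logits and uses the fused op:

logits = tf.matmul(x, W) + b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, y_))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)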

In [95]:
scores = [3.0, 1.0, 0.2]
def softmax(x):
    """Compute softmax values for each sets of scores in x."""
    expx = np.exp(x)
    return expx / np.sum(expx, axis=0)
print(softmax(scores))


[ 0.8360188   0.11314284  0.05083836]
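
The softmax above can overflow for large scores because np.exp grows quickly. A numerically stable variant (a standard trick, added here as an aside) subtracts the maximum first, which leaves the result unchanged:

def softmax_stable(x):
    shifted = np.asarray(x) - np.max(x, axis=0)  # shifting by the max does not change the ratios
    expx = np.exp(shifted)
    return expx / np.sum(expx, axis=0)

print(softmax_stable(scores))  # same values as above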

In [101]:
num = 1000000000
for i in range(0, 1000000):
    num += 0.000001
round(num - 1000000000, 3)


Out[101]:
0.954
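
The 0.954 (rather than an exact 1.0) is accumulated rounding error: each tiny 0.000001 added to a number around 10**9 loses precision. As an aside, math.fsum tracks the lost bits and recovers the correctly rounded sum:

import math
parts = [1000000000] + [0.000001] * 1000000
print(round(math.fsum(parts) - 1000000000, 3))  # 1.0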

In [112]:
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial)

def bias_variable(shape):
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial)

In [113]:
def conv2d(x, W):
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')

In [114]:
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

In [115]:
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
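
With SAME padding and 2x2 max pooling, each block halves the spatial size: 28x28 -> 14x14 after the first pool and 14x14 -> 7x7 after the second, which is where the 7 * 7 * 64 in the next cell comes from. A quick way to confirm the static shapes (an aside, not in the original session):

print(h_pool1.get_shape())  # batch dimension unknown, then 14 x 14 x 32
print(h_pool2.get_shape())  # batch dimension unknown, then 7 x 7 x 64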

In [116]:
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

In [117]:
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

In [118]:
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

In [121]:
summary_op = tf.merge_all_summaries()

In [119]:
sess = tf.InteractiveSession()
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
for i in range(1600):
  batch = mnist.train.next_batch(50)
  if i%100 == 0:
    train_accuracy = accuracy.eval(feed_dict={
        x:batch[0], y_: batch[1], keep_prob: 1.0})
    print "step %d, training accuracy %g"%(i, train_accuracy)
  train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print "test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})


step 0, training accuracy 0.14
step 100, training accuracy 0.88
step 200, training accuracy 0.86
step 300, training accuracy 0.96
step 400, training accuracy 0.94
step 500, training accuracy 0.92
step 600, training accuracy 0.9
step 700, training accuracy 0.94
step 800, training accuracy 1
step 900, training accuracy 0.98
step 1000, training accuracy 0.98
step 1100, training accuracy 0.94
step 1200, training accuracy 0.94
step 1300, training accuracy 0.94
step 1400, training accuracy 0.94
step 1500, training accuracy 0.9
test accuracy 0.9743

In [1]:
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist


# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 12000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size.  '
                     'Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')


def placeholder_inputs(batch_size):
  """Generate placeholder variables to represent the input tensors.

  These placeholders are used as inputs by the rest of the model building
  code and will be fed from the downloaded data in the .run() loop, below.

  Args:
    batch_size: The batch size will be baked into both placeholders.

  Returns:
    images_placeholder: Images placeholder.
    labels_placeholder: Labels placeholder.
  """
  # Note that the shapes of the placeholders match the shapes of the full
  # image and label tensors, except the first dimension is now batch_size
  # rather than the full size of the train or test data sets.
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                         mnist.IMAGE_PIXELS))
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
  return images_placeholder, labels_placeholder


def fill_feed_dict(data_set, images_pl, labels_pl):
  """Fills the feed_dict for training the given step.

  A feed_dict takes the form of:
  feed_dict = {
      <placeholder>: <tensor of values to be passed for placeholder>,
      ....
  }

  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().

  Returns:
    feed_dict: The feed dictionary mapping from placeholders to values.
  """
  # Create the feed_dict for the placeholders filled with the next
  # `batch_size` examples.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                 FLAGS.fake_data)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict


def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = true_count / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))


def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

    # And then after everything is built, start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)

      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, FLAGS.train_dir, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)


def main(_):
  run_training()

tf.app.run()


Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
Extracting data/t10k-labels-idx1-ubyte.gz
Step 0: loss = 2.32 (0.008 sec)
Step 100: loss = 2.10 (0.003 sec)
Step 200: loss = 1.77 (0.003 sec)
Step 300: loss = 1.46 (0.005 sec)
Step 400: loss = 1.17 (0.005 sec)
Step 500: loss = 0.81 (0.003 sec)
Step 600: loss = 0.79 (0.003 sec)
Step 700: loss = 0.73 (0.004 sec)
Step 800: loss = 0.56 (0.003 sec)
Step 900: loss = 0.53 (0.006 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 47543  Precision @ 1: 0.8644
Validation Data Eval:
  Num examples: 5000  Num correct: 4355  Precision @ 1: 0.8710
Test Data Eval:
  Num examples: 10000  Num correct: 8683  Precision @ 1: 0.8683
Step 1000: loss = 0.56 (0.016 sec)
Step 1100: loss = 0.39 (0.117 sec)
Step 1200: loss = 0.41 (0.003 sec)
Step 1300: loss = 0.57 (0.003 sec)
Step 1400: loss = 0.50 (0.003 sec)
Step 1500: loss = 0.57 (0.003 sec)
Step 1600: loss = 0.42 (0.003 sec)
Step 1700: loss = 0.47 (0.003 sec)
Step 1800: loss = 0.29 (0.004 sec)
Step 1900: loss = 0.34 (0.005 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 49365  Precision @ 1: 0.8975
Validation Data Eval:
  Num examples: 5000  Num correct: 4519  Precision @ 1: 0.9038
Test Data Eval:
  Num examples: 10000  Num correct: 9029  Precision @ 1: 0.9029
Step 2000: loss = 0.24 (0.015 sec)
Step 2100: loss = 0.26 (0.003 sec)
Step 2200: loss = 0.47 (0.112 sec)
Step 2300: loss = 0.40 (0.003 sec)
Step 2400: loss = 0.32 (0.004 sec)
Step 2500: loss = 0.28 (0.003 sec)
Step 2600: loss = 0.27 (0.003 sec)
Step 2700: loss = 0.34 (0.003 sec)
Step 2800: loss = 0.34 (0.003 sec)
Step 2900: loss = 0.22 (0.005 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 50062  Precision @ 1: 0.9102
Validation Data Eval:
  Num examples: 5000  Num correct: 4591  Precision @ 1: 0.9182
Test Data Eval:
  Num examples: 10000  Num correct: 9126  Precision @ 1: 0.9126
Step 3000: loss = 0.40 (0.018 sec)
Step 3100: loss = 0.26 (0.004 sec)
Step 3200: loss = 0.32 (0.003 sec)
Step 3300: loss = 0.21 (0.197 sec)
Step 3400: loss = 0.25 (0.003 sec)
Step 3500: loss = 0.22 (0.003 sec)
Step 3600: loss = 0.39 (0.003 sec)
Step 3700: loss = 0.40 (0.004 sec)
Step 3800: loss = 0.26 (0.003 sec)
Step 3900: loss = 0.24 (0.004 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 50423  Precision @ 1: 0.9168
Validation Data Eval:
  Num examples: 5000  Num correct: 4621  Precision @ 1: 0.9242
Test Data Eval:
  Num examples: 10000  Num correct: 9213  Precision @ 1: 0.9213
Step 4000: loss = 0.19 (0.015 sec)
Step 4100: loss = 0.36 (0.004 sec)
Step 4200: loss = 0.35 (0.003 sec)
Step 4300: loss = 0.39 (0.005 sec)
Step 4400: loss = 0.31 (0.116 sec)
Step 4500: loss = 0.24 (0.004 sec)
Step 4600: loss = 0.48 (0.005 sec)
Step 4700: loss = 0.33 (0.005 sec)
Step 4800: loss = 0.19 (0.005 sec)
Step 4900: loss = 0.26 (0.003 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 50833  Precision @ 1: 0.9242
Validation Data Eval:
  Num examples: 5000  Num correct: 4664  Precision @ 1: 0.9328
Test Data Eval:
  Num examples: 10000  Num correct: 9263  Precision @ 1: 0.9263
Step 5000: loss = 0.37 (0.016 sec)
Step 5100: loss = 0.25 (0.005 sec)
Step 5200: loss = 0.22 (0.003 sec)
Step 5300: loss = 0.27 (0.003 sec)
Step 5400: loss = 0.30 (0.003 sec)
Step 5500: loss = 0.35 (0.164 sec)
Step 5600: loss = 0.26 (0.004 sec)
Step 5700: loss = 0.35 (0.011 sec)
Step 5800: loss = 0.18 (0.003 sec)
Step 5900: loss = 0.20 (0.003 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51179  Precision @ 1: 0.9305
Validation Data Eval:
  Num examples: 5000  Num correct: 4681  Precision @ 1: 0.9362
Test Data Eval:
  Num examples: 10000  Num correct: 9320  Precision @ 1: 0.9320
Step 6000: loss = 0.30 (0.016 sec)
Step 6100: loss = 0.22 (0.003 sec)
Step 6200: loss = 0.26 (0.003 sec)
Step 6300: loss = 0.24 (0.003 sec)
Step 6400: loss = 0.27 (0.003 sec)
Step 6500: loss = 0.25 (0.003 sec)
Step 6600: loss = 0.15 (0.110 sec)
Step 6700: loss = 0.19 (0.003 sec)
Step 6800: loss = 0.32 (0.003 sec)
Step 6900: loss = 0.15 (0.003 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51419  Precision @ 1: 0.9349
Validation Data Eval:
  Num examples: 5000  Num correct: 4693  Precision @ 1: 0.9386
Test Data Eval:
  Num examples: 10000  Num correct: 9369  Precision @ 1: 0.9369
Step 7000: loss = 0.16 (0.018 sec)
Step 7100: loss = 0.18 (0.004 sec)
Step 7200: loss = 0.24 (0.003 sec)
Step 7300: loss = 0.15 (0.003 sec)
Step 7400: loss = 0.18 (0.003 sec)
Step 7500: loss = 0.17 (0.003 sec)
Step 7600: loss = 0.27 (0.004 sec)
Step 7700: loss = 0.23 (0.109 sec)
Step 7800: loss = 0.26 (0.003 sec)
Step 7900: loss = 0.15 (0.003 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51653  Precision @ 1: 0.9391
Validation Data Eval:
  Num examples: 5000  Num correct: 4716  Precision @ 1: 0.9432
Test Data Eval:
  Num examples: 10000  Num correct: 9378  Precision @ 1: 0.9378
Step 8000: loss = 0.36 (0.017 sec)
Step 8100: loss = 0.13 (0.004 sec)
Step 8200: loss = 0.27 (0.003 sec)
Step 8300: loss = 0.18 (0.003 sec)
Step 8400: loss = 0.16 (0.003 sec)
Step 8500: loss = 0.22 (0.003 sec)
Step 8600: loss = 0.19 (0.003 sec)
Step 8700: loss = 0.23 (0.003 sec)
Step 8800: loss = 0.33 (0.115 sec)
Step 8900: loss = 0.22 (0.003 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51928  Precision @ 1: 0.9441
Validation Data Eval:
  Num examples: 5000  Num correct: 4730  Precision @ 1: 0.9460
Test Data Eval:
  Num examples: 10000  Num correct: 9434  Precision @ 1: 0.9434
Step 9000: loss = 0.17 (0.017 sec)
Step 9100: loss = 0.14 (0.003 sec)
Step 9200: loss = 0.25 (0.004 sec)
Step 9300: loss = 0.23 (0.003 sec)
Step 9400: loss = 0.12 (0.003 sec)
Step 9500: loss = 0.29 (0.003 sec)
Step 9600: loss = 0.11 (0.003 sec)
Step 9700: loss = 0.25 (0.003 sec)
Step 9800: loss = 0.25 (0.003 sec)
Step 9900: loss = 0.25 (0.120 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 51992  Precision @ 1: 0.9453
Validation Data Eval:
  Num examples: 5000  Num correct: 4753  Precision @ 1: 0.9506
Test Data Eval:
  Num examples: 10000  Num correct: 9451  Precision @ 1: 0.9451
Step 10000: loss = 0.28 (0.014 sec)
Step 10100: loss = 0.14 (0.003 sec)
Step 10200: loss = 0.13 (0.003 sec)
Step 10300: loss = 0.15 (0.003 sec)
Step 10400: loss = 0.18 (0.004 sec)
Step 10500: loss = 0.32 (0.003 sec)
Step 10600: loss = 0.18 (0.003 sec)
Step 10700: loss = 0.15 (0.003 sec)
Step 10800: loss = 0.14 (0.004 sec)
Step 10900: loss = 0.16 (0.003 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52253  Precision @ 1: 0.9501
Validation Data Eval:
  Num examples: 5000  Num correct: 4765  Precision @ 1: 0.9530
Test Data Eval:
  Num examples: 10000  Num correct: 9491  Precision @ 1: 0.9491
Step 11000: loss = 0.12 (0.231 sec)
Step 11100: loss = 0.15 (0.003 sec)
Step 11200: loss = 0.17 (0.004 sec)
Step 11300: loss = 0.21 (0.003 sec)
Step 11400: loss = 0.20 (0.003 sec)
Step 11500: loss = 0.37 (0.003 sec)
Step 11600: loss = 0.29 (0.003 sec)
Step 11700: loss = 0.10 (0.003 sec)
Step 11800: loss = 0.27 (0.003 sec)
Step 11900: loss = 0.21 (0.003 sec)
Training Data Eval:
  Num examples: 55000  Num correct: 52384  Precision @ 1: 0.9524
Validation Data Eval:
  Num examples: 5000  Num correct: 4769  Precision @ 1: 0.9538
Test Data Eval:
  Num examples: 10000  Num correct: 9499  Precision @ 1: 0.9499
An exception has occurred, use %tb to see the full traceback.

SystemExit
To exit: use 'exit', 'quit', or Ctrl-D.

In [2]:
%tb


---------------------------------------------------------------------------
SystemExit                                Traceback (most recent call last)
<ipython-input-1-d416aa56119e> in <module>()
    222   run_training()
    223 
--> 224 tf.app.run()

/Users/utensil/miniconda2/lib/python2.7/site-packages/tensorflow/python/platform/app.pyc in run(main)
     28   f._parse_flags()
     29   main = main or sys.modules['__main__'].main
---> 30   sys.exit(main(sys.argv))

SystemExit: 
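
The SystemExit is expected: tf.app.run() parses the flags and then calls sys.exit(main(sys.argv)), which IPython reports rather than swallowing. Inside a notebook, one way to avoid it (an aside, not part of the original session) is to skip the wrapper and call the training function directly:

run_training()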

In [ ]: