In [1]:
from __future__ import print_function

In [2]:
# Download helper from the Google TensorFlow tutorial
from __future__ import print_function
import gzip
import os
import urllib.request

import numpy

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'


def maybe_download(filename, work_directory):
  """Download the data from Yann's website, unless it's already here."""
  if not os.path.exists(work_directory):
    os.mkdir(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not os.path.exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  return filepath


def _read32(bytestream):
  # The IDX header stores counts as big-endian uint32; index with [0] so we
  # return a scalar instead of a length-1 array (a length-1 array used as an
  # index triggers numpy's VisibleDeprecationWarning).
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot
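
# A quick sketch of what this produces:
#   dense_to_one_hot(numpy.array([0, 2, 9]))
# is a (3, 10) array of zeros with a 1 in columns 0, 2 and 9 respectively.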


def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels


class DataSet(object):

  def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1.0 for _ in range(784)]
      fake_label = 0
      return [fake_image for _ in range(batch_size)], [
          fake_label for _ in range(batch_size)]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir, fake_data=False, one_hot=False):
  class DataSets(object):
    pass
  data_sets = DataSets()

  if fake_data:
    data_sets.train = DataSet([], [], fake_data=True)
    data_sets.validation = DataSet([], [], fake_data=True)
    data_sets.test = DataSet([], [], fake_data=True)
    return data_sets

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000

  local_file = maybe_download(TRAIN_IMAGES, train_dir)
  train_images = extract_images(local_file)

  local_file = maybe_download(TRAIN_LABELS, train_dir)
  train_labels = extract_labels(local_file, one_hot=one_hot)

  local_file = maybe_download(TEST_IMAGES, train_dir)
  test_images = extract_images(local_file)

  local_file = maybe_download(TEST_LABELS, train_dir)
  test_labels = extract_labels(local_file, one_hot=one_hot)

  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]

  data_sets.train = DataSet(train_images, train_labels)
  data_sets.validation = DataSet(validation_images, validation_labels)
  data_sets.test = DataSet(test_images, test_labels)

  return data_sets
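
read_data_sets carves the first VALIDATION_SIZE = 5000 images off the 60000-image training set as a validation set, leaving 55000 examples for training (the 55000 printed by mnist.train.num_examples below).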

In [3]:
mnist = read_data_sets("MNIST_data/", one_hot=True)


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

In [4]:
mnist.test.images


Out[4]:
array([[ 0.,  0.,  0., ...,  0.,  0.,  0.],
       [ 0.,  0.,  0., ...,  0.,  0.,  0.],
       [ 0.,  0.,  0., ...,  0.,  0.,  0.],
       ..., 
       [ 0.,  0.,  0., ...,  0.,  0.,  0.],
       [ 0.,  0.,  0., ...,  0.,  0.,  0.],
       [ 0.,  0.,  0., ...,  0.,  0.,  0.]], dtype=float32)

In [5]:
mnist.test.images.shape


Out[5]:
(10000, 784)

In [6]:
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(mnist.test.images[0].reshape([28,28]), cmap='gray')


Out[6]:
<matplotlib.image.AxesImage at 0x7f871ffec6d8>

In [7]:
from io import BytesIO
import PIL
import numpy as np
from IPython.display import display, Image

def display_img_array(ima, **kwargs):
    if ima.dtype == np.float32 or ima.dtype == np.float64:
        ima = (ima*255).astype(np.uint8)
    im = PIL.Image.fromarray(ima)
    bio = BytesIO()
    im.save(bio, format='png')
    display(Image(bio.getvalue(), format='png', **kwargs))
print(mnist.train.num_examples)
for i in range(10):
    print(mnist.train.labels[i])
    display_img_array(mnist.train.images[i].reshape([28,28]), width=200)


55000
[ 0.  0.  0.  0.  0.  0.  0.  1.  0.  0.]
[ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.]
[ 0.  0.  0.  0.  1.  0.  0.  0.  0.  0.]
[ 0.  0.  0.  0.  0.  0.  1.  0.  0.  0.]
[ 0.  1.  0.  0.  0.  0.  0.  0.  0.  0.]
[ 0.  0.  0.  0.  0.  0.  0.  0.  1.  0.]
[ 0.  1.  0.  0.  0.  0.  0.  0.  0.  0.]
[ 1.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
[ 0.  0.  0.  0.  0.  0.  0.  0.  0.  1.]
[ 0.  0.  0.  0.  0.  0.  0.  0.  1.  0.]

In [8]:
import tensorflow as tf
# Interactive session (aka default session)
sess = tf.InteractiveSession()

In [9]:
from tfdot import tfdot

Softmax regression
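
The first model is plain softmax regression: one affine map from the 784 pixel values to 10 class scores, normalized by a softmax.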


In [10]:
x = tf.placeholder("float", shape=[None, 784], name="x")
y_ = tf.placeholder("float", shape=[None, 10], name="y_")

In [11]:
W = tf.Variable(tf.zeros([784, 10]), name='W')
b = tf.Variable(tf.zeros([10]), name='b')
tf.initialize_all_variables().run()

In [12]:
tfdot()


Out[12]:
[tfdot graph: variables W and b with their zeros initializers, Assign and read ops, placeholders x and y_, and the init op]

In [13]:
y = tf.nn.softmax(tf.matmul(x,W)+b, name="y_softmax")
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
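
Here y = softmax(xW + b), and the loss is the cross-entropy -Σ y_ log(y), summed over the 10 classes and over the whole batch.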

In [14]:
tfdot()


Out[14]:
[tfdot graph: the graph above extended with MatMul, add and y_softmax for the model, plus Log, mul, Sum and Neg computing the cross-entropy]

In [15]:
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

In [16]:
tfdot()


Out[16]:
[tfdot graph: the full training graph; a gradients subgraph mirrors every forward op, and GradientDescent update ops apply the W and b gradients via ApplyGradientDescent]

In [17]:
for i in range(1000):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x:batch[0], y_:batch[1]})
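
Each step draws a batch of 50 examples, so 1000 steps touch 50000 images, roughly one pass over the 55000-example training set.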

In [18]:
y.eval(feed_dict={x: mnist.test.images[:10]})


Out[18]:
array([[  2.04607495e-05,   1.72720882e-09,   7.74096188e-05,
          3.64776165e-03,   1.25222232e-06,   2.27521141e-05,
          1.14668319e-08,   9.95974720e-01,   3.68831243e-05,
          2.18784597e-04],
       [  3.07305111e-03,   5.82525308e-06,   9.38659370e-01,
          1.41670089e-02,   2.41183823e-10,   1.88912619e-02,
          1.82044487e-02,   3.78664530e-11,   6.99905958e-03,
          4.36300551e-09],
       [  1.08050426e-05,   9.58789229e-01,   1.68991685e-02,
          4.96273441e-03,   1.30500193e-04,   1.69314141e-03,
          1.65022223e-03,   7.72828655e-03,   7.73562770e-03,
          4.00237361e-04],
       [  9.98436034e-01,   3.56979273e-11,   1.14412091e-04,
          5.70518314e-05,   1.15544543e-08,   1.05535379e-03,
          7.04479899e-05,   2.59409717e-05,   2.33748593e-04,
          7.07744903e-06],
       [  4.65250545e-04,   2.06047775e-06,   3.11118132e-03,
          3.27388814e-04,   9.57790196e-01,   7.64666591e-04,
          3.24418326e-03,   8.40911549e-03,   6.37929933e-03,
          1.95066575e-02],
       [  9.48207344e-07,   9.67540145e-01,   4.06762632e-03,
          7.12868478e-03,   2.67504438e-05,   1.92249936e-04,
          6.22161606e-05,   1.09165125e-02,   9.53817368e-03,
          5.26619609e-04],
       [  2.11476481e-06,   4.60086369e-07,   9.28449424e-07,
          3.25824134e-04,   9.70149279e-01,   1.16053643e-02,
          3.44484906e-05,   5.92230936e-04,   1.45400241e-02,
          2.74942839e-03],
       [  3.40133084e-07,   1.48918875e-03,   6.09815179e-04,
          8.33091419e-03,   6.56317547e-03,   4.04195413e-02,
          3.06424161e-04,   3.62417544e-03,   1.53113138e-02,
          9.23345029e-01],
       [  1.70827424e-03,   9.22327331e-07,   1.23926089e-03,
          4.42538749e-07,   4.65040654e-03,   3.26604536e-03,
          9.87447560e-01,   1.36961035e-07,   1.66551699e-03,
          2.14085157e-05],
       [  4.20810511e-06,   6.50707843e-09,   1.87727176e-06,
          1.07238739e-05,   1.11040333e-02,   2.37043874e-04,
          5.80500864e-06,   1.05574459e-01,   2.32600849e-02,
          8.59801710e-01]], dtype=float32)

In [19]:
prediction = tf.argmax(y, 1)

In [20]:
# print predictions
prediction.eval(feed_dict={x: mnist.test.images[:10]})


Out[20]:
array([7, 2, 1, 0, 4, 1, 4, 9, 6, 9])

In [21]:
# print labels
np.argmax(mnist.test.labels[:10],1)


Out[21]:
array([7, 2, 1, 0, 4, 1, 4, 9, 5, 9])
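
The two arrays disagree at index 8: the model predicts 6 where the label is 5. The next cell displays that image.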

In [22]:
display_img_array(mnist.test.images[8].reshape(28,28), width=100)



In [23]:
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_, 1))

In [24]:
correct_prediction.eval(feed_dict={x: mnist.test.images[:10] , y_: mnist.test.labels[:10]})


Out[24]:
array([ True,  True,  True,  True,  True,  True,  True,  True, False,  True], dtype=bool)

In [25]:
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
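
tf.cast maps the booleans to 0.0/1.0, so the mean is just the fraction of correct predictions.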

In [26]:
accuracy.eval(feed_dict={x: mnist.test.images[:10] , y_: mnist.test.labels[:10]})


Out[26]:
0.89999998

In [27]:
accuracy.eval(feed_dict={x: mnist.test.images , y_: mnist.test.labels})


Out[27]:
0.90920007

In [28]:
for t in range(10):
    for i in range(1000):
        batch = mnist.train.next_batch(200)
        train_step.run(feed_dict={x:batch[0], y_:batch[1]})
    a = accuracy.eval(feed_dict={x: mnist.validation.images , y_: mnist.validation.labels})
    print(t, a)


0 0.9194
1 0.9026
2 0.9162
3 0.9218
4 0.9232
5 0.9234
6 0.9054
7 0.9226
8 0.9062
9 0.9262

In [29]:
accuracy.eval(feed_dict={x: mnist.test.images , y_: mnist.test.labels})


Out[29]:
0.92140007

92% accuracy on MNIST is bad. It's almost embarrassingly bad.

http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html

Multilayer Convolutional Network
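
The second model stacks two convolution + max-pooling layers, a fully-connected layer with dropout, and a 10-way softmax readout.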


In [30]:
# reset session and graph
import tensorflow as tf
from tfdot import tfdot
tf.reset_default_graph()
if 'sess' in globals():
    sess.close()
sess = tf.InteractiveSession()
x = tf.placeholder("float", shape=[None, 784], name="x")
y_ = tf.placeholder("float", shape=[None, 10], name="y_")

In [31]:
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name ='W')
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name = 'b')
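
The weights start from a truncated normal (stddev 0.1) to break symmetry; the biases start at a slightly positive 0.1 so the ReLU units below receive a positive input at first and are less likely to begin "dead".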

In [32]:
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
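
With stride 1 and SAME padding, conv2d preserves the spatial size, while each max_pool_2x2 halves it: 28x28 becomes 14x14 after the first pool and 7x7 after the second. That is where the 7*7*64 flatten size in the fully-connected layer below comes from.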

In [33]:
# first layer
with tf.name_scope('conv1'):
    ## variables
    W_conv1 = weight_variable([5,5,1,32])
    b_conv1 = bias_variable([32])
    ## build the layer
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

In [34]:
tfdot()


Out[34]:
[tfdot graph: the conv1 scope with its W and b variables, the Reshape of x to a 28x28 image, and the Conv2D -> add -> Relu -> MaxPool chain]

In [35]:
# second layer
with tf.name_scope('conv2'):
    ## variables
    W_conv2 = weight_variable([5,5,32,64])
    b_conv2 = bias_variable([64])
    ## build the layer
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

In [36]:
tfdot()


Out[36]:
[tfdot graph: conv1's MaxPool feeding conv2, which has its own W/b variables and Conv2D -> add -> Relu -> MaxPool chain]

In [37]:
# fully-connected layer
with tf.name_scope('full'):
    W_fc1 = weight_variable([7*7*64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1)+b_fc1)

In [38]:
tfdot()


Out[38]:
[tfdot graph: conv1 and conv2 followed by the full scope, which reshapes the pooled features and applies MatMul -> add -> Relu]

In [39]:
# Dropout: A Simple Way to Prevent Neural Networks from Overfitting
# https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf
with tf.name_scope('dropout'):
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
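
tf.nn.dropout zeroes each activation with probability 1 - keep_prob and scales the survivors by 1/keep_prob, so the expected total activation is unchanged; training below feeds keep_prob = 0.5 and evaluation feeds 1.0.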

In [40]:
# Readout
with tf.name_scope('readout'):
    W_fc2 = weight_variable([1024,10])
    b_fc2 = bias_variable([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2)+b_fc2)

In [41]:
tfdot()


Out[41]:
[tfdot graph: the complete network; the dropout scope (random_uniform, Floor, Inv, mul) sits between full/Relu and the readout MatMul -> add -> Softmax]

In [42]:
cross_entropy = - tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
prediction = tf.argmax(y_conv, 1)
correct_prediction = tf.equal(prediction, tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
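
Note that -tf.reduce_sum(y_*tf.log(y_conv)) is numerically fragile: once the softmax saturates, log(0) produces NaN. A more stable sketch (assuming the TF 0.x signature tf.nn.softmax_cross_entropy_with_logits(logits, labels)) computes the loss from the pre-softmax logits:

logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2  # readout scores before softmax
cross_entropy = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(logits, y_))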

In [43]:
%%timeit -r 1 -n 1
tf.initialize_all_variables().run()
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        train_accuracy = accuracy.eval(feed_dict = {
                x: batch[0], y_: batch[1], keep_prob: 1.0 })
        print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict= {x: batch[0], y_: batch[1], keep_prob: 0.5 })
for i in range(0, mnist.test.num_examples, 1000):
    print("test accuracy %g"%accuracy.eval(feed_dict={x: mnist.test.images[i:i+1000], 
                                                  y_: mnist.test.labels[i:i+1000],
                                                  keep_prob: 1.0}))


step 0, training accuracy 0.16
step 100, training accuracy 0.8
step 200, training accuracy 0.92
step 300, training accuracy 0.92
step 400, training accuracy 0.92
step 500, training accuracy 0.9
step 600, training accuracy 0.98
step 700, training accuracy 0.96
step 800, training accuracy 0.96
step 900, training accuracy 0.98
step 1000, training accuracy 0.94
step 1100, training accuracy 0.98
step 1200, training accuracy 0.96
step 1300, training accuracy 0.98
step 1400, training accuracy 0.96
step 1500, training accuracy 0.98
step 1600, training accuracy 1
step 1700, training accuracy 1
step 1800, training accuracy 0.98
step 1900, training accuracy 1
step 2000, training accuracy 0.96
step 2100, training accuracy 0.98
step 2200, training accuracy 0.98
step 2300, training accuracy 0.92
step 2400, training accuracy 1
step 2500, training accuracy 0.96
step 2600, training accuracy 0.94
step 2700, training accuracy 1
step 2800, training accuracy 0.98
step 2900, training accuracy 1
step 3000, training accuracy 0.98
step 3100, training accuracy 0.98
step 3200, training accuracy 0.98
step 3300, training accuracy 0.96
step 3400, training accuracy 1
step 3500, training accuracy 1
step 3600, training accuracy 0.98
step 3700, training accuracy 0.94
step 3800, training accuracy 0.96
step 3900, training accuracy 1
step 4000, training accuracy 0.98
step 4100, training accuracy 1
step 4200, training accuracy 1
step 4300, training accuracy 0.98
step 4400, training accuracy 1
step 4500, training accuracy 0.98
step 4600, training accuracy 1
step 4700, training accuracy 0.96
step 4800, training accuracy 0.96
step 4900, training accuracy 1
step 5000, training accuracy 1
step 5100, training accuracy 0.98
step 5200, training accuracy 1
step 5300, training accuracy 0.96
step 5400, training accuracy 1
step 5500, training accuracy 1
step 5600, training accuracy 0.98
step 5700, training accuracy 1
step 5800, training accuracy 0.98
step 5900, training accuracy 1
step 6000, training accuracy 1
step 6100, training accuracy 1
step 6200, training accuracy 1
step 6300, training accuracy 0.98
step 6400, training accuracy 0.98
step 6500, training accuracy 1
step 6600, training accuracy 0.98
step 6700, training accuracy 1
step 6800, training accuracy 1
step 6900, training accuracy 1
step 7000, training accuracy 1
step 7100, training accuracy 1
step 7200, training accuracy 1
step 7300, training accuracy 0.98
step 7400, training accuracy 1
step 7500, training accuracy 1
step 7600, training accuracy 1
step 7700, training accuracy 1
step 7800, training accuracy 1
step 7900, training accuracy 1
step 8000, training accuracy 1
step 8100, training accuracy 1
step 8200, training accuracy 1
step 8300, training accuracy 0.98
step 8400, training accuracy 1
step 8500, training accuracy 1
step 8600, training accuracy 1
step 8700, training accuracy 1
step 8800, training accuracy 1
step 8900, training accuracy 1
step 9000, training accuracy 1
step 9100, training accuracy 1
step 9200, training accuracy 1
step 9300, training accuracy 0.98
step 9400, training accuracy 1
step 9500, training accuracy 0.98
step 9600, training accuracy 1
step 9700, training accuracy 1
step 9800, training accuracy 1
step 9900, training accuracy 1
step 10000, training accuracy 1
step 10100, training accuracy 1
step 10200, training accuracy 1
step 10300, training accuracy 0.98
step 10400, training accuracy 0.98
step 10500, training accuracy 1
step 10600, training accuracy 0.98
step 10700, training accuracy 1
step 10800, training accuracy 1
step 10900, training accuracy 0.98
step 11000, training accuracy 1
step 11100, training accuracy 1
step 11200, training accuracy 1
step 11300, training accuracy 1
step 11400, training accuracy 1
step 11500, training accuracy 1
step 11600, training accuracy 1
step 11700, training accuracy 0.98
step 11800, training accuracy 1
step 11900, training accuracy 1
step 12000, training accuracy 1
step 12100, training accuracy 1
step 12200, training accuracy 1
step 12300, training accuracy 1
step 12400, training accuracy 1
step 12500, training accuracy 1
step 12600, training accuracy 1
step 12700, training accuracy 1
step 12800, training accuracy 0.98
step 12900, training accuracy 1
step 13000, training accuracy 1
step 13100, training accuracy 1
step 13200, training accuracy 1
step 13300, training accuracy 1
step 13400, training accuracy 1
step 13500, training accuracy 1
step 13600, training accuracy 1
step 13700, training accuracy 0.98
step 13800, training accuracy 0.98
step 13900, training accuracy 1
step 14000, training accuracy 1
step 14100, training accuracy 1
step 14200, training accuracy 1
step 14300, training accuracy 1
step 14400, training accuracy 1
step 14500, training accuracy 0.98
step 14600, training accuracy 1
step 14700, training accuracy 1
step 14800, training accuracy 1
step 14900, training accuracy 0.98
step 15000, training accuracy 1
step 15100, training accuracy 1
step 15200, training accuracy 1
step 15300, training accuracy 1
step 15400, training accuracy 1
step 15500, training accuracy 1
step 15600, training accuracy 1
step 15700, training accuracy 0.98
step 15800, training accuracy 1
step 15900, training accuracy 1
step 16000, training accuracy 1
step 16100, training accuracy 1
step 16200, training accuracy 1
step 16300, training accuracy 1
step 16400, training accuracy 1
step 16500, training accuracy 1
step 16600, training accuracy 1
step 16700, training accuracy 1
step 16800, training accuracy 1
step 16900, training accuracy 1
step 17000, training accuracy 1
step 17100, training accuracy 0.98
step 17200, training accuracy 1
step 17300, training accuracy 1
step 17400, training accuracy 1
step 17500, training accuracy 1
step 17600, training accuracy 1
step 17700, training accuracy 1
step 17800, training accuracy 1
step 17900, training accuracy 1
step 18000, training accuracy 1
step 18100, training accuracy 1
step 18200, training accuracy 1
step 18300, training accuracy 1
step 18400, training accuracy 1
step 18500, training accuracy 1
step 18600, training accuracy 1
step 18700, training accuracy 1
step 18800, training accuracy 1
step 18900, training accuracy 1
step 19000, training accuracy 1
step 19100, training accuracy 1
step 19200, training accuracy 1
step 19300, training accuracy 1
step 19400, training accuracy 1
step 19500, training accuracy 1
step 19600, training accuracy 1
step 19700, training accuracy 1
step 19800, training accuracy 1
step 19900, training accuracy 1
test accuracy 0.991
test accuracy 0.985
test accuracy 0.988
test accuracy 0.994
test accuracy 0.987
test accuracy 0.997
test accuracy 0.995
test accuracy 0.999
test accuracy 0.999
test accuracy 0.993
1 loop, best of 1: 3min 59s per loop

In [44]:
np.mean([accuracy.eval(feed_dict={x: mnist.test.images[i:i+1000], 
                                   y_: mnist.test.labels[i:i+1000],
                                   keep_prob: 1.0}) 
        for i in range(0, mnist.test.num_examples, 1000)]
)


Out[44]:
0.99280006

In [45]:
tf.scalar_summary(accuracy.op.name, accuracy)
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter("log1", graph=sess.graph)
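
The summaries land in the log1 directory; running tensorboard --logdir=log1 shows the accuracy curve and the graph.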

In [ ]:
%%timeit -r 1 -n 1
tf.initialize_all_variables().run()
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        train_accuracy = accuracy.eval(feed_dict = {
                x: batch[0], y_: batch[1], keep_prob: 1.0 })
        print("step %d, training accuracy %g"%(i, train_accuracy))
        summary_str = sess.run(summary_op, feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0 })
        summary_writer.add_summary(summary_str, i)
    train_step.run(feed_dict= {x: batch[0], y_: batch[1], keep_prob: 0.5 })
for i in range(0, mnist.test.num_examples, 1000):
    print("test accuracy %g"%accuracy.eval(feed_dict={x: mnist.test.images[i:i+1000], 
                                                  y_: mnist.test.labels[i:i+1000],
                                                  keep_prob: 1.0}))

In [ ]:
np.mean([accuracy.eval(feed_dict={x: mnist.test.images[i:i+1000], 
                                   y_: mnist.test.labels[i:i+1000],
                                   keep_prob: 1.0}) 
        for i in range(0, mnist.test.num_examples, 1000)]
)

In [ ]: