In [12]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Imports
import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

def cnn_model_fn(features, labels, mode):
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
    
    conv1 = tf.layers.conv2d(inputs=input_layer, filters=32, 
                             kernel_size=[5, 5], padding='same',
                            activation=tf.nn.relu)
    
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    
    conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5], 
                             padding='same', activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    
    pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
    
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    
    logits = tf.layers.dense(inputs=dropout, units=10)
    
    predictions = {
        'classes': tf.argmax(input=logits, axis=1),
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    
    eval_metric_ops = {
      "accuracy": tf.metrics.accuracy(
          labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

def main(unused_argv):
    # Load training and eval data
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images  # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
      model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
      tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": train_data},
      y=train_labels,
      batch_size=100,
      num_epochs=None,
      shuffle=True)
    mnist_classifier.train(
      input_fn=train_input_fn,
      steps=2000)

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": eval_data},
      y=eval_labels,
      num_epochs=1,
      shuffle=False)
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)


if __name__ == "__main__":
    tf.app.run()


Extracting MNIST-data/train-images-idx3-ubyte.gz
Extracting MNIST-data/train-labels-idx1-ubyte.gz
Extracting MNIST-data/t10k-images-idx3-ubyte.gz
Extracting MNIST-data/t10k-labels-idx1-ubyte.gz
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_tf_random_seed': 1, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_save_checkpoints_steps': None, '_model_dir': '/tmp/mnist_convnet_model', '_save_summary_steps': 100}
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from /tmp/mnist_convnet_model/model.ckpt-201
INFO:tensorflow:Saving checkpoints for 202 into /tmp/mnist_convnet_model/model.ckpt.
INFO:tensorflow:loss = 2.2577, step = 202
INFO:tensorflow:global_step/sec: 5.14336
INFO:tensorflow:loss = 2.24191, step = 302 (19.444 sec)
INFO:tensorflow:global_step/sec: 4.76332
INFO:tensorflow:loss = 2.22952, step = 402 (20.994 sec)
INFO:tensorflow:global_step/sec: 4.17879
INFO:tensorflow:loss = 2.17337, step = 502 (23.931 sec)
INFO:tensorflow:global_step/sec: 4.44531
INFO:tensorflow:loss = 2.14476, step = 602 (22.495 sec)
INFO:tensorflow:global_step/sec: 5.24176
INFO:tensorflow:loss = 2.12841, step = 702 (19.077 sec)
INFO:tensorflow:global_step/sec: 5.12082
INFO:tensorflow:loss = 1.99575, step = 802 (19.528 sec)
INFO:tensorflow:global_step/sec: 4.85169
INFO:tensorflow:loss = 1.96625, step = 902 (20.611 sec)
INFO:tensorflow:global_step/sec: 5.31015
INFO:tensorflow:loss = 1.91727, step = 1002 (18.832 sec)
INFO:tensorflow:global_step/sec: 5.17021
INFO:tensorflow:loss = 1.74486, step = 1102 (19.341 sec)
INFO:tensorflow:global_step/sec: 3.26404
INFO:tensorflow:loss = 1.57992, step = 1202 (30.638 sec)
INFO:tensorflow:global_step/sec: 4.90514
INFO:tensorflow:loss = 1.4793, step = 1302 (20.386 sec)
INFO:tensorflow:global_step/sec: 4.8124
INFO:tensorflow:loss = 1.24084, step = 1402 (20.780 sec)
INFO:tensorflow:global_step/sec: 5.12563
INFO:tensorflow:loss = 0.944429, step = 1502 (19.510 sec)
INFO:tensorflow:global_step/sec: 5.4055
INFO:tensorflow:loss = 0.880275, step = 1602 (18.499 sec)
INFO:tensorflow:global_step/sec: 5.37937
INFO:tensorflow:loss = 0.741957, step = 1702 (18.590 sec)
INFO:tensorflow:global_step/sec: 5.27733
INFO:tensorflow:loss = 0.802933, step = 1802 (18.949 sec)
INFO:tensorflow:global_step/sec: 4.74714
INFO:tensorflow:loss = 0.549236, step = 1902 (21.065 sec)
INFO:tensorflow:global_step/sec: 4.79841
INFO:tensorflow:loss = 0.818456, step = 2002 (20.840 sec)
INFO:tensorflow:global_step/sec: 4.90897
INFO:tensorflow:loss = 0.770689, step = 2102 (20.376 sec)
INFO:tensorflow:Saving checkpoints for 2201 into /tmp/mnist_convnet_model/model.ckpt.
INFO:tensorflow:Loss for final step: 0.617396.
INFO:tensorflow:Starting evaluation at 2018-01-18-13:05:30
INFO:tensorflow:Restoring parameters from /tmp/mnist_convnet_model/model.ckpt-2201
INFO:tensorflow:Finished evaluation at 2018-01-18-13:05:36
INFO:tensorflow:Saving dict for global step 2201: accuracy = 0.8668, global_step = 2201, loss = 0.537884
{'loss': 0.53788447, 'global_step': 2201, 'accuracy': 0.86680001}
An exception has occurred, use %tb to see the full traceback.

SystemExit

In [9]:


In [13]:
tf.losses.softmax_cross_entropy?

In [ ]: