In [1]:
import os
import re
import sys
import tarfile
from six.moves import urllib

def maybe_download_and_extract(
  dest_directory='data',
  data_url='http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'):
  
  """Download and extract the tarball from Alex's website."""
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = data_url.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (
          filename, float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
  if not os.path.exists(extracted_dir_path):
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)

In [2]:
DATA_DIR = 'data'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
maybe_download_and_extract(DATA_DIR, DATA_URL)


>> Downloading cifar-10-binary.tar.gz 100.0%
Successfully downloaded cifar-10-binary.tar.gz 170052171 bytes.

In [3]:
import numpy as np
import matplotlib.pyplot as plt

def extract_data(index=0, filepath='data/cifar-10-batches-bin/data_batch_5.bin'):
  # Each CIFAR-10 record is 1 label byte followed by 32 * 32 * 3 image bytes.
  label_bytes_length = 1
  image_bytes_length = (32 ** 2) * 3
  record_bytes_length = label_bytes_length + image_bytes_length

  with open(filepath, mode='rb') as bytestream:
    bytestream.seek(record_bytes_length * index, 0)
    label_bytes = bytestream.read(label_bytes_length)
    image_bytes = bytestream.read(image_bytes_length)

  label = np.frombuffer(label_bytes, dtype=np.uint8)
  image = np.frombuffer(image_bytes, dtype=np.uint8)

  # The image is stored as [depth, height, width]; convert to [height, width, depth].
  image = np.reshape(image, [3, 32, 32])
  image = np.transpose(image, [1, 2, 0])
  # Note: plt.imshow expects uint8 values or floats in [0, 1].
  image = image.astype(np.float32)

  result = {
    'image': image,
    'label': label,
  }
  return result

In [8]:
%matplotlib inline
result = extract_data(np.random.randint(1000))
plt.imshow(result['image'])


Out[8]:
<matplotlib.image.AxesImage at 0x1d15cba64e0>

In [9]:
import math
import shutil
import tensorflow as tf

from datetime import datetime
from tensorflow.python.feature_column import feature_column

from tensorflow.contrib.learn import learn_runner
from tensorflow.contrib.learn import make_export_strategy

print(tf.__version__)


1.4.0

In [10]:
# Batches 1-4 are used for training; batch 5 is held out for validation.
train_data_files = ['data/cifar-10-batches-bin/data_batch_{}.bin'.format(i) for i in range(1, 5)]
valid_data_files = ['data/cifar-10-batches-bin/data_batch_5.bin']
test_data_files = ['data/cifar-10-batches-bin/test_batch.bin']

In [11]:
# Process images of this size. This matches the original CIFAR image size of
# 32 x 32. If one alters this number, then the entire model architecture will
# change and any model would need to be retrained.
IMAGE_HEIGHT = 32
IMAGE_WIDTH = 32
IMAGE_DEPTH = 3

# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

# We use a weight decay of 0.0002, which performs better than the 0.0001 that
# was originally suggested.
WEIGHT_DECAY = 2e-4
MOMENTUM = 0.9

# Global constants describing model behaviors
MODEL_NAME = 'cnn-model-01'
USE_CHECKPOINT = False

In [15]:
def parse_record(raw_record):
    # Every record consists of a label followed by the image, with a fixed number
    # of bytes for each.
    label_bytes = 1
    image_bytes = IMAGE_HEIGHT * IMAGE_WIDTH * IMAGE_DEPTH
    record_bytes = label_bytes + image_bytes

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_vector = tf.decode_raw(raw_record, tf.uint8)

    # The first byte represents the label, which we convert from uint8 to int32
    # and then to one-hot.
    label = tf.cast(record_vector[0], tf.int32)
    label = tf.one_hot(label, NUM_CLASSES)

    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(
        record_vector[label_bytes:record_bytes],
        [IMAGE_DEPTH, IMAGE_HEIGHT, IMAGE_WIDTH])

    # Convert from [depth, height, width] to [height, width, depth], and cast as
    # float32.
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)

    return image, label
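
A minimal sanity check (not part of the original notebook) that decodes the first record of the validation file with parse_record; it assumes the data has already been downloaded to the paths used above.

with open('data/cifar-10-batches-bin/data_batch_5.bin', 'rb') as f:
    raw = f.read(1 + IMAGE_HEIGHT * IMAGE_WIDTH * IMAGE_DEPTH)
image_op, label_op = parse_record(tf.constant(raw))
with tf.Session() as sess:
    image_val, label_val = sess.run([image_op, label_op])
print(image_val.shape, label_val)  # (32, 32, 3) and a length-10 one-hot vector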

In [17]:
def preprocess_image(image, is_training=False):
    """Preprocess a single image of layout [height, width, depth]."""
    if is_training:
        # Resize the image to add four extra pixels on each side.
        image = tf.image.resize_image_with_crop_or_pad(
            image, IMAGE_HEIGHT + 8, IMAGE_WIDTH + 8)

        # Randomly crop a [IMAGE_HEIGHT, IMAGE_WIDTH] section of the image.
        image = tf.random_crop(image, [IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH])

        # Randomly flip the image horizontally.
        image = tf.image.random_flip_left_right(image)

    # Subtract off the mean and divide by the variance of the pixels.
    image = tf.image.per_image_standardization(image)
    return image

In [19]:
def generate_input_fn(file_names,
                      mode=tf.estimator.ModeKeys.EVAL,
                      batch_size=1):

    def _input_fn():
        label_bytes = 1
        image_bytes = IMAGE_HEIGHT * IMAGE_WIDTH * IMAGE_DEPTH
        record_bytes = label_bytes + image_bytes
        dataset = tf.data.FixedLengthRecordDataset(filenames=file_names,
                                                   record_bytes=record_bytes)

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        if is_training:
            buffer_size = batch_size * 2 + 1
            dataset = dataset.shuffle(buffer_size=buffer_size)

        dataset = dataset.map(parse_record)
        dataset = dataset.map(lambda image, label: (preprocess_image(image, is_training), label))

        dataset = dataset.prefetch(2 * batch_size)

        # We call repeat after shuffling, rather than before, to prevent separate
        # epochs from blending together.
        dataset = dataset.repeat()

        # Batch results by up to batch_size, and then fetch the tuple from the
        # iterator.
        dataset = dataset.batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        images, labels = iterator.get_next()

        features = {'images': images}
        return features, labels

    return _input_fn
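
As a quick sketch (not in the original notebook), the returned input_fn can be called directly to pull one batch and confirm the feature and label shapes; this builds ops in the default graph, so in practice it could be wrapped in its own tf.Graph().

sanity_input_fn = generate_input_fn(file_names=valid_data_files,
                                    mode=tf.estimator.ModeKeys.EVAL,
                                    batch_size=4)
features, labels = sanity_input_fn()
with tf.Session() as sess:
    images_val, labels_val = sess.run([features['images'], labels])
print(images_val.shape, labels_val.shape)  # expected: (4, 32, 32, 3) (4, 10)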

In [20]:
def get_feature_columns():
    feature_columns = {
        'images': tf.feature_column.numeric_column(
            'images', (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH)),
    }
    return feature_columns

In [21]:
feature_columns = get_feature_columns()
print("Feature Columns: {}".format(feature_columns))


Feature Columns: {'images': _NumericColumn(key='images', shape=(32, 32, 3), default_value=None, dtype=tf.float32, normalizer_fn=None)}

In [23]:
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
    x: Tensor
    Returns:
    nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))

def _variable_on_cpu(name, shape, initializer):
    """Helper to create a Variable stored on CPU memory.
    Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable
    Returns:
    Variable Tensor
    """
    with tf.device('/cpu:0'):
        dtype = tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
    return var

def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.
    Returns:
    Variable Tensor
    """
    dtype = tf.float32
    var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var

In [24]:
def inference(images):
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv1)
    
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
    norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(conv2)

    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
    
    with tf.variable_scope('local3') as scope:
        pool2_shape = pool2.get_shape()
        dim = pool2_shape[1] * pool2_shape[2] * pool2_shape[3]
        reshape = tf.reshape(pool2, [-1, dim])
        weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        _activation_summary(local3)

    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004)
        biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
        _activation_summary(local4)

    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=0.0)
        biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))
        logits = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
        _activation_summary(logits)

    return logits
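
For reference, a sketch of the tensor shapes flowing through inference, derived from the layer parameters above (assuming the 32 x 32 x 3 input used throughout):

# conv1 (5x5x64, SAME, stride 1)  -> 32 x 32 x 64
# pool1 (3x3, stride 2, SAME)     -> 16 x 16 x 64
# norm1 / conv2 / norm2           -> 16 x 16 x 64
# pool2 (3x3, stride 2, SAME)     -> 8 x 8 x 64
# flatten                         -> 4096
# local3                          -> 384
# local4                          -> 192
# softmax_linear (logits)         -> NUM_CLASSES = 10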

In [25]:
def get_loss(logits, labels):
    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    cross_entropy = tf.losses.softmax_cross_entropy(
        logits=logits, onehot_labels=labels)

    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(cross_entropy, name='cross_entropy')
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Add weight decay to the loss.
    loss = cross_entropy + WEIGHT_DECAY * tf.add_n(
      [tf.nn.l2_loss(v) for v in tf.trainable_variables()])

    return loss
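
Written out, the objective this produces is the following (a summary of the code above, not new behavior):

# loss = softmax_cross_entropy(logits, labels)
#        + WEIGHT_DECAY * sum over trainable variables v of l2_loss(v)
# where WEIGHT_DECAY = 2e-4 and tf.nn.l2_loss(v) computes sum(v ** 2) / 2.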

In [26]:
def get_train_op(loss, params, mode):
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Scale the learning rate linearly with the batch size. When the batch size
        # is 128, the learning rate should be 0.1.
        initial_learning_rate = 0.1 * params.batch_size / 128
        batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / params.batch_size
        global_step = tf.train.get_or_create_global_step()

        # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs.
        boundaries = [int(batches_per_epoch * epoch) for epoch in [100, 150, 200]]
        values = [initial_learning_rate * decay for decay in [1, 0.1, 0.01, 0.001]]
        learning_rate = tf.train.piecewise_constant(
            tf.cast(global_step, tf.int32), boundaries, values)

        # Create a tensor named learning_rate for logging purposes
        tf.identity(learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', learning_rate)

        optimizer = tf.train.MomentumOptimizer(
            learning_rate=learning_rate,
            momentum=MOMENTUM)

        # Batch norm requires update ops to be added as a dependency to the train_op
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss, global_step)
    else:
        train_op = None

    return train_op
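
To make the schedule concrete, here is the arithmetic for the batch size of 200 set later in hparams (an illustrative sketch, not part of the original notebook):

batch_size = 200                                        # matches hparams below
initial_learning_rate = 0.1 * batch_size / 128          # 0.15625
batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size   # 250.0
boundaries = [int(batches_per_epoch * e) for e in [100, 150, 200]]  # [25000, 37500, 50000]
values = [initial_learning_rate * d for d in [1, 0.1, 0.01, 0.001]]
# values -> [0.15625, 0.015625, 0.0015625, 0.00015625]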

In [27]:
def get_metrics(predictions, labels):
    # Calculate accuracy
    accuracy = tf.metrics.accuracy(predictions['classes'],
                                 tf.argmax(labels, axis=1))

    # Create a tensor named train_accuracy for logging purposes
    tf.identity(accuracy[1], name='train_accuracy')
    tf.summary.scalar('train_accuracy', accuracy[1])

    return {'accuracy': accuracy}

In [35]:
def model_fn(features, labels, mode, params):
    # Create the input layers from the features
    feature_columns = list(get_feature_columns().values())

    images = tf.feature_column.input_layer(
        features=features, feature_columns=feature_columns)

    images = tf.reshape(
        images, shape=(-1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH))

    # Calculate logits through CNN
    logits = inference(images)

    # Get predictions
    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }

    # Provide an estimator spec for `ModeKeys.PREDICT`
    if mode == tf.estimator.ModeKeys.PREDICT:
        export_outputs = {
            'predictions': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=predictions,
                                          export_outputs=export_outputs)

    loss = get_loss(logits=logits, labels=labels)
    train_op = get_train_op(loss=loss, mode=mode, params=params)
    metrics = get_metrics(predictions=predictions, labels=labels)

    # Return EstimatorSpec
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics)
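
The PREDICT branch above defines export_outputs; below is a hedged sketch of a serving input receiver that could pair with it. The function name and placeholder shape are assumptions for illustration, not part of the original notebook.

def serving_input_fn():
    # Raw image tensors fed under the same 'images' key used by get_feature_columns().
    receiver_tensors = {
        'images': tf.placeholder(
            tf.float32, shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH])
    }
    return tf.estimator.export.ServingInputReceiver(
        features=receiver_tensors, receiver_tensors=receiver_tensors)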

In [36]:
def create_estimator(run_config, hparams):
    return tf.estimator.Estimator(
      model_fn=model_fn,
      params=hparams,
      config=run_config)

In [45]:
hparams = tf.contrib.training.HParams(
  batch_size=200,
  max_steps=100000,
)

model_dir = 'trained_models/{}'.format(MODEL_NAME)

run_config = tf.contrib.learn.RunConfig(
  save_checkpoints_steps=100,
  tf_random_seed=19851211,
  model_dir=model_dir
)

In [47]:
estimator = create_estimator(run_config, hparams)

train_spec = tf.estimator.TrainSpec(
  input_fn=generate_input_fn(file_names=train_data_files,
                             mode=tf.estimator.ModeKeys.TRAIN,
                             batch_size=hparams.batch_size),
  max_steps=hparams.max_steps,
  hooks=None
)

eval_spec = tf.estimator.EvalSpec(
  input_fn=generate_input_fn(file_names=valid_data_files,
                             mode=tf.estimator.ModeKeys.EVAL,
                             batch_size=hparams.batch_size),
  steps=50,
  name=None,
  hooks=None,
  start_delay_secs=120,
  throttle_secs=600
)


INFO:tensorflow:Using config: {'_environment': 'local', '_keep_checkpoint_every_n_hours': 10000, '_save_checkpoints_steps': 100, '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1
}
, '_task_id': 0, '_evaluation_master': '', '_model_dir': 'trained_models/cnn-model-01', '_keep_checkpoint_max': 5, '_save_summary_steps': 100, '_num_worker_replicas': 0, '_num_ps_replicas': 0, '_save_checkpoints_secs': None, '_tf_random_seed': 19851211, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x000001D1633113C8>, '_master': '', '_task_type': None, '_log_step_count_steps': 100, '_is_chief': True, '_session_config': None}

In [48]:
if not USE_CHECKPOINT:
    print("Removing previous artifacts...")
    shutil.rmtree(model_dir, ignore_errors=True)

tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


Removing previous artifacts...
INFO:tensorflow:Running training and evaluation locally (non-distributed).
INFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 600 secs (eval_spec.throttle_secs) or training is finished.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:step = 1, loss = 2.53161
INFO:tensorflow:Saving checkpoints for 101 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:global_step/sec: 14.1991
INFO:tensorflow:step = 101, loss = 2.82478 (7.040 sec)
INFO:tensorflow:Saving checkpoints for 201 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:global_step/sec: 14.3742
INFO:tensorflow:step = 201, loss = 2.79661 (6.963 sec)
...
INFO:tensorflow:Saving checkpoints for 8701 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:global_step/sec: 14.8544
INFO:tensorflow:step = 8701, loss = 2.02931 (6.738 sec)
INFO:tensorflow:Saving checkpoints for 8766 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:Loss for final step: 1.97516.
INFO:tensorflow:Starting evaluation at 2018-01-14-15:57:50
INFO:tensorflow:Restoring parameters from trained_models/cnn-model-01\model.ckpt-8766
INFO:tensorflow:Evaluation [1/50]
INFO:tensorflow:Evaluation [2/50]
...
INFO:tensorflow:Evaluation [50/50]
INFO:tensorflow:Finished evaluation at 2018-01-14-15:57:52
INFO:tensorflow:Saving dict for global step 8766: accuracy = 0.5425, global_step = 8766, loss = 1.80442
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from trained_models/cnn-model-01\model.ckpt-8766
INFO:tensorflow:Saving checkpoints for 8767 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:step = 8767, loss = 1.76696
INFO:tensorflow:Saving checkpoints for 8867 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:global_step/sec: 14.4806
INFO:tensorflow:step = 8867, loss = 1.8598 (6.905 sec)
...
INFO:tensorflow:Saving checkpoints for 14667 into trained_models/cnn-model-01\model.ckpt.
INFO:tensorflow:global_step/sec: 14.5408
INFO:tensorflow:step = 14667, loss = 1.69422 (6.874 sec)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-48-c29ccd08c5b3> in <module>()
      3     shutil.rmtree(model_dir, ignore_errors=True)
      4 
----> 5 tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\estimator\training.py in train_and_evaluate(estimator, train_spec, eval_spec)
    428       config.task_type != run_config_lib.TaskType.EVALUATOR):
    429     logging.info('Running training and evaluation locally (non-distributed).')
--> 430     executor.run_local()
    431     return
    432 

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\estimator\training.py in run_local(self)
    607           input_fn=self._train_spec.input_fn,
    608           max_steps=self._train_spec.max_steps,
--> 609           hooks=train_hooks)
    610 
    611       # Final export signal: For any eval result with global_step >= train

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\estimator\estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
    300 
    301     saving_listeners = _check_listeners_type(saving_listeners)
--> 302     loss = self._train_model(input_fn, hooks, saving_listeners)
    303     logging.info('Loss for final step: %s.', loss)
    304     return self

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\estimator\estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
    781         loss = None
    782         while not mon_sess.should_stop():
--> 783           _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
    784       return loss
    785 

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\training\monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    519                           feed_dict=feed_dict,
    520                           options=options,
--> 521                           run_metadata=run_metadata)
    522 
    523   def should_stop(self):

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\training\monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    890                               feed_dict=feed_dict,
    891                               options=options,
--> 892                               run_metadata=run_metadata)
    893       except _PREEMPTION_ERRORS as e:
    894         logging.info('An error was raised. This may be due to a preemption in '

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\training\monitored_session.py in run(self, *args, **kwargs)
    950   def run(self, *args, **kwargs):
    951     try:
--> 952       return self._sess.run(*args, **kwargs)
    953     except _PREEMPTION_ERRORS:
    954       raise

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\training\monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
   1022                                   feed_dict=feed_dict,
   1023                                   options=options,
-> 1024                                   run_metadata=run_metadata)
   1025 
   1026     for hook in self._hooks:

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\training\monitored_session.py in run(self, *args, **kwargs)
    825 
    826   def run(self, *args, **kwargs):
--> 827     return self._sess.run(*args, **kwargs)
    828 
    829 

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    887     try:
    888       result = self._run(None, fetches, feed_dict, options_ptr,
--> 889                          run_metadata_ptr)
    890       if run_metadata:
    891         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1118     if final_fetches or final_targets or (handle and feed_dict_tensor):
   1119       results = self._do_run(handle, final_targets, final_fetches,
-> 1120                              feed_dict_tensor, options, run_metadata)
   1121     else:
   1122       results = []

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1315     if handle is None:
   1316       return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1317                            options, run_metadata)
   1318     else:
   1319       return self._do_call(_prun_fn, self._session, handle, feeds, fetches)

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
   1321   def _do_call(self, fn, *args):
   1322     try:
-> 1323       return fn(*args)
   1324     except errors.OpError as e:
   1325       message = compat.as_text(e.message)

E:\Miniconda3\envs\env3-gpu\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1300           return tf_session.TF_Run(session, options,
   1301                                    feed_dict, fetch_list, target_list,
-> 1302                                    status, run_metadata)
   1303 
   1304     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

In [ ]: