In [ ]:
import os
import time

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D

Data Pipeline


In [ ]:
def parse_tfrecord(example):
    # Each record holds an int64 label and the raw bytes of one image.
    feature = {'label': tf.FixedLenFeature((), tf.int64),
               'image': tf.FixedLenFeature((), tf.string, default_value="")}
    parsed = tf.parse_single_example(example, feature)
    # The image bytes were written as float64; decode, cast down to float32,
    # and restore the 32x32x3 CIFAR-10 shape.
    image = tf.decode_raw(parsed['image'], tf.float64)
    image = tf.cast(image, tf.float32)
    image = tf.reshape(image, [32, 32, 3])
    return image, parsed['label']

def image_scaling(x):
    # Scale each image to zero mean and unit variance.
    return tf.image.per_image_standardization(x)

def distort(x):
    # Training-time augmentation: pad to 40x40, take a random 32x32 crop,
    # and randomly flip left-right.
    x = tf.image.resize_image_with_crop_or_pad(x, 40, 40)
    x = tf.random_crop(x, [32, 32, 3])
    x = tf.image.random_flip_left_right(x)
    return x
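
For reference, the sketch below shows how such records could have been written. It is illustrative only (not the script that produced ./data) and assumes each image is a float64 NumPy array of shape [32, 32, 3], matching the decode_raw and reshape calls above.

In [ ]:
import numpy as np

def write_tfrecords(filename, images, labels):
    # Hypothetical writer matching the parse_tfrecord schema above:
    # raw float64 image bytes plus an int64 label per record.
    with tf.python_io.TFRecordWriter(filename) as writer:
        for image, label in zip(images, labels):
            example = tf.train.Example(features=tf.train.Features(feature={
                'label': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(label)])),
                'image': tf.train.Feature(
                    bytes_list=tf.train.BytesList(
                        value=[image.astype(np.float64).tobytes()]))}))
            writer.write(example.SerializeToString())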

In [ ]:
def dataset_input_fn(params):
    dataset = tf.data.TFRecordDataset(
        params['filenames'], num_parallel_reads=params['threads'])
    dataset = dataset.map(parse_tfrecord, num_parallel_calls=params['threads'])
    dataset = dataset.map(lambda x, y: (image_scaling(x), y),
                          num_parallel_calls=params['threads'])
    if params['mode'] == tf.estimator.ModeKeys.TRAIN:
        # Augment and shuffle only during training.
        dataset = dataset.map(lambda x, y: (distort(x), y),
                              num_parallel_calls=params['threads'])
        dataset = dataset.shuffle(buffer_size=params['shuffle_buff'])
    dataset = dataset.repeat()
    dataset = dataset.batch(params['batch'])
    # prefetch() follows batch(), so its buffer size is counted in batches;
    # a handful of batches is enough to keep the GPUs fed.
    dataset = dataset.prefetch(8)
    return dataset

In [ ]:
# Shards 00* are used for training; shards 01* are held out for evaluation.
train_files = tf.gfile.Glob('./data/cifar10_data_00*.tfrecords')
eval_files = tf.gfile.Glob('./data/cifar10_data_01*.tfrecords')

train_params = {'filenames': train_files,
                'mode': tf.estimator.ModeKeys.TRAIN,
                'threads': 16,
                'shuffle_buff': 10000,
                'batch': 200}

eval_params  = {'filenames': eval_files,
                'mode': tf.estimator.ModeKeys.EVAL,
                'threads': 8,
                'batch': 200}
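
With the parameters defined, the pipeline can be sanity-checked by pulling a single batch through a one-shot iterator (a quick TF 1.x sketch, separate from the training flow):

In [ ]:
with tf.Graph().as_default():
    images, labels = dataset_input_fn(eval_params).make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        x, y = sess.run([images, labels])
        print(x.shape, x.dtype, y.shape)  # Expect (200, 32, 32, 3) float32 and (200,)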

Model


In [ ]:
num_classes = 10
model_params  = {'drop_out': 0.2, 'dense_units': 1024, 'learning_rate': 1e-3}

In [ ]:
model = Sequential()
model.add(Conv2D(128, (5, 5), padding='same', input_shape=[32, 32, 3]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

model.add(Conv2D(128, (5,5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

model.add(Conv2D(256, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))

model.add(Conv2D(512, (3, 3)))
model.add(Activation('relu'))

model.add(Flatten())
model.add(Dense(model_params['dense_units']))
model.add(Activation('relu'))
model.add(Dropout(model_params['drop_out']))  # Dropout belongs before the logits layer, not after it.

model.add(Dense(num_classes))
model.add(Activation('softmax'))

opt = tf.train.GradientDescentOptimizer(model_params['learning_rate'])

# sparse_categorical_crossentropy works directly with the integer labels
# produced by parse_tfrecord, so no one-hot encoding is needed.
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)
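
Before converting the model to an Estimator, model.summary() is a quick way to confirm the layer shapes and parameter count:

In [ ]:
model.summary()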

Estimator from Keras Model


In [ ]:
# Encode the hyperparameters and a timestamp in the checkpoint directory name.
name = 'cnn_model/keras_model_dense{dense_units}_drop{drop_out}_lr{learning_rate}_'.format(
    **model_params)
name += time.strftime('%Y%m%d%H%M%S')
model_dir = os.path.join('./', name)

print(model_dir)

In [ ]:
# Mirror the model across 8 GPUs; each training batch is split among them.
distribution = tf.contrib.distribute.MirroredStrategy(num_gpus=8)

config = tf.estimator.RunConfig(
    save_checkpoints_secs=300,
    keep_checkpoint_max=5,
    session_config=tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=True),
    train_distribute=distribution)

estimator = keras.estimator.model_to_estimator(
  keras_model=model, config=config, model_dir=model_dir)
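
The Estimator writes its graph and checkpoints under model_dir, which can be inspected at any point (a small sketch):

In [ ]:
print(estimator.model_dir)
print(tf.train.latest_checkpoint(model_dir))  # None until the first checkpoint is saved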

In [ ]:
# The Keras model was compiled without metrics, so attach an accuracy
# metric to the converted Estimator instead.

def my_accuracy(labels, predictions):
    pred_values = tf.argmax(predictions[model.output_names[0]], axis=1)
    return {'accuracy': tf.metrics.accuracy(labels, pred_values)}

estimator = tf.contrib.estimator.add_metrics(estimator, my_accuracy)

Train a Model


In [ ]:
# Runs on 8 GPUs via the MirroredStrategy in the RunConfig
estimator.train(input_fn=lambda: dataset_input_fn(train_params), max_steps=1000)
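
Training and evaluation can also be interleaved with tf.estimator.train_and_evaluate; a sketch reusing the input functions and step counts from this notebook:

In [ ]:
train_spec = tf.estimator.TrainSpec(
    input_fn=lambda: dataset_input_fn(train_params), max_steps=1000)
eval_spec = tf.estimator.EvalSpec(
    input_fn=lambda: dataset_input_fn(eval_params), steps=10)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)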

Evaluate a Model


In [ ]:
# Runs on a single GPU: train_distribute applies only to training
estimator.evaluate(input_fn=lambda: dataset_input_fn(eval_params), steps=10)
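
The same Estimator can also serve predictions: estimator.predict yields one dict per example, keyed by the Keras output layer name. A sketch; itertools.islice caps the infinitely repeating input pipeline:

In [ ]:
import itertools

predictions = estimator.predict(input_fn=lambda: dataset_input_fn(eval_params))
for pred in itertools.islice(predictions, 5):
    probs = pred[model.output_names[0]]  # Softmax over the 10 CIFAR-10 classes
    print(probs.argmax())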