In [ ]:
import os
import time

import tensorflow as tf
In [ ]:
def parse_tfrecord(example):
    feature = {'label': tf.FixedLenFeature((), tf.int64),
               'image': tf.FixedLenFeature((), tf.string, default_value="")}
    parsed = tf.parse_single_example(example, feature)
    image = tf.decode_raw(parsed['image'], tf.float64)
    image = tf.cast(image, tf.float32)
    image = tf.reshape(image, [32, 32, 3])
    return image, parsed['label']

def image_scaling(x):
    return tf.image.per_image_standardization(x)

def distort(x):
    x = tf.image.resize_image_with_crop_or_pad(x, 40, 40)
    x = tf.random_crop(x, [32, 32, 3])
    x = tf.image.random_flip_left_right(x)
    return x
def dataset_input_fn(params):
    dataset = tf.data.TFRecordDataset(
        params['filenames'], num_parallel_reads=params['threads'])
    dataset = dataset.map(parse_tfrecord, num_parallel_calls=params['threads'])
    dataset = dataset.map(lambda x, y: (image_scaling(x), y),
                          num_parallel_calls=params['threads'])
    # Distortion, shuffling, and endless repetition only make sense for training.
    if params['mode'] == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.map(lambda x, y: (distort(x), y),
                              num_parallel_calls=params['threads'])
        dataset = dataset.shuffle(buffer_size=params['shuffle_buff'])
        dataset = dataset.repeat()
    dataset = dataset.batch(params['batch'])
    dataset = dataset.prefetch(8 * params['batch'])
    return dataset
In [ ]:
train_files = tf.gfile.Glob('./data/cifar10_data_00*')
eval_files = tf.gfile.Glob('./data/cifar10_data_01*')

train_params = {'filenames': train_files,
                'mode': tf.estimator.ModeKeys.TRAIN,
                'threads': 16,
                'shuffle_buff': 100000,
                'batch': 200}

eval_params = {'filenames': eval_files,
               'mode': tf.estimator.ModeKeys.EVAL,
               'threads': 8,
               'batch': 200}
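In [ ]:
# Optional smoke test (an illustrative sketch, not part of the original flow):
# pull one batch through the input pipeline to confirm shapes and dtypes
# before spending time on training. Assumes the eval TFRecord files exist.
test_dataset = dataset_input_fn(eval_params)
test_images, test_labels = test_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    imgs, lbls = sess.run([test_images, test_labels])
    print(imgs.shape, imgs.dtype)   # expected: (200, 32, 32, 3) float32
    print(lbls.shape, lbls.dtype)   # expected: (200,) int64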
In [ ]:
model_params = {'drop_out': 0.2, 'dense_units': 1024, 'learning_rate': 1e-3, 'log': True}
In [ ]:
def _conv(x, kernel, name, log=False):
    # conv -> bias -> relu -> 2x2 max-pool block; `kernel` is [h, w, in, out].
    with tf.variable_scope(name):
        W = tf.get_variable(initializer=tf.truncated_normal(shape=kernel, stddev=0.01), name='W')
        b = tf.get_variable(initializer=tf.constant(0.0, shape=[kernel[3]]), name='b')
        conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
        activation = tf.nn.relu(tf.add(conv, b))
        pool = tf.nn.max_pool(activation, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        if log:
            tf.summary.histogram("weights", W)
            tf.summary.histogram("biases", b)
            tf.summary.histogram("activations", activation)
        return pool
def _dense(x, size_in, size_out, name, relu=False, log=False):
    # Fully connected layer; flattens its input first.
    with tf.variable_scope(name):
        flat = tf.reshape(x, [-1, size_in])
        W = tf.get_variable(initializer=tf.truncated_normal([size_in, size_out], stddev=0.1), name='W')
        b = tf.get_variable(initializer=tf.constant(0.0, shape=[size_out]), name='b')
        activation = tf.add(tf.matmul(flat, W), b)
        if relu:
            activation = tf.nn.relu(activation)
        if log:
            tf.summary.histogram("weights", W)
            tf.summary.histogram("biases", b)
            tf.summary.histogram("activations", activation)
        return activation
def _model(features, mode, params):
    input_layer = tf.reshape(features, [-1, 32, 32, 3])
    conv1 = _conv(input_layer, kernel=[5, 5, 3, 128], name='conv1', log=params['log'])
    conv2 = _conv(conv1, kernel=[5, 5, 128, 128], name='conv2', log=params['log'])
    conv3 = _conv(conv2, kernel=[3, 3, 128, 256], name='conv3', log=params['log'])
    conv4 = _conv(conv3, kernel=[3, 3, 256, 512], name='conv4', log=params['log'])
    # Each conv block halves the spatial dims (32 -> 16 -> 8 -> 4 -> 2),
    # so conv4 arrives at the dense layer as 2x2x512.
    dense = _dense(conv4, size_in=2*2*512, size_out=params['dense_units'],
                   name='Dense', relu=True, log=params['log'])
    if mode == tf.estimator.ModeKeys.TRAIN:
        # tf.nn.dropout takes the *keep* probability, so convert the drop rate.
        dense = tf.nn.dropout(dense, keep_prob=1.0 - params['drop_out'])
    logits = _dense(dense, size_in=params['dense_units'],
                    size_out=10, name='Output', relu=False, log=params['log'])
    return logits
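In [ ]:
# Quick shape check (a sketch, assuming the layer sizes above): four 2x2
# max-pools take 32x32 down to 2x2, which is why _dense gets size_in=2*2*512.
with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    test_logits = _model(x, tf.estimator.ModeKeys.PREDICT, model_params)
    print(test_logits.shape)  # expected: (?, 10)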
In [ ]:
def model_fn(features, labels, mode, params):
    logits = _model(features, mode, params)
    predictions = {"logits": logits,
                   "classes": tf.argmax(input=logits, axis=1),
                   "probabilities": tf.nn.softmax(logits, name='softmax')}
    export_outputs = {'predictions': tf.estimator.export.PredictOutput(predictions)}

    if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        learning_rate = tf.train.exponential_decay(params['learning_rate'],
                                                   tf.train.get_global_step(),
                                                   decay_steps=100000,
                                                   decay_rate=0.96)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        tf.summary.scalar('learning_rate', learning_rate)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    if mode == tf.estimator.ModeKeys.EVAL:
        accuracy = tf.metrics.accuracy(
            labels=labels, predictions=predictions['classes'])
        metrics = {'accuracy': accuracy}
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=metrics)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions=predictions, export_outputs=export_outputs)
In [ ]:
name = 'cnn_model/tflow_model_'
name = name + 'dense' + str(model_params['dense_units']) + '_'
name = name + 'drop' + str(model_params['drop_out']) + '_'
name = name + 'lr' + str(model_params['learning_rate']) + '_'
name = name + time.strftime("%Y%m%d%H%M%S")
model_dir = os.path.join('./',name)
print(model_dir)
In [ ]:
distribution = tf.contrib.distribute.MirroredStrategy(num_gpus=8)
config = tf.estimator.RunConfig(
    save_checkpoints_secs=300,
    keep_checkpoint_max=5,
    session_config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True),
    train_distribute=distribution)
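In [ ]:
# MirroredStrategy(num_gpus=8) assumes eight visible GPUs. A sketch for
# verifying what this machine actually exposes before training:
from tensorflow.python.client import device_lib
gpu_names = [d.name for d in device_lib.list_local_devices() if d.device_type == 'GPU']
print('visible GPUs:', gpu_names)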
In [ ]:
estimator = tf.estimator.Estimator(
    model_fn=model_fn, model_dir=model_dir, params=model_params, config=config)
In [ ]:
estimator.train(input_fn=lambda: dataset_input_fn(train_params), max_steps=1000)
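In [ ]:
# Optional: the histograms and learning-rate scalar logged above can be
# inspected with TensorBoard pointed at the run directory, e.g. from a shell:
#   tensorboard --logdir=cnn_model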
In [ ]:
estimator.evaluate(input_fn=lambda: dataset_input_fn(eval_params), steps=10)
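In [ ]:
# Inference sketch (illustrative, reusing the pipeline above): run the input_fn
# in PREDICT mode so no shuffling or distortion is applied; Estimator.predict
# ignores the labels half of the (image, label) pairs.
predict_params = dict(eval_params, mode=tf.estimator.ModeKeys.PREDICT)
for i, p in enumerate(estimator.predict(input_fn=lambda: dataset_input_fn(predict_params))):
    print('class:', p['classes'], 'max prob:', p['probabilities'].max())
    if i >= 4:  # peek at the first five predictions only
        break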