Restore the FashionNet architecture using the TensorFlow graph in ./graph and the code sample below
In [1]:
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from IPython.display import Image

# Fashion-MNIST is a drop-in replacement for MNIST: 28x28 grayscale images,
# 10 classes; one_hot=True yields one-hot label vectors.
mnist = input_data.read_data_sets('fashion-mnist/data/fashion', one_hot=True)
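As a quick sanity check the loaded arrays can be inspected; the shapes in the comments are the standard Fashion-MNIST splits and are an assumption here, not verified against this particular download.
In [ ]:
# Sanity check of the loaded data (split sizes assumed to be the standard ones).
print(mnist.train.images.shape)  # expected: (55000, 784)
print(mnist.train.labels.shape)  # expected: (55000, 10) with one_hot=True
print(mnist.test.images.shape)   # expected: (10000, 784)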
In [2]:
def weight_variable(shape, name):
    """Weights initialized from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name):
    """Biases initialized to a small positive constant."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
In [3]:
def conv2d(x, W, name=None):
    """Stride-1 convolution; SAME padding preserves spatial size."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1],
                        padding='SAME', name=name)

def max_pool_2x2(x, name=None):
    """2x2 max-pooling; halves height and width."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME', name=name)
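A minimal shape check for these helpers (purely illustrative; the dummy tensor and the scratch graph are not part of the model): SAME padding with stride 1 keeps the 28x28 spatial size, and each 2x2 pool halves it, which is why the fully connected layer in layer 2 below expects 7 * 7 * 64 = 3136 inputs.
In [ ]:
# Illustrative only: verify the spatial sizes the helpers produce.
with tf.Graph().as_default():  # scratch graph so the check doesn't pollute the model
    dummy = tf.zeros([1, 28, 28, 1])
    w1 = weight_variable([5, 5, 1, 32], name='w_check1')
    c1 = conv2d(dummy, w1)             # -> (1, 28, 28, 32)
    p1 = max_pool_2x2(c1)              # -> (1, 14, 14, 32)
    w2 = weight_variable([5, 5, 32, 64], name='w_check2')
    p2 = max_pool_2x2(conv2d(p1, w2))  # -> (1, 7, 7, 64) = 3136 values per image
    print(c1.shape, p1.shape, p2.shape)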
In [4]:
class FashionNet:
    def __init__(self):
        pass

    def _create_placeholders(self):
        with tf.name_scope("data"):
            self.x = tf.placeholder(dtype=tf.float32, shape=[None, 784], name='X')
            self.y_ = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='y')
    def _create_layer1(self):
        with tf.name_scope("layer1"):
            W_conv1 = weight_variable([5, 5, 1, 32], name='W_conv1')
            b_conv1 = bias_variable([32], name='b_conv1')
            x_image = tf.reshape(self.x, [-1, 28, 28, 1], name='x_image')
            h_conv1 = tf.nn.relu(tf.add(
                conv2d(x_image, W_conv1, name='Conv2D'),
                b_conv1))
            # h_conv1 is already rectified; the original applied ReLU twice here.
            self.h_pool1 = max_pool_2x2(h_conv1, name='h_pool1')  # 28x28 -> 14x14
    def _create_layer2(self):
        with tf.name_scope("layer2"):
            W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')
            b_conv2 = bias_variable([64], name='b_conv2')
            h_conv2 = tf.nn.relu(tf.add(conv2d(self.h_pool1, W_conv2), b_conv2), name='h_conv2')
            h_pool2 = max_pool_2x2(h_conv2, name='h_pool2')  # 14x14 -> 7x7
            # Fully connected layer: 7 * 7 * 64 = 3136 inputs.
            W_fc1 = weight_variable([3136, 1024], name='W_fc1')
            b_fc1 = bias_variable([1024], name='b_fc1')
            h_pool2_flat = tf.reshape(h_pool2, [-1, 3136])
            h_fc1 = tf.nn.relu(tf.add(tf.matmul(h_pool2_flat, W_fc1), b_fc1), name='h_fc1')
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob, name='h_fc1_drop')
            # Readout layer: 10 class logits.
            W_fc2 = weight_variable([1024, 10], name='W_fc2')
            b_fc2 = bias_variable([10], name='b_fc2')
            self.y_conv = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2, name='y_conv')
    def _create_loss(self):
        with tf.name_scope("loss"):
            self.cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
    def _create_optimizer(self):
        with tf.name_scope("optimizer"):
            self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.cross_entropy)
    def _create_summaries(self):
        with tf.name_scope("summaries"):
            tf.summary.scalar('loss', self.cross_entropy)
            correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            self.summary = tf.summary.merge_all()
        # Keep the saver on the instance so checkpoints can be saved and restored;
        # the original created it as an unused local.
        self.saver = tf.train.Saver()
    def build_graph(self):
        self._create_placeholders()
        self._create_layer1()
        self._create_layer2()
        self._create_loss()
        self._create_optimizer()
        self._create_summaries()
    def train_model(self):
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            summary_writer = tf.summary.FileWriter('graphs', sess.graph)
            for i in range(2000):
                batch = mnist.train.next_batch(50)
                if i % 100 == 0:
                    # Evaluate without dropout (keep_prob=1.0).
                    train_accuracy = self.accuracy.eval(feed_dict={
                        self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0})
                    print('step %d, training accuracy %g' % (i, train_accuracy))
                _, summary_str = sess.run([self.train_step, self.summary],
                                          feed_dict={self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.5})
                summary_writer.add_summary(summary_str, i)
            # Checkpoint under ./graph so the model can be restored later; the
            # 'fashionnet' checkpoint name is a choice made here, and the
            # directory is assumed to exist already.
            self.saver.save(sess, 'graph/fashionnet')
            print('test accuracy {}'.format(self.accuracy.eval(feed_dict={self.x: mnist.test.images,
                                                                          self.y_: mnist.test.labels,
                                                                          self.keep_prob: 1.0})))
In [5]:
fn = FashionNet()
fn.build_graph()
fn.train_model()
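To restore the trained model, rebuild the graph and load the variables through the saver. A minimal sketch, assuming a checkpoint exists under ./graph (e.g. the one written by train_model above; tf.train.latest_checkpoint returns None if nothing was saved there):
In [ ]:
# Minimal restore sketch: rebuild the same graph, then load saved variables.
tf.reset_default_graph()  # discard the graph left over from training
fn2 = FashionNet()
fn2.build_graph()
with tf.Session() as sess:
    # Assumes a checkpoint was saved under ./graph as shown in train_model.
    fn2.saver.restore(sess, tf.train.latest_checkpoint('graph'))
    acc = sess.run(fn2.accuracy, feed_dict={fn2.x: mnist.test.images,
                                            fn2.y_: mnist.test.labels,
                                            fn2.keep_prob: 1.0})
    print('restored test accuracy {}'.format(acc))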