In [ ]:
# Exercise: restore the FashionNet architecture from the TensorFlow graph
# saved in ./graph, filling in the blanks in this code sample
# dataset available at https://github.com/zalandoresearch/fashion-mnist
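In [ ]:
# Optional sketch for the restore step: list the op names recorded in the
# saved graph so the architecture can be read back. 'graph/model.meta' is a
# hypothetical filename; substitute whatever .meta file actually sits in
# ./graph, or simply run `tensorboard --logdir=./graph` and use the graph view.
import tensorflow as tf
restored = tf.Graph()
with restored.as_default():
    tf.train.import_meta_graph('graph/model.meta')
for op in restored.get_operations():
    print(op.name)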
In [1]:
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from IPython.display import Image
mnist = input_data.read_data_sets('fashion-mnist/data/fashion', one_hot=True)
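In [ ]:
# Quick sanity check: read_data_sets returns images flattened to 28*28 = 784
# floats and, with one_hot=True, labels as one-hot vectors over the 10 classes.
print(mnist.train.images.shape, mnist.train.labels.shape)
print(mnist.test.images.shape, mnist.test.labels.shape)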
In [2]:
def weight_variable(shape, name):
    # Small truncated-normal initialization to break symmetry between units
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name):
    # Slightly positive bias keeps ReLU units active at the start of training
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
In [3]:
def conv2d(x, W, name=None):
    # Convolution with stride 1 and zero padding ('SAME') keeps the spatial size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1],
                        padding='SAME', name=name)

def max_pool_2x2(x, name=None):
    # 2x2 max-pooling with stride 2 halves each spatial dimension
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME', name=name)
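In [ ]:
# Optional shape check: conv2d with 'SAME' padding and stride 1 preserves the
# 28x28 spatial size, and each 2x2 max-pool halves it, so two conv/pool blocks
# take 28 -> 14 -> 7. x_demo/w_demo are throwaway tensors, not model weights.
x_demo = tf.zeros([1, 28, 28, 1])   # one dummy grayscale image
w_demo = tf.zeros([5, 5, 1, 32])    # a 5x5 filter bank with 32 output channels
print(max_pool_2x2(conv2d(x_demo, w_demo)).shape)  # expect (1, 14, 14, 32)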
In [ ]:
class FashionNet:
    def __init__(self):
        pass

    def _create_placeholders(self):
        with tf.name_scope("data"):
            # Flattened 28x28 images and one-hot labels over the 10 classes
            self.x = tf.placeholder(tf.float32, [None, 784], name='x')
            self.y_ = tf.placeholder(tf.float32, [None, 10], name='y_')

    def _create_layer1(self):
        with tf.name_scope("layer1"):
            # NOTE: the shapes filled in below are the standard deep-MNIST CNN
            # values, assumed here; verify them against the graph saved in ./graph.
            W_conv1 = weight_variable([5, 5, 1, 32], name='W_conv1')
            b_conv1 = bias_variable([32], name='b_conv1')
            x_image = tf.reshape(self.x, [-1, 28, 28, 1])
            h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
            self.h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14

    def _create_layer2(self):
        with tf.name_scope("layer2"):
            W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')
            b_conv2 = bias_variable([64], name='b_conv2')
            h_conv2 = tf.nn.relu(conv2d(self.h_pool1, W_conv2) + b_conv2)
            h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7
            # Fully connected layer over the flattened 7x7x64 feature map
            W_fc1 = weight_variable([7 * 7 * 64, 1024], name='W_fc1')
            b_fc1 = bias_variable([1024], name='b_fc1')
            h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
            # Dropout: keep_prob is fed at run time (0.5 train, 1.0 eval)
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
            # Readout layer producing the 10-class logits
            W_fc2 = weight_variable([1024, 10], name='W_fc2')
            b_fc2 = bias_variable([10], name='b_fc2')
            # self.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
            self.y_conv = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2, name='y_conv')

    def _create_loss(self):
        with tf.name_scope("loss"):
            self.cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))

    def _create_optimizer(self):
        with tf.name_scope("optimizer"):
            self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.cross_entropy)

    def _create_summaries(self):
        with tf.name_scope("summaries"):
            tf.summary.scalar('loss', self.cross_entropy)
            correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            self.summary = tf.summary.merge_all()
        # Keep the saver on the instance so the trained model can be checkpointed
        self.saver = tf.train.Saver()

    def build_graph(self):
        self._create_placeholders()
        self._create_layer1()
        self._create_layer2()
        self._create_loss()
        self._create_optimizer()
        self._create_summaries()

    def train_model(self):
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            summary_writer = tf.summary.FileWriter('graphs', sess.graph)
            for i in range(20000):
                batch = mnist.train.next_batch(50)
                if i % 100 == 0:
                    # Report batch accuracy with dropout disabled
                    train_accuracy = self.accuracy.eval(feed_dict={
                        self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0})
                    print('step %d, training accuracy %g' % (i, train_accuracy))
                _, summary_str = sess.run([self.train_step, self.summary],
                                          feed_dict={self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.5})
                summary_writer.add_summary(summary_str, i)
            print('test accuracy {}'.format(self.accuracy.eval(feed_dict={
                self.x: mnist.test.images, self.y_: mnist.test.labels, self.keep_prob: 1.0})))
In [ ]:
fn = FashionNet()
fn.build_graph()
fn.train_model()
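In [ ]:
# The FileWriter above logs summaries and the rebuilt graph to './graphs';
# compare them against the original with: tensorboard --logdir=graphs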