In [17]:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
% matplotlib inline
plt.style.use('ggplot')
In [18]:
# Download (if not cached in ./MNIST_data) and load MNIST with one-hot labels.
# NOTE(review): tensorflow.examples.tutorials.mnist is deprecated and was
# removed in TF 2.x — this cell requires a TF 1.x installation.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot = True)
In [19]:
# Sanity check on the training array — presumably (n_images, 784), i.e. each
# row a flattened 28x28 image (consistent with the reshape used when plotting).
print(mnist.train.images.shape)
In [20]:
plt.imshow(mnist.train.images[0].reshape(28,28))
for i in range(0,9):
if mnist.train.labels[0][i] == 1:
print('LABEL:{}' .format(i))
In [23]:
input_nodes = 28*28
output_nodes = 10
learning_rate = 0.001
# Creating the graph
tf.reset_default_graph()
with tf.name_scope('Placeholders'):
x = tf.placeholder(dtype=tf.float32, shape=[None, input_nodes], name ='Input')
y = tf.placeholder(dtype=tf.float32, shape=[None, output_nodes], name = 'Labels')
x_image = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('Input_images', x_image, 3)
with tf.name_scope('Operations'):
W = tf.get_variable(dtype=tf.float32,
initializer=tf.random_normal_initializer(mean = 0, stddev=0.1),
shape=[input_nodes, output_nodes],
name = 'W')
b = tf.get_variable(dtype=tf.float32,
initializer=tf.constant_initializer(0.0),
shape =[output_nodes],
name = 'b'
)
h = tf.nn.softmax(tf.matmul(x,W) + b)
# Cross Entropy
with tf.name_scope('xEnt'):
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(h), reduction_indices=[1]))
tf.summary.scalar('Cross_Entropy', cross_entropy) # TB
# Optimiser
with tf.name_scope('Train'):
optimiser = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
# Accuracy
with tf.name_scope('Accuracy'):
correct_pediction = tf.equal(tf.arg_max(y,1), tf.arg_max(h,1))
accuracy = tf.reduce_mean(tf.cast(correct_pediction, tf.float32))
tf.summary.scalar('Accuracy', accuracy) # TB
Out[23]:
In [24]:
%%time
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# TensorBoard Filewriter
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("./tmp/mnist/1")
writer.add_graph(sess.graph)
# Train
batch_size = 100
epoch = 10
epoch_size = 500
n_iter = epoch * epoch_size
for iter in range(n_iter+1):
xs, ys = mnist.train.next_batch(batch_size)
sess.run(optimiser, feed_dict={x: xs, y: ys})
if iter % 5 ==0:
s = sess.run(merged_summary, feed_dict={x:xs, y:ys})
writer.add_summary(s,iter)
if iter%epoch_size ==0:
print("Epoch: {} Cross Entropy: {:.2f} " .format(int(iter/epoch_size),
cross_entropy.eval(feed_dict={x:xs, y:ys}, session=sess)))
In [ ]:
t_accur = sess.run(accuracy, feed_dict={x:mnist.train.images, y:mnist.train.labels})
print('Test Accuracy: {:4.2f}%' .format(100*t_accur))
In [ ]:
v_accur = sess.run(accuracy, feed_dict={x:mnist.validation.images, y: mnist.validation.labels})
print('Valid Accuracy: {:4.2f}%' .format(100*v_accur))
In [ ]:
t_accur = sess.run(accuracy, feed_dict={x:mnist.test.images, y: mnist.test.labels})
print('Test Accuracy: {:4.2f}%' .format(100*t_accur))