In [45]:
from tensorflow.examples.tutorials.mnist import input_data
# Downloads MNIST into MNIST_data/ on the first run; one_hot=True encodes each label as a length-10 one-hot vector.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
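As a quick sanity check (a small sketch added here, not part of the original notebook), you can confirm the split sizes and see what a one-hot label looks like:

print(mnist.train.images.shape)  # (55000, 784): 55,000 flattened 28x28 images
print(mnist.test.images.shape)   # (10000, 784)
print(mnist.train.labels[0])     # length-10 vector with a 1 at the true digit's index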
In [46]:
%matplotlib inline
import matplotlib.pyplot as plt
sample = mnist.train.images[0]              # first training image, stored flattened
print(sample.shape)                         # (784,)
reshaped_sample = sample.reshape((28, 28))  # back to the original 2-D layout
print(reshaped_sample.shape)                # (28, 28)
In [47]:
plt.imshow(reshaped_sample, cmap='gray')
Out[47]: (figure: the 28×28 training digit rendered in grayscale)
In [48]:
test_sample = mnist.test.images[0]                    # first test image
print(test_sample.shape)                              # (784,)
reshaped_test_sample = test_sample.reshape((28, 28))
print(reshaped_test_sample.shape)                     # (28, 28)
In [49]:
plt.imshow(reshaped_test_sample, cmap='gray')
Out[49]: (figure: the 28×28 test digit rendered in grayscale)
In [50]:
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 784])  # input: any number of flattened 28x28 images
W = tf.Variable(tf.zeros([784, 10]))         # weights, initialized to zero
b = tf.Variable(tf.zeros([10]))              # biases, one per digit class
y = tf.matmul(x, W) + b                      # logits: unnormalized class scores, one row per image
reduce_sum : adds the elements along the second dimension of y
reduce_mean : computes the mean over all the examples in the batch
GradientDescentOptimizer : updates the variables by gradient descent at a fixed learning rate
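To make the two reductions concrete, here is a minimal sketch (added for illustration, not from the original notebook) on a toy 2×3 tensor:

vals = tf.constant([[1., 2., 3.],
                    [4., 5., 6.]])
row_sums = tf.reduce_sum(vals, axis=1)  # [6., 15.]: sums along the second dimension
mean_all = tf.reduce_mean(vals)         # 3.5: mean over every element
with tf.Session() as s:
    print(s.run([row_sums, mean_all]))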
In [51]:
y_ = tf.placeholder(tf.float32, [None, 10])  # true labels, one one-hot row per image
softmax_cross_entropy_with_logits(labels, logits, dim=-1, name=None) : computes the softmax cross entropy between logits and labels; from TensorFlow 1.0 onward, labels and logits must be passed as keyword arguments.
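As a sanity check (a sketch added here, not part of the original), the op can be compared against the textbook formula -sum(y_ * log(softmax(y))) on a single row:

logits = tf.constant([[2.0, 1.0, 0.1]])
labels = tf.constant([[1.0, 0.0, 0.0]])
op_value = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
manual = -tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), axis=1)
with tf.Session() as s:
    print(s.run([op_value, manual]))  # both print roughly [0.417]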
In [52]:
learning_rate = 0.5
# Average softmax cross-entropy over the batch; note the keyword arguments (labels=, logits=).
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
In [53]:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
In [54]:
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)  # stochastic mini-batch of 100 examples
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
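If you want to watch the loss fall during training, a variant of the same loop (an illustrative sketch, not from the original notebook) can fetch cross_entropy alongside the update:

for step in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    _, loss_value = sess.run([train_step, cross_entropy],
                             feed_dict={x: batch_xs, y_: batch_ys})
    if step % 100 == 0:
        print("step %d, cross entropy %.4f" % (step, loss_value))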
In [55]:
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))  # compare predicted class to true class per example
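tf.argmax(., 1) returns the index of the largest entry in each row, so on the logits it gives the predicted digit and on a one-hot label it recovers the true digit. A tiny illustration (a sketch, assuming the session from above is still open):

one_hot_row = tf.constant([[0., 0., 0., 0., 0., 0., 0., 1., 0., 0.]])
print(sess.run(tf.argmax(one_hot_row, 1)))  # [7]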
In [56]:
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # fraction of correct predictions
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
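This simple softmax model typically reaches about 92% test accuracy. As a follow-up (a sketch assuming the session and placeholders above, not part of the original), you can inspect one of the test digits the model gets wrong:

import numpy as np
predictions = sess.run(tf.argmax(y, 1), feed_dict={x: mnist.test.images})
true_labels = np.argmax(mnist.test.labels, axis=1)
wrong = np.where(predictions != true_labels)[0]
if len(wrong) > 0:
    i = wrong[0]
    plt.imshow(mnist.test.images[i].reshape((28, 28)), cmap='gray')
    plt.title("predicted %d, actual %d" % (predictions[i], true_labels[i]))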