In [24]:
# Softmax Regression
import tensorflow as tf
import numpy as np
# input_data ships with TensorFlow 1.x and handles downloading/parsing MNIST.
from tensorflow.examples.tutorials.mnist import input_data
In [2]:
# Downloads MNIST into MNIST_data/ on first run; one_hot=True encodes each
# label as a 10-dimensional indicator vector.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
In [9]:
print(mnist.train.images.shape, mnist.test.images.shape, mnist.validation.images.shape)
print(mnist.train.labels.shape)
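The images arrive flattened into rows of 784 values. A quick sketch (illustrative, not part of the original walkthrough) of recovering the 2-D layout:

In [ ]:
# Each row of mnist.train.images is a flattened 28x28 grayscale image with
# pixel values in [0, 1]; reshape one row to inspect it as a 2-D array.
img = mnist.train.images[0].reshape(28, 28)
print(img.shape)  # (28, 28)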
In [11]:
# InteractiveSession registers itself as the default session, so Tensor.eval()
# and Operation.run() below work without passing sess explicitly.
sess = tf.InteractiveSession()
In [12]:
x = tf.placeholder(tf.float32, [None, 784])  # each row is a flattened 28x28 image
# Zero initialization is fine for a single linear layer like this one.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
In [14]:
y = tf.nn.softmax(tf.matmul(x, W) + b)  # predicted class probabilities
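As a sanity check, softmax just exponentiates and normalizes each row of logits. A minimal NumPy sketch (the helper below is illustrative, not a TensorFlow function):

In [ ]:
# softmax(z)_i = exp(z_i) / sum_j exp(z_j), computed row-wise.
# Subtracting the row max first is the standard trick to avoid overflow.
def softmax_np(z):
    z = z - z.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

print(softmax_np(np.array([[1.0, 2.0, 3.0]])))  # each row sums to 1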
In [15]:
labels = tf.placeholder(tf.float32, [None, 10])
# Mean cross-entropy over the batch; axis=1 sums over the 10 classes
# (axis is the current spelling of the deprecated reduction_indices).
cross_entropy = tf.reduce_mean(-tf.reduce_sum(labels * tf.log(y), axis=1))
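Taking tf.log of a softmax output can underflow to log(0) = -inf. A common alternative in TF 1.x is to compute the loss from the raw logits instead; a hedged sketch reusing x, W, b, and labels from above:

In [ ]:
# Fused, numerically stable softmax + cross-entropy (TF 1.x API).
logits = tf.matmul(x, W) + b
stable_xent = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))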
In [16]:
# Plain SGD with learning rate 0.5.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
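Other TF 1.x optimizers drop in the same way; the learning rate below is illustrative, and Adam typically wants a much smaller step size than plain SGD:

In [ ]:
# Drop-in alternative (sketch); its slot variables are covered by the
# global initializer run in the next cell.
adam_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)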
In [17]:
tf.global_variables_initializer().run()
In [18]:
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    train_step.run({x: batch_xs, labels: batch_ys})
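To watch convergence, the loop can fetch the loss alongside the train op; a small sketch (the print interval is arbitrary):

In [ ]:
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # sess.run can evaluate several fetches in one pass over the graph.
    _, loss = sess.run([train_step, cross_entropy],
                       feed_dict={x: batch_xs, labels: batch_ys})
    if i % 100 == 0:
        print("step %d, batch loss %.4f" % (i, loss))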
In [33]:
# Boolean vector: True where the predicted digit matches the true label.
correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(labels, 1))
train_pred = correct_pred.eval({x: mnist.train.images, labels: mnist.train.labels})
pred = correct_pred.eval({x: mnist.test.images, labels: mnist.test.labels})
In [35]:
# NumPy route: fraction of correct test-set predictions.
accuracy = np.count_nonzero(pred) / np.size(pred)
print(accuracy)
# Equivalent TensorFlow route: cast booleans to floats and take the mean.
accuracy_op = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
print(accuracy_op.eval({x: mnist.test.images, labels: mnist.test.labels}))
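Beyond the scalar accuracy, it can help to look at what the model gets wrong. A sketch using matplotlib (assumed installed; not used elsewhere in this notebook):

In [ ]:
import matplotlib.pyplot as plt

# Indices of misclassified test images, from the boolean vector above.
wrong = np.where(~pred)[0]
if wrong.size > 0:
    idx = wrong[0]
    plt.imshow(mnist.test.images[idx].reshape(28, 28), cmap="gray")
    plt.title("true label: %d" % np.argmax(mnist.test.labels[idx]))
    plt.show()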