Build a fully connected neural network with two hidden layers (a.k.a. a multilayer perceptron) with TensorFlow's Eager API.
This example uses some of TensorFlow's higher-level wrappers (tf.estimator, tf.layers, tf.metrics, ...); check the 'neural_network_raw' example for a raw, more detailed TensorFlow implementation.
This example uses the MNIST database of handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in fixed-size images (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flattened into a 1-D numpy array of 784 features (28*28).
More info: http://yann.lecun.com/exdb/mnist/
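As a quick illustration of that flattening (a minimal NumPy sketch; the random array below is just a stand-in for a real MNIST image):
In [ ]:
import numpy as np

# Stand-in 28x28 grayscale image with values in [0, 1]
image = np.random.rand(28, 28).astype(np.float32)

# Flatten to a 1-D array of 784 features, as the MNIST loader below does
flat = image.reshape(-1)
assert flat.shape == (784,)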
In [1]:
from __future__ import print_function
import tensorflow as tf
In [2]:
# Set Eager API
tf.enable_eager_execution()
tfe = tf.contrib.eager
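With eager execution enabled, operations run immediately and return concrete values instead of building a graph to run in a session. A minimal sanity check (a sketch, assuming the TF 1.x eager setup above):
In [ ]:
# Ops execute immediately; no session needed
x = tf.constant([[2.0]])
print(tf.matmul(x, x))  # tf.Tensor([[4.]], shape=(1, 1), dtype=float32)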
In [3]:
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
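With one_hot=False the labels come back as plain integer class indices (0-9), which is what the sparse cross-entropy loss below expects. Note that read_data_sets holds out 5,000 of the 60,000 training examples as a validation split, so a quick shape check looks like this (a sketch):
In [ ]:
print(mnist.train.images.shape)  # (55000, 784), float32 values in [0, 1]
print(mnist.train.labels.shape)  # (55000,), integer class indices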
In [4]:
# Parameters
learning_rate = 0.001
num_steps = 1000
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
In [5]:
# Using TF Dataset to split data into batches
dataset = tf.data.Dataset.from_tensor_slices(
    (mnist.train.images, mnist.train.labels))
dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
dataset_iter = tfe.Iterator(dataset)
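Each call to the iterator's next() method returns one batch as a pair of eager tensors. Peeking at a single batch is a handy sanity check (a sketch; this consumes one batch, which is harmless since the dataset repeats):
In [ ]:
images, labels = dataset_iter.next()
print(images.shape)  # (128, 784)
print(labels.shape)  # (128,)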
In [6]:
# Define the neural network. To use the eager API and tf.layers API together,
# we must instantiate a tfe.Network class as follows:
class NeuralNet(tfe.Network):
    def __init__(self):
        # Define each layer
        super(NeuralNet, self).__init__()
        # Hidden fully connected layer with 256 neurons
        self.layer1 = self.track_layer(
            tf.layers.Dense(n_hidden_1, activation=tf.nn.relu))
        # Hidden fully connected layer with 256 neurons
        self.layer2 = self.track_layer(
            tf.layers.Dense(n_hidden_2, activation=tf.nn.relu))
        # Output fully connected layer with a neuron for each class
        self.out_layer = self.track_layer(tf.layers.Dense(num_classes))

    def call(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        return self.out_layer(x)

neural_net = NeuralNet()
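The Dense layers create their variables lazily on the first call, so running the still-untrained network on a dummy batch is a convenient shape check (a sketch; the zeros input is just a placeholder):
In [ ]:
logits = neural_net(tf.zeros([1, num_input]))
print(logits.shape)  # (1, 10): one logit per class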
In [7]:
# Cross-entropy loss function
def loss_fn(inference_fn, inputs, labels):
    # Using sparse softmax cross-entropy: labels are integer class
    # indices, not one-hot vectors
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=inference_fn(inputs), labels=labels))

# Calculate accuracy
def accuracy_fn(inference_fn, inputs, labels):
    prediction = tf.nn.softmax(inference_fn(inputs))
    correct_pred = tf.equal(tf.argmax(prediction, 1), labels)
    return tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Adam optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

# Compute gradients
grad = tfe.implicit_gradients(loss_fn)
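tfe.implicit_gradients wraps loss_fn so that calling grad(...) evaluates the loss and differentiates it with respect to every trainable variable touched along the way, returning (gradient, variable) pairs in the format apply_gradients expects. One update step therefore looks like this (a sketch, assuming a batch x_batch, y_batch like the ones drawn in the training loop below):
In [ ]:
# grad(...) returns a list of (gradient, variable) pairs covering
# all trainable variables used inside loss_fn
grads_and_vars = grad(neural_net, x_batch, y_batch)
optimizer.apply_gradients(grads_and_vars)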
In [8]:
# Training
average_loss = 0.
average_acc = 0.
for step in range(num_steps):

    # Iterate through the dataset
    d = dataset_iter.next()

    # Images
    x_batch = d[0]
    # Labels, cast to int64 for the sparse loss and the argmax comparison
    y_batch = tf.cast(d[1], dtype=tf.int64)

    # Compute the batch loss
    batch_loss = loss_fn(neural_net, x_batch, y_batch)
    average_loss += batch_loss
    # Compute the batch accuracy
    batch_accuracy = accuracy_fn(neural_net, x_batch, y_batch)
    average_acc += batch_accuracy

    if step == 0:
        # Display the initial cost, before optimizing
        print("Initial loss= {:.9f}".format(average_loss))

    # Update the variables following the gradients info
    optimizer.apply_gradients(grad(neural_net, x_batch, y_batch))

    # Display info
    if (step + 1) % display_step == 0 or step == 0:
        if step > 0:
            average_loss /= display_step
            average_acc /= display_step
        print("Step:", '%04d' % (step + 1), " loss=",
              "{:.9f}".format(average_loss), " accuracy=",
              "{:.4f}".format(average_acc))
        average_loss = 0.
        average_acc = 0.
In [9]:
# Evaluate the model on the test image set
testX = mnist.test.images
testY = mnist.test.labels

test_acc = accuracy_fn(neural_net, testX, testY)
print("Test set accuracy: {:.4f}".format(test_acc))