Build a generative adversarial network (GAN) to generate digit images from a noise distribution with TensorFlow.
This example uses MNIST handwritten digits. The dataset contains 60,000 training examples and 10,000 test examples. The digits have been size-normalized and centered in fixed-size images (28x28 pixels), with pixel values scaled from 0 to 1. For simplicity, each image is flattened into a 1-D numpy array of 784 features (28*28).
More info: http://yann.lecun.com/exdb/mnist/
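As an aside (not part of the original notebook), the flattening mentioned above is a plain row-major reshape, so the 28x28 layout can always be recovered:

import numpy as np
# A 28x28 image and its flattened 784-vector are related by reshape (row-major).
img = np.arange(28 * 28, dtype=np.float32).reshape(28, 28)
flat = img.reshape(784)            # the 1-D form the network consumes
restored = flat.reshape(28, 28)    # recover the original 2-D layout
assert np.array_equal(img, restored)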
In [1]:
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
In [2]:
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
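As an optional sanity check (an addition, not in the original notebook), note that read_data_sets holds out 5,000 of the 60,000 training images as a validation split:

# Inspect the loaded split; images are already flattened and scaled to [0, 1].
print(mnist.train.images.shape)       # (55000, 784)
print(mnist.validation.images.shape)  # (5000, 784)
print(mnist.test.images.shape)        # (10000, 784)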
In [3]:
# Training Params
num_steps = 70000
batch_size = 128
learning_rate = 0.0002
# Network Params
image_dim = 784 # 28*28 pixels
gen_hidden_dim = 256
disc_hidden_dim = 256
noise_dim = 100 # Noise data points
# A custom initialization (see Xavier Glorot init)
def glorot_init(shape):
    return tf.random_normal(shape=shape, stddev=1. / tf.sqrt(shape[0] / 2.))
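For reference, the initializer above draws weights from a zero-mean normal whose standard deviation shrinks with the layer's fan-in:

$$\sigma = \frac{1}{\sqrt{n_{\mathrm{in}} / 2}} = \sqrt{\frac{2}{n_{\mathrm{in}}}}$$

where $n_{\mathrm{in}}$ is shape[0]. This keeps activation variance roughly stable across layers; note it is the fan-in-only variant, whereas Glorot and Bengio's original formulation averages the fan-in and fan-out.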
In [4]:
# Store layers weight & bias
weights = {
    'gen_hidden1': tf.Variable(glorot_init([noise_dim, gen_hidden_dim])),
    'gen_out': tf.Variable(glorot_init([gen_hidden_dim, image_dim])),
    'disc_hidden1': tf.Variable(glorot_init([image_dim, disc_hidden_dim])),
    'disc_out': tf.Variable(glorot_init([disc_hidden_dim, 1])),
}
biases = {
    'gen_hidden1': tf.Variable(tf.zeros([gen_hidden_dim])),
    'gen_out': tf.Variable(tf.zeros([image_dim])),
    'disc_hidden1': tf.Variable(tf.zeros([disc_hidden_dim])),
    'disc_out': tf.Variable(tf.zeros([1])),
}
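As a rough size check (an addition for context), the parameter counts implied by the dictionaries above are easy to tally:

# Trainable parameters per network (weights + biases).
gen_params = (noise_dim * gen_hidden_dim + gen_hidden_dim      # hidden layer
              + gen_hidden_dim * image_dim + image_dim)        # output layer
disc_params = (image_dim * disc_hidden_dim + disc_hidden_dim   # hidden layer
               + disc_hidden_dim * 1 + 1)                      # output layer
print(gen_params, disc_params)  # 227344 201217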
In [5]:
# Generator
def generator(x):
    hidden_layer = tf.matmul(x, weights['gen_hidden1'])
    hidden_layer = tf.add(hidden_layer, biases['gen_hidden1'])
    hidden_layer = tf.nn.relu(hidden_layer)
    out_layer = tf.matmul(hidden_layer, weights['gen_out'])
    out_layer = tf.add(out_layer, biases['gen_out'])
    out_layer = tf.nn.sigmoid(out_layer)
    return out_layer

# Discriminator
def discriminator(x):
    hidden_layer = tf.matmul(x, weights['disc_hidden1'])
    hidden_layer = tf.add(hidden_layer, biases['disc_hidden1'])
    hidden_layer = tf.nn.relu(hidden_layer)
    out_layer = tf.matmul(hidden_layer, weights['disc_out'])
    out_layer = tf.add(out_layer, biases['disc_out'])
    out_layer = tf.nn.sigmoid(out_layer)
    return out_layer
# Build Networks
# Network Inputs
gen_input = tf.placeholder(tf.float32, shape=[None, noise_dim], name='input_noise')
disc_input = tf.placeholder(tf.float32, shape=[None, image_dim], name='disc_input')
# Build Generator Network
gen_sample = generator(gen_input)
# Build 2 Discriminator Networks (one from real images, one from generated samples)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample)
# Build Loss
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
# Build Optimizers
optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate)
optimizer_disc = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Training Variables for each optimizer
# By default in TensorFlow, all variables are updated by each optimizer, so we
# need to specify for each optimizer the exact list of variables to update.
# Generator Network Variables
gen_vars = [weights['gen_hidden1'], weights['gen_out'],
            biases['gen_hidden1'], biases['gen_out']]
# Discriminator Network Variables
disc_vars = [weights['disc_hidden1'], weights['disc_out'],
             biases['disc_hidden1'], biases['disc_out']]
# Create training operations
train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
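For reference, the two losses built above are the standard GAN objectives: the discriminator maximizes the log-likelihood of telling real from fake, and the generator uses the non-saturating variant that maximizes $\log D(G(z))$ rather than minimizing $\log(1 - D(G(z)))$:

$$\mathcal{L}_D = -\,\mathbb{E}\big[\log D(x) + \log(1 - D(G(z)))\big], \qquad \mathcal{L}_G = -\,\mathbb{E}\big[\log D(G(z))\big]$$

Since tf.log is applied directly to sigmoid outputs, the losses can become NaN if the discriminator saturates; clipping the log inputs to a small epsilon is a common safeguard, not used here.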
In [6]:
# Start Training
# Start a new TF session
sess = tf.Session()
# Run the initializer
sess.run(init)
# Training
for i in range(1, num_steps+1):
    # Prepare Data
    # Get the next batch of MNIST data (only images are needed, not labels)
    batch_x, _ = mnist.train.next_batch(batch_size)
    # Generate noise to feed to the generator
    z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])
    # Train
    feed_dict = {disc_input: batch_x, gen_input: z}
    _, _, gl, dl = sess.run([train_gen, train_disc, gen_loss, disc_loss],
                            feed_dict=feed_dict)
    if i % 2000 == 0 or i == 1:
        print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))
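Optionally (an addition, not in the original notebook), the trained weights can be checkpointed with TensorFlow's standard saver so the generator can be reused later; the path below is just an example:

# Persist all variables to a checkpoint file.
saver = tf.train.Saver()
save_path = saver.save(sess, '/tmp/gan_mnist.ckpt')
print('Model saved to %s' % save_path)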
In [7]:
# Testing
# Generate images from noise, using the generator network.
n = 6
canvas = np.empty((28 * n, 28 * n))
for i in range(n):
    # Noise input.
    z = np.random.uniform(-1., 1., size=[n, noise_dim])
    # Generate image from noise.
    g = sess.run(gen_sample, feed_dict={gen_input: z})
    # Reverse colours for better display
    g = -1 * (g - 1)
    for j in range(n):
        # Draw the generated digits
        canvas[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = g[j].reshape([28, 28])
plt.figure(figsize=(n, n))
plt.imshow(canvas, origin="upper", cmap="gray")
plt.show()
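As a final optional experiment (an addition, not in the original notebook), linearly interpolating between two noise vectors and decoding each step is a common qualitative check that the generator has learned a smooth manifold rather than memorizing samples:

# Interpolate between two random noise vectors and render the decoded images.
z1 = np.random.uniform(-1., 1., size=[1, noise_dim])
z2 = np.random.uniform(-1., 1., size=[1, noise_dim])
alphas = np.linspace(0., 1., n)
z_path = np.concatenate([(1. - a) * z1 + a * z2 for a in alphas], axis=0)
imgs = sess.run(gen_sample, feed_dict={gen_input: z_path})
plt.figure(figsize=(n, 1))
plt.imshow(np.hstack([img.reshape(28, 28) for img in imgs]), cmap="gray")
plt.axis('off')
plt.show()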