In [ ]:
    
"""This area sets up the Jupyter environment.
Please do not modify anything in this cell.
"""
import os
import sys
import time
# Add the project root to PYTHONPATH so project modules can be imported
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# Import miscellaneous modules
from IPython.core.display import display, HTML
# Set CSS styling
with open('../admin/custom.css', 'r') as f:
    style = """<style>\n{}\n</style>""".format(f.read())
    display(HTML(style))
# Plots will be shown inside the notebook
%matplotlib notebook
import matplotlib.pyplot as plt
import problem_unittests as tests
    
In [ ]:
    
import numpy as np 
from keras.datasets import mnist
import admin.tools as tools
# Load MNIST data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_data = np.concatenate((X_train, X_test))
    
As we have done previously with MNIST, the first thing we will be doing is normalisation. However, this time we will normalise the 8-bit images from [0, 255] to [-1, 1].
Previous research with GANs indicates that this normalisation yields better results (reference paper).
In [ ]:
    
def normalize_images(images):
    """
    Normalise image values from [0, 255] to [-1, 1].
    :param images: NumPy tensor of shape N x R x C x CH,
    where R = number of rows in an image,
    C = number of columns in an image,
    CH = number of channels in an image.
    
    :return: images with their values normalised to [-1, 1].
    """
    images = None
    return images
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Test normalisation function and normalise the data if it passes
tests.test_normalize_images(normalize_images)
X_data = normalize_images(X_data)
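If you get stuck, one possible implementation is sketched below under the hypothetical name normalize_images_example. It is only an illustration (it assumes 8-bit input); your own version may differ as long as it passes the test above.
In [ ]:
    
# A possible reference implementation (sketch only):
# map [0, 255] to [-1, 1] by scaling and shifting
def normalize_images_example(images):
    return images.astype(np.float32) / 127.5 - 1.0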
    
As we did in a previous notebook, we will add an extra dimension to our greyscale images.
In [ ]:
    
X_data = np.expand_dims(X_data, axis=-1)
print('Shape of X_data {}'.format(X_data.shape))
    
Keras references: Reshape()
In [ ]:
    
# Import some useful keras libraries
import keras
from keras.models import Model
from keras.layers import *
def generator(z_dim, nb_outputs, output_shape):
    
    # Define the input noise using Input()
    latent_var = None
    # Insert the desired number of layers for your network
    x = None
    
    # Map your last layer to nb_outputs
    x = None
    
    # Reshape your data
    x = Reshape(output_shape)(x)
    model = Model(inputs=latent_var, outputs=x)
    return model
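For reference, a minimal fully connected generator along these lines is sketched below under the hypothetical name generator_example. The layer sizes are arbitrary choices, not a required architecture; a tanh output keeps generated values in [-1, 1], matching the normalised data.
In [ ]:
    
# A possible reference implementation (sketch only)
def generator_example(z_dim, nb_outputs, output_shape):
    # Noise vector as the network input
    latent_var = Input(shape=(z_dim,))
    # A couple of fully connected hidden layers
    x = Dense(256, activation='relu')(latent_var)
    x = Dense(512, activation='relu')(x)
    # Map to the number of output dimensions; tanh keeps values in [-1, 1]
    x = Dense(nb_outputs, activation='tanh')(x)
    # Reshape the flat output to the sample shape
    x = Reshape(output_shape)(x)
    return Model(inputs=latent_var, outputs=x)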
    
Now, let's build a generative network using the function you just made.
In [ ]:
    
# Define the dimension of the latent vector
z_dim = 100
# Dimension of our sample
sample_dimentions = (28, 28, 1)
# Calculate the number of dimensions in a sample
n_dimensions = 1
for x in list(sample_dimentions):
    n_dimensions *= x
print('A sample of data has shape {} composed of {} dimension(s)'.format(sample_dimentions, n_dimensions))
# Create the generative network
G = generator(z_dim, n_dimensions, sample_dimentions)
# We recommend the following optimiser
g_optim = keras.optimizers.Adam(lr=0.002, beta_1=0.5, beta_2=0.999, epsilon=1e-08, decay=0.0)
# Compile network
G.compile(loss='binary_crossentropy', optimizer=g_optim)
# Network Summary
G.summary()
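As an optional sanity check (assuming your generator compiled above), you can feed a small batch of random noise through G and confirm the output has the expected sample shape:
In [ ]:
    
# Draw a few noise vectors from U(-1, 1) and inspect the generated shape
z_check = np.random.uniform(-1, 1, (4, z_dim))
print('Generated batch shape: {}'.format(G.predict(z_check, verbose=0).shape))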
    
The discriminator network is a simple binary classifier where the output indicates the probability of the input data being real or fake.
Keras references: Reshape()
In [ ]:
    
def discriminator(input_shape, nb_inputs):
    # Define the network input to have input_shape shape
    input_x = None
    
    # Reshape your input
    x = None
    
    # Implement the rest of your classifier
    x = None
    
    probabilities = Dense(1, activation='sigmoid')(x)
    
    model = Model(inputs=input_x, outputs=probabilities)
    return model
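Again for reference, a minimal fully connected discriminator along these lines is sketched below under the hypothetical name discriminator_example; the layer sizes are arbitrary choices.
In [ ]:
    
# A possible reference implementation (sketch only)
def discriminator_example(input_shape, nb_inputs):
    # Image tensor as the network input
    input_x = Input(shape=input_shape)
    # Flatten the image to a vector of nb_inputs values
    x = Reshape((nb_inputs,))(input_x)
    # A couple of fully connected hidden layers
    x = Dense(512, activation='relu')(x)
    x = Dense(256, activation='relu')(x)
    # Single sigmoid output: probability that the input is real
    probabilities = Dense(1, activation='sigmoid')(x)
    return Model(inputs=input_x, outputs=probabilities)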
    
Now, let's build a discriminator network using the function you just made.
In [ ]:
    
# We already computed the shape and number of dimensions in a data sample
print('The data has shape {} composed of {} dimension(s)'.format(sample_dimentions, n_dimensions))
# Discriminative Network
D = discriminator(sample_dimentions, n_dimensions)
# Recommended optimiser
d_optim = keras.optimizers.Adam(lr=0.002, beta_1=0.5, beta_2=0.999, epsilon=1e-08, decay=0.0)
# Compile Network
D.compile(loss='binary_crossentropy', optimizer=d_optim)
# Network summary
D.summary()
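As with the generator, you can optionally check that the discriminator maps a batch of samples to one probability per sample:
In [ ]:
    
# Feed a few real samples through D; the output should have shape (4, 1)
print('Discriminator output shape: {}'.format(D.predict(X_data[:4], verbose=0).shape))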
    
In [ ]:
    
from keras.models import Sequential
def build(generator, discriminator):
    """Build a base model for a Generative Adversarial Networks.
    Parameters
    ----------
    generator : keras.engine.training.Model
        A keras model built either with keras.models ( Model, or Sequential ).
        This is the model that generates the data for the Generative Adversarial networks.
    Discriminator : keras.engine.training.Model
        A keras model built either with keras.models ( Model, or Sequential ).
        This is the model that is a binary classifier for REAL/GENERATED data.
    Returns
    -------
    (keras.engine.training.Model)
        It returns a Sequential Keras Model by connecting a Generator model to a
        Discriminator model.  [ generator-->discriminator]
    """
    model = Sequential()
    model.add(generator)
    # Freeze the discriminator so that only the generator is updated
    # when training the combined model
    discriminator.trainable = False
    model.add(discriminator)
    return model
# Create the GAN by stacking the generator and the (frozen) discriminator
G_plus_D = build(G, D)
G_plus_D.compile(loss='binary_crossentropy', optimizer=g_optim)
# Re-enable training of the discriminator for its own updates
D.trainable = True
    
In [ ]:
    
BATCH_SIZE = 32
NB_EPOCHS = 50
    
In [ ]:
    
# Figure for live plot
fig, ax = plt.subplots(1,1)
# Allocate space for noise variable
z = np.zeros((BATCH_SIZE, z_dim))
# Number of batches in one epoch
number_of_batches = int(X_data.shape[0] / BATCH_SIZE)
for epoch in range(NB_EPOCHS):
    for index in range(number_of_batches):
        
        # Sample a minibatch of m=BATCH_SIZE samples from the data-generating distribution,
        # in other words:
        # grab a batch of the real data
        data_batch = X_data[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
        
        # Sample a minibatch of m=BATCH_SIZE noise samples,
        # in other words, we sample from a uniform distribution
        z = np.random.uniform(-1, 1, (BATCH_SIZE, z_dim))
        # Generate a minibatch of m=BATCH_SIZE fake samples,
        # in other words,
        # use the generator to create new fake samples
        generated_batch = G.predict(z, verbose=0)
        # Update/Train discriminator D
        X = np.concatenate((data_batch, generated_batch))
        y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
        d_loss = D.train_on_batch(X, y)
        # Sample a minibatch of m=BATCH_SIZE noise samples,
        # in other words, we sample from a uniform distribution
        z = np.random.uniform(-1, 1, (BATCH_SIZE, z_dim))
        # Update the generator while keeping the discriminator fixed
        D.trainable = False
        # To do gradient ascent we just flip the labels ...
        g_loss = G_plus_D.train_on_batch(z, [1] * BATCH_SIZE)
        D.trainable = True
        
        # Plot data every 10 mini batches
        if index % 10 == 0:
            ax.clear() 
            # Show a grid with the current batch of real and generated images
            image = tools.combine_images(X)
            image = image * 127.5 + 127.5
            ax.imshow(image.astype(np.uint8))
            fig.canvas.draw()
            time.sleep(0.01)
    # End of epoch ....
    print("epoch %d : g_loss : %f  | d_loss : %f" % (epoch, g_loss,  d_loss))
    
In [ ]: