In [1]:
    
import keras
from keras import layers
import numpy as np
    
    
In [2]:
    
latent_dim = 32
height = 32
width = 32
channels = 3
    
In [3]:
    
generator_input = keras.Input(shape=(latent_dim,))

# project the latent vector to a 16 x 16 x 128 feature map
x = layers.Dense(128 * 16 * 16)(generator_input)
x = layers.LeakyReLU()(x)
x = layers.Reshape((16, 16, 128))(x)

x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)

# upsample from 16 x 16 to 32 x 32
x = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)

# produce a 32 x 32 x 3 image with values in [-1, 1]
x = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x)
generator = keras.models.Model(generator_input, x)
generator.summary()
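
As a quick sanity check (not part of the original notebook), a single random
latent vector should come back as one 32 x 32 RGB image:

test_vector = np.random.normal(size=(1, latent_dim))
print(generator.predict(test_vector).shape)   # -> (1, 32, 32, 3)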
    
    
In [4]:
    
discriminator_input = keras.Input(shape=(height, width, channels))

x = layers.Conv2D(128, 3)(discriminator_input)
x = layers.LeakyReLU()(x)

# three strided convolutions downsample the feature map: 30 -> 14 -> 6 -> 2
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)

x = layers.Flatten()(x)
# dropout is an important stabilization trick for GAN discriminators
x = layers.Dropout(0.4)(x)

# single sigmoid unit: probability that the input image is fake
x = layers.Dense(1, activation='sigmoid')(x)
discriminator = keras.models.Model(discriminator_input, x)
discriminator.summary()
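
The strided convolutions shrink the 32 x 32 input to a 2 x 2 x 128 map, so
Flatten yields 512 features. A quick check (again, not in the original
notebook) that the model emits one probability per image:

dummy_batch = np.zeros((4, height, width, channels), dtype='float32')
print(discriminator.predict(dummy_batch).shape)   # -> (4, 1)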
    
    
In [5]:
    
# `lr` and `decay` are the Keras 2 argument names; newer Keras releases use
# `learning_rate` instead. Gradient clipping (clipvalue) helps keep GAN
# training stable.
discriminator_optimizer = keras.optimizers.RMSprop(
    lr=0.0008,
    clipvalue=1.0,
    decay=1e-8)
discriminator.compile(optimizer=discriminator_optimizer,
                      loss='binary_crossentropy')
    
In [6]:
    
# freeze the discriminator inside the combined model; because the
# discriminator was compiled before this flag was set, it still learns
# normally through its own train_on_batch calls
discriminator.trainable = False
gan_input = keras.Input(shape=(latent_dim,))
gan_output = discriminator(generator(gan_input))
gan = keras.models.Model(gan_input, gan_output)
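
One way to verify that the freeze took effect (not in the original notebook):
inside gan, only the generator's weights should be trainable.

print(len(gan.trainable_weights) == len(generator.trainable_weights))   # -> True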
    
In [7]:
    
gan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8)
gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')
    
In [8]:
    
import os
from keras.preprocessing import image
(x_train, y_train), (_, _) = keras.datasets.cifar10.load_data()
    
In [9]:
    
# keep only class 8: ship images
x_train = x_train[y_train.flatten() == 8]
# cast to float32 and scale pixel values to [0, 1]
x_train = x_train.reshape(
    (x_train.shape[0],) +
    (height, width, channels)).astype('float32') / 255.
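
Note that the generator ends in tanh, whose outputs lie in [-1, 1], while the
cell above scales images to [0, 1]; that mismatch is present in the original
code. A common alternative (an assumption, not what this notebook does) is to
match the two ranges:

x_train_alt = (x_train * 2.) - 1.   # hypothetical rescale of [0, 1] data to [-1, 1]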
    
In [10]:
    
iterations = 10000
batch_size = 20
save_dir = "E:\\temp\\dcgan"
os.makedirs(save_dir, exist_ok=True)   # make sure the output directory exists
    
In [11]:
    
start = 0
for step in range(iterations):
    # sample random points in the latent space
    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))
    
    # generate fake images
    generated_images = generator.predict(random_latent_vectors)
    
    # get real images
    stop = start + batch_size
    real_images = x_train[start: stop]
    # assemble the discriminator's training batch: fakes first, then reals
    combined_images = np.concatenate([generated_images, real_images])
    # labels: 1 = fake, 0 = real, matching the order above
    labels = np.concatenate([np.ones((batch_size, 1)),
                             np.zeros((batch_size, 1))])
    
    # train discriminator
    d_loss = discriminator.train_on_batch(combined_images, labels)
    
    # sample fresh random points in the latent space
    random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))

    # misleading targets: label the fakes as "real" (0) so the generator
    # is rewarded for fooling the frozen discriminator
    misleading_targets = np.zeros((batch_size, 1))
    
    # train generator
    a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets)
    
    # advance the window over the real images, wrapping around at the end
    start += batch_size
    if start > len(x_train) - batch_size:
        start = 0
    
    if step % 100 == 0:
        # periodically checkpoint the weights (written to the working directory)
        gan.save_weights('gan.h5')

        print("discriminator loss: ", d_loss)
        print("adversarial loss: ", a_loss)

        # save one generated and one real image for visual inspection
        img = image.array_to_img(generated_images[0] * 255., scale=False)
        img.save(os.path.join(save_dir, 'generated_ship' + str(step) + '.png'))

        img = image.array_to_img(real_images[0] * 255., scale=False)
        img.save(os.path.join(save_dir, 'real_ship' + str(step) + '.png'))
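
After training, the checkpointed weights can be restored and the generator used
on its own. A minimal sketch, assuming the gan.h5 file written by the loop above
is in the working directory (the generator is a sub-model of gan, so loading
gan's weights restores the generator's too):

gan.load_weights('gan.h5')
latent = np.random.normal(size=(10, latent_dim))
samples = generator.predict(latent)   # shape (10, 32, 32, 3)
for i, s in enumerate(samples):
    img = image.array_to_img(s * 255., scale=False)
    img.save(os.path.join(save_dir, 'sample_' + str(i) + '.png'))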
    
    
    