In [ ]:
import numpy as np
import keras
from keras.models import Model, Sequential
from keras.layers import *
from keras.optimizers import Adam
from tqdm import tqdm
from matplotlib import pyplot as plt
%matplotlib inline
In [ ]:
dogs = np.load('../4.cat_or_dog/data/dog.npz')
# np.load on an .npz archive returns an NpzFile, not an array;
# 'arr_0' is the default key written by np.savez (adjust if the archive uses another key)
dogs = dogs['arr_0']
In [ ]:
dogs = dogs.reshape(-1, 128, 128, 3)
# scale pixels to [0, 1] (assuming uint8 input) so the real images match the range
# of the generator's sigmoid output
X_train = dogs.astype('float32') / 255.0
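In [ ]:
# Quick sanity check on the training data (illustrative; assumes X_train now holds
# float32 RGB images scaled to [0, 1] as prepared above).
print(X_train.shape, X_train.dtype, X_train.min(), X_train.max())
plt.imshow(X_train[0])
plt.axis('off')
plt.show()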
In [ ]:
generator = Sequential([
    Dense(256 * 8 * 8, input_shape=(100,), activation='elu'),
    BatchNormalization(),
    Dropout(0.2),
    Reshape((8, 8, 256)),
    UpSampling2D(),
    Conv2D(256, (4, 4), padding='same', activation='elu'),
    BatchNormalization(),
    Dropout(0.2),
    UpSampling2D(),
    Conv2D(128, (4, 4), padding='same', activation='elu'),
    BatchNormalization(),
    Dropout(0.2),
    UpSampling2D(),
    Conv2D(64, (4, 4), padding='same', activation='elu'),
    BatchNormalization(),
    Dropout(0.2),
    UpSampling2D(),
    Conv2D(3, (4, 4), padding='same', activation='sigmoid'),
])
generator.summary()
discriminator = Sequential([
    Conv2D(64, (4, 4), strides=(1, 1), input_shape=(128, 128, 3), padding='same', activation='elu'),
    MaxPool2D(),
    BatchNormalization(),
    Dropout(0.2),
    Conv2D(128, (4, 4), strides=(1, 1), padding='same', activation='elu'),
    MaxPool2D(),
    BatchNormalization(),
    Dropout(0.2),
    Conv2D(256, (4, 4), strides=(1, 1), padding='same', activation='elu'),
    MaxPool2D(),
    BatchNormalization(),
    Dropout(0.2),
    Conv2D(256, (4, 4), strides=(1, 1), padding='same', activation='elu'),
    MaxPool2D(),
    BatchNormalization(),
    Dropout(0.2),
    Flatten(),
    Dense(2, activation='softmax')
])
discriminator.summary()
# compile the discriminator for standalone training on mixed real/fake batches
discriminator.compile(loss='categorical_crossentropy', optimizer=Adam(0.0005), metrics=['accuracy'])
# freeze the discriminator before compiling the combined model, so that
# gan.train_on_batch only updates the generator's weights
# (in Keras the trainable flag is captured at compile time, so flipping it later
# in the training loop has no effect on an already-compiled model)
discriminator.trainable = False
gan_input = Input(shape=(100,))
gen_output = generator(gan_input)
dis_output = discriminator(gen_output)
gan = Model(inputs=gan_input, outputs=dis_output)
gan.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['accuracy'])
gan.summary()
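In [ ]:
# Sanity check (illustrative): the generator's output must match the discriminator's
# expected input, and the combined model should report only the generator's weights
# as trainable, because the discriminator was frozen before `gan` was compiled.
print(generator.output_shape)        # expected: (None, 128, 128, 3)
print(discriminator.input_shape)     # expected: (None, 128, 128, 3)
print(len(gan.trainable_weights), len(gan.non_trainable_weights))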
In [ ]:
def make_trainable(net, val):
    # toggle the trainable flag on a model and all of its layers;
    # note that for an already-compiled model this only takes effect
    # the next time compile() is called
    net.trainable = val
    for l in net.layers:
        l.trainable = val
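In [ ]:
# Illustrative check of make_trainable: the number of trainable weight tensors the
# model reports changes with the flag. Training behaviour itself only changes after
# the next compile() call.
make_trainable(discriminator, False)
print(len(discriminator.trainable_weights))   # expected: 0 while frozen
make_trainable(discriminator, True)
print(len(discriminator.trainable_weights))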
In [ ]:
def plot_output():
    try_input = np.random.rand(25, 100)
    preds = generator.predict(try_input)
    plt.figure(figsize=(10, 10))
    for i in range(preds.shape[0]):
        plt.subplot(5, 5, i + 1)
        plt.imshow(preds[i, :, :, :])
        plt.axis('off')
    # tight_layout minimizes the overlap between subplots
    plt.tight_layout()
    plt.show()

def train(epoch=150, batch_size=32):
    batch_count = X_train.shape[0] // batch_size
    for i in range(epoch):
        # for j in tqdm(range(batch_count)):
        for j in range(batch_count):
            # Input noise for the generator
            #######################################################
            noise_input = np.random.rand(batch_size, 100)
            # random real images from X_train (size=batch_size)
            # that will be fed to the discriminator
            image_batch = X_train[np.random.randint(0, X_train.shape[0], size=batch_size)]
            # fake images produced by the generator
            predictions = generator.predict(noise_input, batch_size=batch_size)
            # the discriminator takes the generated images followed by the real images
            X = np.concatenate([predictions, image_batch])
            # one-hot labels for the discriminator: column 1 = fake, column 0 = real
            y = np.zeros([batch_size * 2, 2])
            y[:batch_size, 1] = 1
            y[batch_size:, 0] = 1
            y_discriminator = y
            # train the discriminator on the mixed batch; the flag toggling below is
            # kept for readability, but the freeze that matters is the one applied
            # before `gan` was compiled
            make_trainable(discriminator, True)
            dis_loss = discriminator.train_on_batch(X, y_discriminator)
            ##########################################################
            dis_loss_history.append(dis_loss[0])
            # train the generator through the combined model: the frozen
            # discriminator should be pushed to label generated images as real
            noise_input = np.random.rand(batch_size, 100)
            y = np.zeros([batch_size, 2])
            y[:, 0] = 1
            y_generator = y
            make_trainable(discriminator, False)
            gen_loss = gan.train_on_batch(noise_input, y_generator)
            gen_loss_history.append(gen_loss[0])
            print(gen_loss[0], dis_loss[0])
        if i % 10 == 0:
            batch_size_eval = 64
            print('evaluating epoch %s' % (i))
            noise_input = np.random.rand(batch_size_eval, 100)
            image_batch = X_train[np.random.randint(0, X_train.shape[0], size=batch_size_eval)]
            predictions = generator.predict(noise_input, batch_size=batch_size_eval)
            X = np.concatenate([predictions, image_batch])
            y = np.zeros([batch_size_eval * 2, 2])
            y[:batch_size_eval, 1] = 1
            y[batch_size_eval:, 0] = 1
            y_discriminator = y
            print('evaluating discriminator')
            print(discriminator.evaluate(X, y_discriminator))
            noise_input = np.random.rand(batch_size_eval, 100)
            y = np.zeros([batch_size_eval, 2])
            y[:, 0] = 1
            y_generator = y
            print('evaluating gan')
            print(gan.evaluate(noise_input, y_generator))
        if i % 1 == 0:
            plot_output()
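In [ ]:
# Optional preview: the untrained generator should produce unstructured noise;
# this just gives a visual baseline before training starts.
plot_output()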
In [ ]:
gen_loss_history = []
dis_loss_history = []
train()
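In [ ]:
# Optionally persist the trained weights so a run can be resumed later.
# The file names below are placeholders, not files referenced elsewhere in this notebook.
generator.save_weights('gan_dog_generator.h5')
discriminator.save_weights('gan_dog_discriminator.h5')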
In [ ]:
plt.imshow(generator.predict(np.random.rand(5, 100), batch_size=5)[1])
In [ ]:
discriminator.predict(generator.predict(np.random.rand(5, 100), batch_size=5))
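In [ ]:
# For comparison (illustrative): the discriminator's predictions on a few real images;
# column 0 is the "real" class, column 1 the "fake" class.
discriminator.predict(X_train[:5])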
In [ ]:
gan.predict(np.random.rand(5, 100))
In [ ]:
y = np.zeros([5,2])
y[:, 0] = 1
gan.evaluate(np.random.rand(5, 100), y)
In [ ]:
# plot the per-batch loss histories, skipping the first 200 batches while training settles
plt.plot(np.arange(len(dis_loss_history) - 200), dis_loss_history[200:], color='red', label='discriminator')
plt.plot(np.arange(len(gen_loss_history) - 200), gen_loss_history[200:], color='blue', label='generator')
plt.legend()
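In [ ]:
# The per-batch losses are noisy, so a simple moving average (sketch; the window
# size is arbitrary) makes the trend easier to read.
def smooth(values, window=50):
    return np.convolve(values, np.ones(window) / window, mode='valid')

plt.plot(smooth(dis_loss_history), color='red', label='discriminator')
plt.plot(smooth(gen_loss_history), color='blue', label='generator')
plt.legend()
plt.show()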