In [1]:
%matplotlib inline
import cPickle
import pickle
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from termcolor import colored
In [4]:
import theano as th
import theano.tensor as T
from keras.utils import np_utils
import keras.models as models
from keras.layers import Input,merge
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import *
from keras.layers.wrappers import TimeDistributed
from keras.layers.noise import GaussianNoise
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D
from keras.layers.recurrent import LSTM
from keras.regularizers import *
from keras.layers.normalization import *
from keras.optimizers import *
from keras.datasets import mnist
import matplotlib.pyplot as plt
import seaborn as sns
import cPickle, random, sys, keras
from keras.models import Model
from IPython import display
from keras.utils import np_utils
from tqdm import tqdm
In [3]:
def load_data():
import pandas as pd
import numpy as np
from PIL import Image
from cv2 import resize
#from skimage.transform import resize
from skimage import exposure
train = pd.read_csv('/home/mckc/new///train.csv')
test = pd.read_csv('/home/mckc/new///test.csv')
train = pd.concat(train,test,axis=0)
print 'the training data shape is ',train.shape
X_tr = []
Y_tr = []
iteration = 0
for i in train.values[:,0]:
image = exposure.equalize_hist(resize(np.array(Image.open(i).convert('L')),(96,96)))
#print image.shape
X_tr.append(image)
Y_tr.append(train.values[iteration,1])
iteration+=1
if iteration % 500==0:
print colored((float(iteration)/len(train.values[:,0])*100 ,' Percentage complete'), 'green')
return X_tr,Y_tr
In [ ]:
def make_trainable(net, val):
    """Set the `trainable` flag on a model and on each of its layers.

    Flip this before (re)compiling whichever model should treat `net`
    as frozen or unfrozen.
    """
    for component in [net] + list(net.layers):
        component.trainable = val
In [ ]:
# NOTE(review): load_data as written returns Python lists, but the next cell
# calls X_train.shape -- confirm load_data converts to numpy arrays first.
X_train,Y_train = load_data()
In [ ]:
# Requires X_train to be an ndarray (shape gives the per-sample image shape).
shp = X_train.shape[1:]
dropout_rate = 0.25
# Slower learning rate for the stacked generator updates, faster for the discriminator.
opt = Adam(lr=1e-4)
dopt = Adam(lr=1e-3)

# ---- Build Generative model: 100-d uniform noise -> single-channel image ----
# NOTE(review): the generator upsamples 14x14 -> 28x28, but the images loaded
# above are resized to 96x96 -- confirm which image size is intended.
nch = 200
g_input = Input(shape=[100])
H = Dense(nch*14*14, init='glorot_normal')(g_input)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Reshape([nch, 14, 14])(H)
H = UpSampling2D(size=(2, 2))(H)
H = Convolution2D(nch/2, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Convolution2D(nch/4, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Convolution2D(nch/4, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Convolution2D(nch/4, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)
H = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(H)
g_V = Activation('sigmoid')(H)
generator = Model(g_input, g_V)
generator.compile(loss='binary_crossentropy', optimizer=opt)
generator.summary()

# ---- Build Discriminative model: image -> real/fake softmax ----
d_input = Input(shape=shp)
# BUG FIX: the original passed activation='relu' to these convolutions and then
# applied LeakyReLU on top.  ReLU zeroes all negatives first, so the following
# LeakyReLU became a no-op and the intended leaky activation silently degraded
# to a plain ReLU.  The convolutions are now linear, activated only by LeakyReLU.
H = Convolution2D(256, 5, 5, subsample=(2, 2), border_mode='same')(d_input)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Convolution2D(512, 5, 5, subsample=(2, 2), border_mode='same')(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Flatten()(H)
H = Dense(256)(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
d_V = Dense(2, activation='softmax')(H)
discriminator = Model(d_input, d_V)
discriminator.compile(loss='categorical_crossentropy', optimizer=dopt)
discriminator.summary()

# Freeze weights in the discriminator for stacked training
make_trainable(discriminator, False)

# ---- Stacked GAN: noise -> generator -> (frozen) discriminator ----
gan_input = Input(shape=[100])
H = generator(gan_input)
gan_V = discriminator(H)
GAN = Model(gan_input, gan_V)
GAN.compile(loss='categorical_crossentropy', optimizer=opt)
GAN.summary()
In [ ]:
def plot_loss(losses):
    """Redraw the running loss curves in-place in the notebook output.

    losses: dict with "d" (discriminator) and "g" (generator) loss histories.
    """
    # Clear the previous output, re-show the current figure, then draw fresh curves.
    display.clear_output(wait=True)
    display.display(plt.gcf())
    plt.figure(figsize=(10, 8))
    for key, curve_label in (("d", 'discriminitive loss'), ("g", 'generative loss')):
        plt.plot(losses[key], label=curve_label)
    plt.legend()
    plt.show()
In [ ]:
def plot_gen(n_ex=16, dim=(4, 4), figsize=(10, 10)):
    """Sample n_ex latent vectors and show the generator's output images
    in a dim[0] x dim[1] grid.
    """
    noise = np.random.uniform(0, 1, size=[n_ex, 100])
    generated_images = generator.predict(noise)
    plt.figure(figsize=figsize)
    # Each sample is single-channel; drop the channel axis for imshow.
    for idx, img in enumerate(generated_images[:, 0, :, :]):
        plt.subplot(dim[0], dim[1], idx + 1)
        plt.imshow(img)
        plt.axis('off')
    plt.tight_layout()
    plt.show()
In [7]:
# Quick sanity check: np.concatenate joins 1-D arrays of unequal length end to end.
np.concatenate([np.array([1, 2, 3]), np.array([4, 5])])
Out[7]:
In [ ]: