In [1]:
import numpy as np
import cPickle
import matplotlib.pyplot as plt

def extractImagesAndLabels(path, file):
    # Each batch file is a pickled dict; the upscaled 100x100 images live under 'bigdata'.
    with open(path + file, 'rb') as f:
        batch = cPickle.load(f)
    images = np.reshape(batch['bigdata'], (10000, 100, 100, 3))
    labels = batch['labels']
    return images, labels

def extractCategories(path, file):
    # batches.meta maps the integer labels to human-readable class names.
    with open(path + file, 'rb') as f:
        meta = cPickle.load(f)
    return meta['label_names']

categories = extractCategories("data/CIFAR-10/cifar-10-batches-py/", "batches.meta")
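A quick sanity check on the loaded metadata (a small sketch, assuming the upscaled batches ship with the standard CIFAR-10 batches.meta): the list printed below should contain the ten class names.
In [ ]:
# Sanity check: with the standard CIFAR-10 metadata this prints the ten class names,
# e.g. ['airplane', 'automobile', 'bird', ..., 'truck'].
print(categories)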
In [2]:
print ('loading 1 .... ')
images1, labels1 = extractImagesAndLabels("data/CIFAR-10/cifar-10-batches-py/", "data_batch_1_100")
print ('finished loading 1 .... ')
In [3]:
print ('loading 2 .... ')
images2, labels2 = extractImagesAndLabels("data/CIFAR-10/cifar-10-batches-py/", "data_batch_2_100")
print ('finished loading 2 .... ')
In [4]:
print ('loading 3 .... ')
images3, labels3 = extractImagesAndLabels("data/CIFAR-10/cifar-10-batches-py/", "data_batch_3_100")
print ('finished loading 3 .... ')
In [5]:
print ('loading 4 .... ')
images4, labels4 = extractImagesAndLabels("data/CIFAR-10/cifar-10-batches-py/", "data_batch_4_100")
print ('finished loading 4 .... ')
In [6]:
print ('loading 5 .... ')
images5, labels5 = extractImagesAndLabels("data/CIFAR-10/cifar-10-batches-py/", "data_batch_5_100")
print ('finished loading 5 .... ')
In [7]:
print ('loading test batch .... ')
testimages, testlabels = extractImagesAndLabels("data/CIFAR-10/cifar-10-batches-py/", "test_batch_100")
print ('finished loading test batch .... ')
In [8]:
print (len(labels1))
# Stack the five training batches into one 50000-image training set.
images = np.vstack((images1, images2, images3, images4, images5))
labels = np.concatenate((labels1, labels2, labels3, labels4, labels5))
print (images.shape)
print (labels.shape)
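As an optional check (illustrative only, assuming the labels are integer class ids 0-9), the per-class counts can be printed; standard CIFAR-10 has 5000 training examples per class.
In [ ]:
# Optional check: per-class counts over the stacked training labels.
# Standard CIFAR-10 has 5000 training examples for each of the 10 classes.
counts = np.bincount(np.asarray(labels), minlength=10)
for name, count in zip(categories, counts):
    print(name + ': ' + str(count))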
In [9]:
# Release the per-batch arrays now that they have been stacked.
images1 = None
images2 = None
images3 = None
images4 = None
images5 = None
labels1 = None
labels2 = None
labels3 = None
labels4 = None
labels5 = None
In [10]:
def getImage(idx):
    # The stacked array is already (50000, 100, 100, 3); scale one image to [0, 1] for display.
    image = images[idx]
    image = image.astype('float32')
    image /= 255
    return image

imgid = 35
image = getImage(imgid)
print(image.shape)
%matplotlib inline
imgplot = plt.imshow(image)
categoryid = labels[imgid]
print(categories[categoryid])
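For a broader look at the data, a small grid of samples can be drawn with the same helper (purely a sketch; any valid indices into the stacked training set would do).
In [ ]:
# Illustrative only: show the first ten training images with their class names.
fig, axes = plt.subplots(2, 5, figsize=(12, 5))
for ax, idx in zip(axes.flat, range(10)):
    ax.imshow(getImage(idx))
    ax.set_title(categories[labels[idx]])
    ax.axis('off')
plt.show()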
In [11]:
from __future__ import print_function
import numpy as np
np.random.seed(1337)
import keras
from keras.datasets import cifar10
from keras.models import Model
from keras.layers import Dense, Activation, Flatten, Input, MaxPooling2D, Dropout
from keras.layers import Conv2D
import h5py # to ensure we have this package installed
from keras.callbacks import ModelCheckpoint
batch_size = 32
num_classes = 10
epochs = 150
In [12]:
x_train = np.reshape(images, (50000, 100, 100, 3))
y_train = labels
x_test = np.reshape(testimages, (10000, 100, 100, 3))
y_test = testlabels
print ("x_train shape : "+str(x_train.shape))
print ("y_train shape : "+str(len(y_train)))
print ("x_test shape : "+str(x_test.shape))
print ("y_test shape : "+str(len(y_test)))
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
In [13]:
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
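To make the effect of to_categorical concrete, each label is now a 10-dimensional one-hot row; a quick illustrative check on the first training example:
In [ ]:
# Each label is now a one-hot row, e.g. class 3 -> [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
print(y_train[0])
print('class: ' + categories[int(np.argmax(y_train[0]))])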
In [14]:
# The input layer matches the 100x100x3 upscaled images; each stride-2 convolution
# halves the spatial resolution (100 -> 50 -> 25 -> 13 -> 7 with 'same' padding).
inputs = Input(shape=(100, 100, 3))
# Block 1: 100 filters, downsample 100x100 -> 50x50
output = Conv2D(100, (2, 2), strides=(1, 1), padding='same', activation='relu')(inputs)
output = Conv2D(100, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Conv2D(100, (2, 2), strides=(2, 2), padding='same', activation='relu')(output)
# Block 2: 200 filters, downsample 50x50 -> 25x25
output = Conv2D(200, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Dropout(0.1)(output)
output = Conv2D(200, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Dropout(0.1)(output)
output = Conv2D(200, (2, 2), strides=(2, 2), padding='same', activation='relu')(output)
# Block 3: 300 filters, downsample 25x25 -> 13x13
output = Conv2D(300, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Dropout(0.2)(output)
output = Conv2D(300, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Dropout(0.2)(output)
output = Conv2D(300, (2, 2), strides=(2, 2), padding='same', activation='relu')(output)
# Block 4: 400 filters, downsample 13x13 -> 7x7
output = Conv2D(400, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Dropout(0.3)(output)
output = Conv2D(400, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Dropout(0.3)(output)
output = Conv2D(400, (2, 2), strides=(2, 2), padding='same', activation='relu')(output)
# Block 5: 500 filters at 7x7, then flatten into the softmax classifier
output = Conv2D(500, (2, 2), strides=(1, 1), padding='same', activation='relu')(output)
output = Dropout(0.4)(output)
output = Conv2D(500, (1, 1), strides=(1, 1), padding='same', activation='relu')(output)
x = Flatten()(output)
x = Dense(num_classes)(x)
output = Activation('softmax')(x)
In [15]:
model = Model([inputs], output)
In [16]:
model.summary()
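Optionally, the architecture can also be rendered to an image with Keras' plot_model utility (this assumes the pydot and graphviz packages are installed; it is not required for training, and the output filename below is just an example).
In [ ]:
# Optional: write an architecture diagram to a file (requires pydot + graphviz).
# The filename is only an example.
from keras.utils import plot_model
plot_model(model, to_file='cifar10-cnnall100-architecture.png', show_shapes=True)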
In [ ]:
# Initiate the RMSprop optimizer and train the model with it.
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Checkpoint the weights after every epoch, tagged with the validation accuracy.
filepath = "checkpoints/cifar10-cnnall100-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=1,
                             mode='max')

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test),
          callbacks=[checkpoint])
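Once training has run, a minimal evaluation sketch for the held-out test set; the checkpoint path in the comments is a placeholder for whichever saved .hdf5 file should be reloaded.
In [ ]:
# Minimal evaluation sketch: report loss and accuracy on the held-out test set.
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss: ' + str(score[0]))
print('Test accuracy: ' + str(score[1]))
# A saved checkpoint can be restored later, e.g.:
# from keras.models import load_model
# model = load_model('checkpoints/<checkpoint-file>.hdf5')  # placeholder path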
In [ ]: