In [1]:
from __future__ import print_function
import tensorflow as tf
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Activation, Conv2D, GlobalAveragePooling2D
from keras.utils import np_utils
from keras.optimizers import SGD
from keras import backend as K
from keras.models import Model
from keras.layers.core import Lambda
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
import pandas
import numpy as np
import matplotlib.pyplot as plt
from os import makedirs
from os.path import exists, join


Using TensorFlow backend.

In [2]:
K.set_image_data_format('channels_last')  # Keras 2 replacement for the legacy set_image_dim_ordering('tf')

batch_size = 32
nb_classes = 10
nb_epoch = 375
rows, cols = 32, 32
channels = 3
log_dir = './logs'

if not exists(log_dir):
    makedirs(log_dir)

In [3]:
# Keep two copies of the data: X_*/y_* will be normalised for training,
# while img_*/lbl_* stay in the original 0-255 range for display.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
(img_train, lbl_train), (img_test, lbl_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

print(X_train.shape[1:])

Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)


X_train shape: (50000, 32, 32, 3)
50000 train samples
10000 test samples
(32, 32, 3)
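
to_categorical turns each integer label into a length-10 one-hot vector; a quick way to see the mapping for the first training sample (assuming the variables from the cell above are in scope):

In [ ]:
# e.g. label [6] -> [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
print(y_train[0], '->', Y_train[0])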

In [4]:
categories = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
imgid = 2
image = img_train[imgid]
image = image.astype('float32')
image /= 255

%matplotlib inline
imgplot = plt.imshow(image)

categoryid = lbl_train[imgid]

print(categories)
print("categoryid :"+str(categoryid))
print("category : "+str(categories[categoryid[0]]))


['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
categoryid :[9]
category : truck
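
The same lookup can be used to display one example per class; a small optional sketch, assuming img_train, lbl_train and categories from the cell above:

In [ ]:
# Show the first training image found for each of the ten CIFAR-10 categories.
fig, axes = plt.subplots(1, 10, figsize=(15, 2))
for cat_id, ax in enumerate(axes):
    idx = np.where(lbl_train.flatten() == cat_id)[0][0]
    ax.imshow(img_train[idx])
    ax.set_title(categories[cat_id], fontsize=8)
    ax.axis('off')
plt.show()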

In [5]:
model = Sequential()

model.add(Conv2D(96, (3, 3), padding='same', input_shape=(32, 32, 3)))
model.add(Activation('relu'))
model.add(Conv2D(96, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(96, (3, 3), padding='same', strides=(2,2)))
model.add(Dropout(0.5))

model.add(Conv2D(192, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(192, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(192, (3, 3), padding='same', strides=(2,2)))
model.add(Dropout(0.5))

model.add(Conv2D(192, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(192, (1, 1), padding='valid'))
model.add(Activation('relu'))
model.add(Conv2D(10, (1, 1), padding='valid', name='features'))

model.add(GlobalAveragePooling2D())
model.add(Activation('softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

In [6]:
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
print(model.summary())


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 32, 32, 96)        2688      
_________________________________________________________________
activation_1 (Activation)    (None, 32, 32, 96)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 32, 32, 96)        83040     
_________________________________________________________________
activation_2 (Activation)    (None, 32, 32, 96)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 16, 16, 96)        83040     
_________________________________________________________________
dropout_1 (Dropout)          (None, 16, 16, 96)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 16, 16, 192)       166080    
_________________________________________________________________
activation_3 (Activation)    (None, 16, 16, 192)       0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 16, 16, 192)       331968    
_________________________________________________________________
activation_4 (Activation)    (None, 16, 16, 192)       0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 8, 8, 192)         331968    
_________________________________________________________________
dropout_2 (Dropout)          (None, 8, 8, 192)         0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 8, 8, 192)         331968    
_________________________________________________________________
activation_5 (Activation)    (None, 8, 8, 192)         0         
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 8, 8, 192)         37056     
_________________________________________________________________
activation_6 (Activation)    (None, 8, 8, 192)         0         
_________________________________________________________________
features (Conv2D)            (None, 8, 8, 10)          1930      
_________________________________________________________________
global_average_pooling2d_1 ( (None, 10)                0         
_________________________________________________________________
activation_7 (Activation)    (None, 10)                0         
=================================================================
Total params: 1,369,738
Trainable params: 1,369,738
Non-trainable params: 0
_________________________________________________________________
None
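
The summary reflects the all-convolutional design: downsampling is done by the strided 3x3 convolutions rather than pooling, and the 1x1 'features' layer feeds global average pooling. As a quick sanity check, the reported parameter counts can be reproduced by hand:

In [ ]:
# A Conv2D layer has (kernel_h * kernel_w * in_channels + 1) * filters parameters.
print(3 * 3 * 3 * 96 + 96)      # conv2d_1  -> 2688
print(3 * 3 * 96 * 96 + 96)     # conv2d_2, conv2d_3 -> 83040
print(3 * 3 * 96 * 192 + 192)   # conv2d_4  -> 166080
print(1 * 1 * 192 * 10 + 10)    # features  -> 1930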

In [7]:
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False) 

datagen.fit(X_train)
filepath="weights.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='max')


# Write the test labels as integers so the TensorBoard projector can colour
# the embedding by class.
with open(join(log_dir, 'metadata.tsv'), 'w') as f:
    np.savetxt(f, y_test, fmt='%d')
    
tensorboard = TensorBoard(batch_size=batch_size,
                          embeddings_freq=1,
                          embeddings_layer_names=['features'],
                          embeddings_metadata='metadata.tsv',
                          embeddings_data=X_test)


callbacks_list = [checkpoint, tensorboard]
# Fit the model on the batches generated by datagen.flow().
history_callback = model.fit_generator(datagen.flow(X_train, Y_train,
                                    batch_size=batch_size),
                                    steps_per_epoch=2000,
                                    epochs=nb_epoch,
                                    validation_data=(X_test, Y_test),
                                    callbacks=callbacks_list, verbose=1)


Epoch 1/375
2000/2000 [==============================] - 70s 35ms/step - loss: 1.8876 - acc: 0.2806 - val_loss: 1.5358 - val_acc: 0.4272

Epoch 00001: val_acc improved from -inf to 0.42720, saving model to weights.hdf5
Epoch 2/375
2000/2000 [==============================] - 68s 34ms/step - loss: 1.4104 - acc: 0.4832 - val_loss: 1.2457 - val_acc: 0.5531

Epoch 00002: val_acc improved from 0.42720 to 0.55310, saving model to weights.hdf5
Epoch 3/375
2000/2000 [==============================] - 68s 34ms/step - loss: 1.1506 - acc: 0.5845 - val_loss: 1.0791 - val_acc: 0.6155

Epoch 00003: val_acc improved from 0.55310 to 0.61550, saving model to weights.hdf5
Epoch 4/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.9973 - acc: 0.6459 - val_loss: 0.8745 - val_acc: 0.6932

Epoch 00004: val_acc improved from 0.61550 to 0.69320, saving model to weights.hdf5
Epoch 5/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.8734 - acc: 0.6939 - val_loss: 0.9327 - val_acc: 0.6990

Epoch 00005: val_acc improved from 0.69320 to 0.69900, saving model to weights.hdf5
Epoch 6/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.7891 - acc: 0.7249 - val_loss: 0.6772 - val_acc: 0.7723

Epoch 00006: val_acc improved from 0.69900 to 0.77230, saving model to weights.hdf5
Epoch 7/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.7258 - acc: 0.7462 - val_loss: 0.6660 - val_acc: 0.7740

Epoch 00007: val_acc improved from 0.77230 to 0.77400, saving model to weights.hdf5
Epoch 8/375
2000/2000 [==============================] - 68s 34ms/step - loss: 0.6730 - acc: 0.7667 - val_loss: 0.6260 - val_acc: 0.7915

Epoch 00008: val_acc improved from 0.77400 to 0.79150, saving model to weights.hdf5
Epoch 9/375
 807/2000 [===========>..................] - ETA: 38s - loss: 0.6481 - acc: 0.7739

KeyboardInterruptTraceback (most recent call last)
<ipython-input-7-f5fe8fc9366d> in <module>()
     38                                     epochs=nb_epoch,
     39                                     validation_data=(X_test, Y_test),
---> 40                                     callbacks=callbacks_list, verbose=1)
     41 
     42 

/usr/lib64/python2.7/site-packages/keras/legacy/interfaces.pyc in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

/usr/lib64/python2.7/site-packages/keras/engine/training.pyc in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419 
   1420     @interfaces.legacy_generator_methods_support

/usr/lib64/python2.7/site-packages/keras/engine/training_generator.pyc in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
    215                 outs = model.train_on_batch(x, y,
    216                                             sample_weight=sample_weight,
--> 217                                             class_weight=class_weight)
    218 
    219                 outs = to_list(outs)

/usr/lib64/python2.7/site-packages/keras/engine/training.pyc in train_on_batch(self, x, y, sample_weight, class_weight)
   1215             ins = x + y + sample_weights
   1216         self._make_train_function()
-> 1217         outputs = self.train_function(ins)
   1218         return unpack_singleton(outputs)
   1219 

/usr/lib64/python2.7/site-packages/keras/backend/tensorflow_backend.pyc in __call__(self, inputs)
   2713                 return self._legacy_call(inputs)
   2714 
-> 2715             return self._call(inputs)
   2716         else:
   2717             if py_any(is_tensor(x) for x in inputs):

/usr/lib64/python2.7/site-packages/keras/backend/tensorflow_backend.pyc in _call(self, inputs)
   2673             fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]
   2677 

/usr/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in __call__(self, *args, **kwargs)
   1397           ret = tf_session.TF_SessionRunCallable(
   1398               self._session._session, self._handle, args, status,
-> 1399               run_metadata_ptr)
   1400         if run_metadata:
   1401           proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

KeyboardInterrupt: 
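
Training was stopped by hand during epoch 9, so the best weights so far (val_acc 0.7915) are in weights.hdf5. A minimal sketch for resuming from that checkpoint, assuming the file and the variables from the cell above (datagen, callbacks_list, etc.) are still available:

In [ ]:
# Restore the best checkpointed weights and continue training from epoch 9.
model.load_weights('weights.hdf5')
history_callback = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                                       steps_per_epoch=2000,
                                       epochs=nb_epoch,
                                       initial_epoch=8,
                                       validation_data=(X_test, Y_test),
                                       callbacks=callbacks_list, verbose=1)

The scalar and embedding logs written to ./logs can be inspected with tensorboard --logdir ./logs.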

In [ ]:
def getImage(img_id):
    # Return a normalised copy of a test image.
    image = img_test[img_id].astype('float32')
    image /= 255
    return image

def showImage(img_id):
    # Display the image together with its ground-truth category.
    image = getImage(img_id)
    %matplotlib inline
    imgplot = plt.imshow(image)
    labelid = lbl_test[img_id]
    print("category : " + categories[labelid[0]])

def predictImage(img_id):
    # Show the image, then run it through the network and report the
    # most probable class.
    showImage(img_id)
    image = np.expand_dims(getImage(img_id), axis=0)
    result = model.predict(image)[0]
    best_index = int(np.argmax(result))
    print("prediction : " + categories[best_index])


predictImage(50)
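
predictImage only looks at a single test image; a quick sketch for scoring the whole test set with the current weights (assuming model, X_test and Y_test from the cells above):

In [ ]:
# Evaluate loss and accuracy over all 10,000 normalised test images.
score = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=0)
print('test loss: %.4f  test accuracy: %.4f' % (score[0], score[1]))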

In [ ]:
pandas.DataFrame(history_callback.history).to_csv("history.csv")

model.save('keras_allconv.h5')

In [ ]:
model_json = model.to_json()
with open("allconv.json", "w") as json_file:
    json_file.write(model_json)
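
To restore the network later, the architecture can be rebuilt from allconv.json and combined with a weights file, or the complete model saved above can be loaded in one step; a sketch assuming those files exist in the working directory:

In [ ]:
from keras.models import model_from_json, load_model

# Rebuild the architecture from JSON and restore the checkpointed weights...
with open('allconv.json') as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights('weights.hdf5')

# ...or load the full model (architecture + weights + optimizer state) at once.
full_model = load_model('keras_allconv.h5')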

In [ ]:


In [ ]: