Using Keras for image classification

  • configure random transformations and normalization operations to be applied to your image data during training
  • instantiate generators of augmented image batches (and their labels) via .flow(data, labels) or .flow_from_directory(directory). These generators can then be passed to the Keras model methods that accept data generators as inputs: fit_generator, evaluate_generator and predict_generator. A minimal sketch of the in-memory .flow variant follows below; this notebook itself uses .flow_from_directory.
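
A minimal sketch of the .flow(data, labels) path, assuming hypothetical in-memory arrays x_train and y_train (not part of this notebook) and an already compiled Keras model:

from keras.preprocessing.image import ImageDataGenerator

# x_train: (n_samples, height, width, channels), y_train: one-hot labels (n_samples, n_classes)
datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)
generator = datagen.flow(x_train, y_train, batch_size=32)

# a compiled model can then consume the generator directly
model.fit_generator(generator,
                    steps_per_epoch=len(x_train) // 32,
                    epochs=10)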

In [28]:
import os
curr_dir = %pwd
proj_dir = os.path.normpath(os.path.join(curr_dir, os.path.pardir))

In [1]:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import regularizers
from keras.layers.normalization import BatchNormalization

import numpy as np
seed = 13
np.random.seed(seed)


Using TensorFlow backend.

GPU configuration; disregard if not running on a GPU


In [2]:
## use only 50% of the GPU memory
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))

In [3]:
# imports to display the svg image of the network
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
import matplotlib.pyplot as plt

import pydot
import graphviz

In [4]:
from keras.preprocessing.image import ImageDataGenerator

In [5]:
batch_size = 32

# train_datagen = ImageDataGenerator(rescale=1./255)
train_datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True, width_shift_range=0.1,
                                  height_shift_range=0.1,zoom_range=0.1)

# only rescaling (no augmentation) for the test samples
test_datagen = ImageDataGenerator(rescale=1./255)

In [11]:
train_dir = '../data/processed/train'
test_dir = '../data/processed/test'

In [8]:
%pwd


Out[8]:
'/Users/ericleijonmarck/dev/dicommunify/notebooks'

In [12]:
# this is a generator that will read pictures
# found in subfolders of 'data/train',
# and indefinitely generate batches of augmented image data
train_generator = train_datagen.flow_from_directory(
                train_dir,
                target_size=(100,50),
                shuffle=True,
                color_mode='grayscale',
                batch_size=batch_size,
                class_mode='categorical') # since we use categorical_crossentropy loss


Found 400 images belonging to 5 classes.

In [13]:
# this is a similar generator, for validation/test data
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(100,50),
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='categorical')


Found 100 images belonging to 5 classes.

Inspirational architectures were taken from the brain_segmentation and image_classification examples.


In [14]:
nb_train_samples = 400
nb_test_samples = 100

model = Sequential()

model.add(Conv2D(64, (3,3), input_shape=(100,50,1), padding='same',
                 kernel_regularizer=regularizers.l2(0.001),activity_regularizer=regularizers.l1(0.001)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))

model.add(Conv2D(128, (3,3), padding='same'))#, kernel_regularizer=regularizers.l2(0.01),activity_regularizer=regularizers.l1(0.01)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(5))
model.add(Activation('softmax'))

from keras.optimizers import Adam
          
adam = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

In [16]:
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))


Out[16]:
[SVG plot of the model graph: conv2d_1_input -> conv2d_1 -> activation_1 -> max_pooling2d_1 -> dropout_1 -> conv2d_2 -> activation_2 -> max_pooling2d_2 -> dropout_2 -> flatten_1 -> dense_1 -> activation_3 -> dropout_3 -> dense_2 -> activation_4]

In [17]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 100, 50, 64)       640       
_________________________________________________________________
activation_1 (Activation)    (None, 100, 50, 64)       0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 50, 25, 64)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 50, 25, 64)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 50, 25, 128)       73856     
_________________________________________________________________
activation_2 (Activation)    (None, 50, 25, 128)       0         
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 25, 12, 128)       0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 25, 12, 128)       0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 38400)             0         
_________________________________________________________________
dense_1 (Dense)              (None, 512)               19661312  
_________________________________________________________________
activation_3 (Activation)    (None, 512)               0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 5)                 2565      
_________________________________________________________________
activation_4 (Activation)    (None, 5)                 0         
=================================================================
Total params: 19,738,373
Trainable params: 19,738,373
Non-trainable params: 0
_________________________________________________________________

In [19]:
run_count = 0

In [21]:
run_count += 1
from keras.callbacks import TensorBoard
tbCallBack = TensorBoard(log_dir="./summary/run_{}".format(run_count), 
                         histogram_freq=2, write_graph=True, 
                         write_images=True)

model.fit_generator(train_generator,
                   steps_per_epoch=nb_train_samples // batch_size,
                   epochs=120,
                   validation_data=test_generator,
                   validation_steps=nb_test_samples // batch_size,
                   callbacks=[tbCallBack])

model.save_weights('first_try.h5')
model.save('my_model.h5')


INFO:tensorflow:Summary name conv2d_1/kernel:0 is illegal; using conv2d_1/kernel_0 instead.
INFO:tensorflow:Summary name conv2d_1/bias:0 is illegal; using conv2d_1/bias_0 instead.
INFO:tensorflow:Summary name conv2d_2/kernel:0 is illegal; using conv2d_2/kernel_0 instead.
INFO:tensorflow:Summary name conv2d_2/bias:0 is illegal; using conv2d_2/bias_0 instead.
INFO:tensorflow:Summary name dense_1/kernel:0 is illegal; using dense_1/kernel_0 instead.
INFO:tensorflow:Summary name dense_1/bias:0 is illegal; using dense_1/bias_0 instead.
INFO:tensorflow:Summary name dense_2/kernel:0 is illegal; using dense_2/kernel_0 instead.
INFO:tensorflow:Summary name dense_2/bias:0 is illegal; using dense_2/bias_0 instead.
Epoch 1/120
12/12 [==============================] - 11s - loss: 487.8634 - acc: 0.3879 - val_loss: 503.3220 - val_acc: 0.4167
Epoch 2/120
12/12 [==============================] - 10s - loss: 425.5719 - acc: 0.4379 - val_loss: 457.3803 - val_acc: 0.5441
Epoch 3/120
12/12 [==============================] - 10s - loss: 404.9179 - acc: 0.5004 - val_loss: 386.5409 - val_acc: 0.5000
Epoch 4/120
12/12 [==============================] - 11s - loss: 366.0260 - acc: 0.5422 - val_loss: 324.2314 - val_acc: 0.4853
Epoch 5/120
12/12 [==============================] - 11s - loss: 353.4734 - acc: 0.5987 - val_loss: 345.5023 - val_acc: 0.4706
Epoch 6/120
12/12 [==============================] - 12s - loss: 321.8716 - acc: 0.5808 - val_loss: 340.1657 - val_acc: 0.5417
Epoch 7/120
12/12 [==============================] - 12s - loss: 292.1155 - acc: 0.6304 - val_loss: 269.5915 - val_acc: 0.5294
Epoch 8/120
12/12 [==============================] - 11s - loss: 286.5915 - acc: 0.6354 - val_loss: 288.3922 - val_acc: 0.5735
Epoch 9/120
12/12 [==============================] - 11s - loss: 251.1892 - acc: 0.6124 - val_loss: 253.1070 - val_acc: 0.6324
Epoch 10/120
12/12 [==============================] - 11s - loss: 235.8615 - acc: 0.6515 - val_loss: 246.2173 - val_acc: 0.6354
Epoch 11/120
12/12 [==============================] - 11s - loss: 213.7038 - acc: 0.6294 - val_loss: 216.4760 - val_acc: 0.6324
Epoch 12/120
12/12 [==============================] - 11s - loss: 200.6431 - acc: 0.6535 - val_loss: 200.0955 - val_acc: 0.6618
Epoch 13/120
12/12 [==============================] - 11s - loss: 183.1135 - acc: 0.6592 - val_loss: 183.9123 - val_acc: 0.5735
Epoch 14/120
12/12 [==============================] - 11s - loss: 177.1150 - acc: 0.6644 - val_loss: 182.0908 - val_acc: 0.6042
Epoch 15/120
12/12 [==============================] - 11s - loss: 161.3688 - acc: 0.6300 - val_loss: 160.4932 - val_acc: 0.5588
Epoch 16/120
12/12 [==============================] - 11s - loss: 151.4422 - acc: 0.6927 - val_loss: 157.0615 - val_acc: 0.6176
Epoch 17/120
12/12 [==============================] - 11s - loss: 143.6566 - acc: 0.6483 - val_loss: 145.0255 - val_acc: 0.6324
Epoch 18/120
12/12 [==============================] - 11s - loss: 135.9250 - acc: 0.6980 - val_loss: 145.9758 - val_acc: 0.6458
Epoch 19/120
12/12 [==============================] - 11s - loss: 128.8515 - acc: 0.6929 - val_loss: 127.4058 - val_acc: 0.6324
Epoch 20/120
12/12 [==============================] - 12s - loss: 119.5834 - acc: 0.7036 - val_loss: 121.6892 - val_acc: 0.7206
Epoch 21/120
12/12 [==============================] - 11s - loss: 118.8190 - acc: 0.7161 - val_loss: 121.0611 - val_acc: 0.6324
Epoch 22/120
12/12 [==============================] - 11s - loss: 110.9408 - acc: 0.6977 - val_loss: 117.7469 - val_acc: 0.6458
Epoch 23/120
12/12 [==============================] - 11s - loss: 103.3774 - acc: 0.7108 - val_loss: 109.8626 - val_acc: 0.6618
Epoch 24/120
12/12 [==============================] - 11s - loss: 98.7466 - acc: 0.7189 - val_loss: 95.8373 - val_acc: 0.6618
Epoch 25/120
12/12 [==============================] - 11s - loss: 93.3549 - acc: 0.6742 - val_loss: 95.7317 - val_acc: 0.6324
Epoch 26/120
12/12 [==============================] - 11s - loss: 88.3621 - acc: 0.7448 - val_loss: 96.6021 - val_acc: 0.6979
Epoch 27/120
12/12 [==============================] - 11s - loss: 87.0719 - acc: 0.7317 - val_loss: 92.0815 - val_acc: 0.7353
Epoch 28/120
12/12 [==============================] - 11s - loss: 81.6982 - acc: 0.6977 - val_loss: 85.9402 - val_acc: 0.6912
Epoch 29/120
12/12 [==============================] - 11s - loss: 77.7089 - acc: 0.7452 - val_loss: 84.3973 - val_acc: 0.6324
Epoch 30/120
12/12 [==============================] - 11s - loss: 76.5259 - acc: 0.7267 - val_loss: 80.1807 - val_acc: 0.7059
Epoch 31/120
12/12 [==============================] - 11s - loss: 72.6538 - acc: 0.7422 - val_loss: 79.9289 - val_acc: 0.6979
Epoch 32/120
12/12 [==============================] - 11s - loss: 70.0562 - acc: 0.7395 - val_loss: 72.8702 - val_acc: 0.6618
Epoch 33/120
12/12 [==============================] - 11s - loss: 67.6157 - acc: 0.7526 - val_loss: 70.5338 - val_acc: 0.6618
Epoch 34/120
12/12 [==============================] - 11s - loss: 69.3460 - acc: 0.7318 - val_loss: 70.1105 - val_acc: 0.7206
Epoch 35/120
12/12 [==============================] - 12s - loss: 63.6469 - acc: 0.7237 - val_loss: 70.8251 - val_acc: 0.7083
Epoch 36/120
12/12 [==============================] - 11s - loss: 62.5290 - acc: 0.7420 - val_loss: 63.7673 - val_acc: 0.7353
Epoch 37/120
12/12 [==============================] - 11s - loss: 61.9411 - acc: 0.7703 - val_loss: 64.9707 - val_acc: 0.7794
Epoch 38/120
12/12 [==============================] - 11s - loss: 59.1216 - acc: 0.7651 - val_loss: 64.8005 - val_acc: 0.7059
Epoch 39/120
12/12 [==============================] - 11s - loss: 58.1541 - acc: 0.7167 - val_loss: 64.1187 - val_acc: 0.7188
Epoch 40/120
12/12 [==============================] - 11s - loss: 56.5921 - acc: 0.7496 - val_loss: 62.0746 - val_acc: 0.7059
Epoch 41/120
12/12 [==============================] - 11s - loss: 56.3486 - acc: 0.7269 - val_loss: 58.9157 - val_acc: 0.7206
Epoch 42/120
12/12 [==============================] - 11s - loss: 55.8110 - acc: 0.7030 - val_loss: 60.2903 - val_acc: 0.7500
Epoch 43/120
12/12 [==============================] - 11s - loss: 53.5070 - acc: 0.7446 - val_loss: 59.3690 - val_acc: 0.7604
Epoch 44/120
12/12 [==============================] - 12s - loss: 53.1200 - acc: 0.7263 - val_loss: 55.2540 - val_acc: 0.7059
Epoch 45/120
12/12 [==============================] - 11s - loss: 52.2821 - acc: 0.7444 - val_loss: 54.9571 - val_acc: 0.7647
Epoch 46/120
12/12 [==============================] - 11s - loss: 50.4555 - acc: 0.7576 - val_loss: 52.7142 - val_acc: 0.7206
Epoch 47/120
12/12 [==============================] - 11s - loss: 51.9942 - acc: 0.7474 - val_loss: 52.5731 - val_acc: 0.7206
Epoch 48/120
12/12 [==============================] - 10s - loss: 49.6161 - acc: 0.7685 - val_loss: 51.2623 - val_acc: 0.7206
Epoch 49/120
12/12 [==============================] - 10s - loss: 47.4975 - acc: 0.7661 - val_loss: 49.8551 - val_acc: 0.6912
Epoch 50/120
12/12 [==============================] - 11s - loss: 47.8064 - acc: 0.7446 - val_loss: 52.9269 - val_acc: 0.7188
Epoch 51/120
12/12 [==============================] - 10s - loss: 46.8758 - acc: 0.7474 - val_loss: 48.0909 - val_acc: 0.6765
Epoch 52/120
12/12 [==============================] - 11s - loss: 45.2255 - acc: 0.7502 - val_loss: 48.9373 - val_acc: 0.7059
Epoch 53/120
12/12 [==============================] - 11s - loss: 45.4845 - acc: 0.7424 - val_loss: 48.8612 - val_acc: 0.6912
Epoch 54/120
12/12 [==============================] - 11s - loss: 45.2531 - acc: 0.7365 - val_loss: 49.4520 - val_acc: 0.7188
Epoch 55/120
12/12 [==============================] - 11s - loss: 43.6251 - acc: 0.7317 - val_loss: 46.8535 - val_acc: 0.6765
Epoch 56/120
12/12 [==============================] - 11s - loss: 44.1053 - acc: 0.7526 - val_loss: 46.5449 - val_acc: 0.7206
Epoch 57/120
12/12 [==============================] - 11s - loss: 42.3695 - acc: 0.7498 - val_loss: 45.9340 - val_acc: 0.7353
Epoch 58/120
12/12 [==============================] - 11s - loss: 43.0189 - acc: 0.7737 - val_loss: 47.1373 - val_acc: 0.7396
Epoch 59/120
12/12 [==============================] - 11s - loss: 41.4748 - acc: 0.7038 - val_loss: 44.4741 - val_acc: 0.6765
Epoch 60/120
12/12 [==============================] - 12s - loss: 43.0763 - acc: 0.7474 - val_loss: 42.6233 - val_acc: 0.7647
Epoch 61/120
12/12 [==============================] - 12s - loss: 40.5593 - acc: 0.7733 - val_loss: 43.9149 - val_acc: 0.6765
Epoch 62/120
12/12 [==============================] - 13s - loss: 40.5759 - acc: 0.7524 - val_loss: 45.4547 - val_acc: 0.7083
Epoch 63/120
12/12 [==============================] - 12s - loss: 40.4581 - acc: 0.7783 - val_loss: 42.7305 - val_acc: 0.6765
Epoch 64/120
10/12 [========================>.....] - ETA: 1s - loss: 38.5306 - acc: 0.7531
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-21-c5685858fd74> in <module>()
     10                    validation_data=test_generator,
     11                    validation_steps=nb_test_samples // batch_size,
---> 12                    callbacks=[tbCallBack])
     13 
     14 model.save_weights('first_try.h5')

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
     86                 warnings.warn('Update your `' + object_name +
     87                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 88             return func(*args, **kwargs)
     89         wrapper._legacy_support_signature = inspect.getargspec(func)
     90         return wrapper

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/keras/models.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_q_size, workers, pickle_safe, initial_epoch)
   1122                                         workers=workers,
   1123                                         pickle_safe=pickle_safe,
-> 1124                                         initial_epoch=initial_epoch)
   1125 
   1126     @interfaces.legacy_generator_methods_support

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
     86                 warnings.warn('Update your `' + object_name +
     87                               '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 88             return func(*args, **kwargs)
     89         wrapper._legacy_support_signature = inspect.getargspec(func)
     90         return wrapper

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_q_size, workers, pickle_safe, initial_epoch)
   1900                     outs = self.train_on_batch(x, y,
   1901                                                sample_weight=sample_weight,
-> 1902                                                class_weight=class_weight)
   1903 
   1904                     if not isinstance(outs, list):

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1640             ins = x + y + sample_weights
   1641         self._make_train_function()
-> 1642         outputs = self.train_function(ins)
   1643         if len(outputs) == 1:
   1644             return outputs[0]

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2267         updated = session.run(self.outputs + [self.updates_op],
   2268                               feed_dict=feed_dict,
-> 2269                               **self.session_kwargs)
   2270         return updated[:len(self.outputs)]
   2271 

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    787     try:
    788       result = self._run(None, fetches, feed_dict, options_ptr,
--> 789                          run_metadata_ptr)
    790       if run_metadata:
    791         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    995     if final_fetches or final_targets:
    996       results = self._do_run(handle, final_targets, final_fetches,
--> 997                              feed_dict_string, options, run_metadata)
    998     else:
    999       results = []

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1130     if handle is None:
   1131       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1132                            target_list, options, run_metadata)
   1133     else:
   1134       return self._do_call(_prun_fn, self._session, handle, feed_dict,

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1137   def _do_call(self, fn, *args):
   1138     try:
-> 1139       return fn(*args)
   1140     except errors.OpError as e:
   1141       message = compat.as_text(e.message)

~/virtualenvironment/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1119         return tf_session.TF_Run(session, options,
   1120                                  feed_dict, fetch_list, target_list,
-> 1121                                  status, run_metadata)
   1122 
   1123     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

Testing a prediction on a single image to inspect the model output


In [29]:
import random

In [31]:
test_data_path = "data/processed/test/Body"
test_dir_path = os.path.normpath(os.path.join(proj_dir, test_data_path))
random_image = random.choice(os.listdir(test_dir_path))
test_image_filepath = os.path.join(test_dir_path, random_image)

In [32]:
import scipy.misc

# load the image as grayscale, resize it to the network input size and rescale to [0, 1]
image = scipy.misc.imread(test_image_filepath, mode='L')
image = scipy.misc.imresize(image, (100, 50))
image = image / 255
image = np.expand_dims(image, axis=2)  # add the channel dimension

model.save('../models/80_mf.h5')
image = image.reshape((1,) + image.shape)  # add the batch dimension
model.predict_proba(image, verbose=1, batch_size=1)


1/1 [==============================] - 0s
Out[32]:
array([[  9.74491298e-01,   1.81158148e-02,   3.50045832e-03,
          8.41598317e-04,   3.05083441e-03]], dtype=float32)

In [33]:
model.predict(image)


Out[33]:
array([[  9.74491298e-01,   1.81158148e-02,   3.50045832e-03,
          8.41598317e-04,   3.05083441e-03]], dtype=float32)

In [34]:
train_generator.class_indices


Out[34]:
{'Body': 0,
 'Head-Neck': 1,
 'Lower-Limb': 2,
 'True-Negative': 3,
 'Upper-Limb': 4}
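
The predicted probability vector maps back to a class name through class_indices. A small sketch, assuming the image and prediction from the cells above:

# invert class_indices: index -> class name
index_to_class = {v: k for k, v in train_generator.class_indices.items()}

probs = model.predict(image)[0]
predicted_class = index_to_class[int(np.argmax(probs))]
print(predicted_class, probs.max())  # e.g. 'Body' at ~0.97 for the prediction above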

In [ ]: