Lab 3 - Embeddings

Students: Jan Carbonell Gisela Alessandrello


In [0]:
%tensorflow_version 2.x
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
  raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))


TensorFlow 2.x selected.
Found GPU at: /device:GPU:0

In [0]:
from google.colab import drive
drive.mount('/content/drive')


Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly

Enter your authorization code:
··········
Mounted at /content/drive

In [0]:
# Importing packages
import os
import io
from pathlib import Path
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
import seaborn as sns
import cv2
import warnings
import random
warnings.filterwarnings('ignore')
from datetime import datetime, timedelta
from scipy import stats
from itertools import product
from math import sqrt
from sklearn.metrics import mean_squared_error
import matplotlib as mpl
%matplotlib inline
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.keras.applications import VGG16, VGG19, InceptionResNetV2 
from tensorflow.keras import models, layers
from tensorflow.keras.layers import AveragePooling2D, ZeroPadding2D, Dropout, Flatten
from tensorflow.keras.layers import Input, Dense, Reshape, Activation
from tensorflow.keras.optimizers import RMSprop, SGD, Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import model_from_json, Sequential, Model
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.utils import to_categorical
print(tf.__version__)


2.0.0

New Section


In [0]:
#setting up global variables
# ATADIR = "./simpsons_dataset" #training data directory
DATADIR = '/content/drive/My Drive/MAI/DL/Lab3/simpsons_dataset'
CATEGORIES = ['bart_simpson' , 'homer_simpson', 'lisa_simpson', 'moe_szyslak', 'ned_flanders' ]

image_size=224 #image net has images trained of this size
batch_size=64
epochs=100

In [0]:
training_data = []

for category in CATEGORIES:
    print("Loading images for category: ", category, " ...")
    path = os.path.join(DATADIR, category) #path to alphabets
    class_num = CATEGORIES.index(category)
    for img in os.listdir(path):
        if img != '.DS_Store':
            # print(os.path.join(path,img))
            img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_COLOR)
            new_array = cv2.resize(img_array, (224, 224))
            final_img = cv2.cvtColor(new_array, cv2.COLOR_BGR2RGB)
            training_data.append([final_img, class_num])


Loading images for category:  bart_simpson  ...
Loading images for category:  homer_simpson  ...
Loading images for category:  lisa_simpson  ...
Loading images for category:  moe_szyslak  ...
Loading images for category:  ned_flanders  ...

In [0]:
# training_data[1:5]
random.shuffle(training_data)

In [0]:
X=[]
y=[]
for features,label in training_data: 
    X.append(features)
    y.append(label)
    # X = np.append(X, features)
    # y = np.append(y, label)
    
X = np.array(X).reshape(-1, image_size, image_size, 3)
X = X.astype('float32')/255.0 # to normalize data
y = to_categorical(y) #one-hot encoding
y = np.array(y)

X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)

print("Train number of samples:", X_train.shape[0])
print("Val number of samples:", X_val.shape[0])

train_data_gen = ImageDataGenerator(
      # validation_split = 0.1, 
     horizontal_flip=True
    ) 

val_data_gen = ImageDataGenerator(
      # validation_split = 0.1, 
     horizontal_flip=True) 

train_data = train_data_gen.flow(X_train, y_train, batch_size = 32, shuffle = True)

val_data = val_data_gen.flow(X_val, y_val, batch_size = 32, shuffle = True)


Train number of samples: 5908
Val number of samples: 657

In [0]:
num_train_samples = X_train.shape[0]
num_val_samples = X_val.shape[1]

print(train_data)
print(val_data)

print("Classes: B  H  L  M  N ")

x,y = train_data.next()
for i in range(0,4):
    image = x[i]
    plt.imshow(image)
    plt.title(y[i])
    plt.show()


print(X_train.shape)
print(X_val.shape)
print(y_train.shape)
print(y_val.shape)


<keras_preprocessing.image.numpy_array_iterator.NumpyArrayIterator object at 0x7f0831d674a8>
<keras_preprocessing.image.numpy_array_iterator.NumpyArrayIterator object at 0x7f0831d67588>
Classes: B  H  L  M  N 
(5908, 224, 224, 3)
(657, 224, 224, 3)
(5908, 5)
(657, 5)

In [0]:
print(y_val[1])


[0. 0. 0. 0. 1.]

In [0]:
# VGG19, InceptionResNetV2 
# conv_base = InceptionResNetV2(weights='imagenet', include_top=False, input_shape=(image_size,image_size,3))
conv_base = VGG19(weights='imagenet',include_top=False,input_shape=(image_size,image_size,3))
# conv_base = VGG16(weights='imagenet',include_top=False,input_shape=(image_size,image_size,3))


Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5
80142336/80134624 [==============================] - 2s 0us/step

In [0]:
# Build Model 

model = Sequential()
#Add the base model
for layer in conv_base.layers:
    model.add(layer)
    
model.summary()


Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_conv4 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_conv4 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv4 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0         
=================================================================
Total params: 20,024,384
Trainable params: 20,024,384
Non-trainable params: 0
_________________________________________________________________

In [0]:
# Freeze the layers that won't be trained
# Freeze 18 first layers  for layer in model.layers[:18]:
# Freeze top 10 first layers 
for layer in model.layers[:10]:
    layer.trainable = False

#Add new custom layers
# x = model.output
# x = Flatten()(x)
# x = Dense(64, activation='relu')(x)
# x = Dense(64, activation='relu')(x)
# predictions = Dense(29, activation='softmax')(x)
# model_final = Model(inputs=model.input, output=predictions)

model.add(Flatten())
model.add(Dense(64, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dense(5, activation='softmax'))

model.summary()


Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
block1_conv1 (Conv2D)        (None, 224, 224, 64)      1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 224, 224, 64)      36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 112, 112, 64)      0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 112, 112, 128)     73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 112, 112, 128)     147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 56, 56, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 56, 56, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_conv4 (Conv2D)        (None, 56, 56, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 28, 28, 256)       0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 28, 28, 512)       1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_conv4 (Conv2D)        (None, 28, 28, 512)       2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 14, 14, 512)       0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_conv4 (Conv2D)        (None, 14, 14, 512)       2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 7, 7, 512)         0         
_________________________________________________________________
flatten (Flatten)            (None, 25088)             0         
_________________________________________________________________
dense (Dense)                (None, 64)                1605696   
_________________________________________________________________
dense_1 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_2 (Dense)              (None, 5)                 165       
=================================================================
Total params: 21,632,325
Trainable params: 19,306,757
Non-trainable params: 2,325,568
_________________________________________________________________

In [0]:
# Optimizer 
# adam = tf.keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
# sgd = SGD(lr=0.001)
# loss='mean_squared_error'
# loss='categorical_crossentropy'
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
checkpoint = ModelCheckpoint("weights.{epoch:02d}-{val_loss:.2f}.h5", monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')


WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of samples seen.

In [0]:
history = model.fit_generator(train_data,
                    epochs = 80,
                    # steps_per_epoch = 1,
                    validation_data=val_data,              
                    # validation_steps=3, # , we will use all val data.
                    verbose=1, 
                    # shuffle=True, 
                    # callbacks=[early, checkpoint], 
                    use_multiprocessing=True)


# The returned "history" object holds a record
# of the loss values and metric values during training
print('\nhistory dict:', history.history)

# serialize model to JSON
model_json = model.to_json()
with open("/content/drive/My Drive/MAI/DL/Lab3/model_lab3.json", "w") as json_file:
    json_file.write(model_json)
    
# serialize weights to HDF5
model.save_weights("/content/drive/My Drive/MAI/DL/Lab3/model_lab3_weights.h5")
print("Saved model to disk")


Epoch 1/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1080 - accuracy: 0.9626 - val_loss: 0.4777 - val_accuracy: 0.8661
Epoch 2/80
185/185 [==============================] - 73s 397ms/step - loss: 0.1135 - accuracy: 0.9638 - val_loss: 0.3530 - val_accuracy: 0.9041
Epoch 3/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1139 - accuracy: 0.9607 - val_loss: 0.4826 - val_accuracy: 0.8828
Epoch 4/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1443 - accuracy: 0.9518 - val_loss: 0.6213 - val_accuracy: 0.8661
Epoch 5/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0865 - accuracy: 0.9702 - val_loss: 0.3376 - val_accuracy: 0.9163
Epoch 6/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1583 - accuracy: 0.9484 - val_loss: 0.4087 - val_accuracy: 0.8995
Epoch 7/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1443 - accuracy: 0.9533 - val_loss: 0.4168 - val_accuracy: 0.8965
Epoch 8/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0989 - accuracy: 0.9682 - val_loss: 0.4362 - val_accuracy: 0.8843
Epoch 9/80
185/185 [==============================] - 74s 401ms/step - loss: 0.0953 - accuracy: 0.9724 - val_loss: 0.6027 - val_accuracy: 0.8539
Epoch 10/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1611 - accuracy: 0.9482 - val_loss: 0.4659 - val_accuracy: 0.8813
Epoch 11/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1392 - accuracy: 0.9558 - val_loss: 0.3355 - val_accuracy: 0.9011
Epoch 12/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0861 - accuracy: 0.9721 - val_loss: 0.4341 - val_accuracy: 0.8980
Epoch 13/80
185/185 [==============================] - 74s 401ms/step - loss: 0.0827 - accuracy: 0.9739 - val_loss: 0.3763 - val_accuracy: 0.8935
Epoch 14/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1386 - accuracy: 0.9572 - val_loss: 0.3973 - val_accuracy: 0.8858
Epoch 15/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1274 - accuracy: 0.9617 - val_loss: 0.4504 - val_accuracy: 0.8767
Epoch 16/80
185/185 [==============================] - 73s 397ms/step - loss: 0.1134 - accuracy: 0.9624 - val_loss: 0.3887 - val_accuracy: 0.8706
Epoch 17/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1150 - accuracy: 0.9617 - val_loss: 0.3838 - val_accuracy: 0.8904
Epoch 18/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0746 - accuracy: 0.9739 - val_loss: 0.5091 - val_accuracy: 0.8919
Epoch 19/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0961 - accuracy: 0.9699 - val_loss: 0.3595 - val_accuracy: 0.9117
Epoch 20/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0853 - accuracy: 0.9734 - val_loss: 0.7719 - val_accuracy: 0.8584
Epoch 21/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1843 - accuracy: 0.9458 - val_loss: 0.3432 - val_accuracy: 0.9041
Epoch 22/80
185/185 [==============================] - 73s 397ms/step - loss: 0.1503 - accuracy: 0.9548 - val_loss: 0.3870 - val_accuracy: 0.8980
Epoch 23/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1071 - accuracy: 0.9655 - val_loss: 0.3448 - val_accuracy: 0.9117
Epoch 24/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0805 - accuracy: 0.9741 - val_loss: 0.3904 - val_accuracy: 0.9163
Epoch 25/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0884 - accuracy: 0.9736 - val_loss: 0.5405 - val_accuracy: 0.8935
Epoch 26/80
185/185 [==============================] - 74s 398ms/step - loss: 0.3174 - accuracy: 0.9035 - val_loss: 0.5579 - val_accuracy: 0.8645
Epoch 27/80
185/185 [==============================] - 73s 397ms/step - loss: 0.1443 - accuracy: 0.9526 - val_loss: 0.2672 - val_accuracy: 0.9163
Epoch 28/80
185/185 [==============================] - 73s 397ms/step - loss: 0.0768 - accuracy: 0.9760 - val_loss: 0.3256 - val_accuracy: 0.9117
Epoch 29/80
185/185 [==============================] - 74s 397ms/step - loss: 0.0819 - accuracy: 0.9744 - val_loss: 0.4329 - val_accuracy: 0.9011
Epoch 30/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0665 - accuracy: 0.9800 - val_loss: 0.4043 - val_accuracy: 0.9087
Epoch 31/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0643 - accuracy: 0.9773 - val_loss: 0.3556 - val_accuracy: 0.9072
Epoch 32/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1004 - accuracy: 0.9687 - val_loss: 0.4986 - val_accuracy: 0.8919
Epoch 33/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0965 - accuracy: 0.9697 - val_loss: 0.4692 - val_accuracy: 0.8904
Epoch 34/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0823 - accuracy: 0.9755 - val_loss: 0.4568 - val_accuracy: 0.9056
Epoch 35/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0984 - accuracy: 0.9711 - val_loss: 0.4287 - val_accuracy: 0.8858
Epoch 36/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1474 - accuracy: 0.9529 - val_loss: 0.5506 - val_accuracy: 0.8874
Epoch 37/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0993 - accuracy: 0.9680 - val_loss: 0.5338 - val_accuracy: 0.9132
Epoch 38/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1425 - accuracy: 0.9589 - val_loss: 0.3747 - val_accuracy: 0.8904
Epoch 39/80
185/185 [==============================] - 74s 400ms/step - loss: 0.1166 - accuracy: 0.9633 - val_loss: 0.5543 - val_accuracy: 0.8843
Epoch 40/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0587 - accuracy: 0.9832 - val_loss: 0.3923 - val_accuracy: 0.9163
Epoch 41/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0498 - accuracy: 0.9841 - val_loss: 0.4473 - val_accuracy: 0.9163
Epoch 42/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0477 - accuracy: 0.9834 - val_loss: 0.4850 - val_accuracy: 0.9178
Epoch 43/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0902 - accuracy: 0.9758 - val_loss: 0.5393 - val_accuracy: 0.8752
Epoch 44/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1484 - accuracy: 0.9589 - val_loss: 0.3463 - val_accuracy: 0.9117
Epoch 45/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1100 - accuracy: 0.9677 - val_loss: 0.4482 - val_accuracy: 0.8813
Epoch 46/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1256 - accuracy: 0.9621 - val_loss: 0.4988 - val_accuracy: 0.8524
Epoch 47/80
185/185 [==============================] - 73s 397ms/step - loss: 0.1179 - accuracy: 0.9646 - val_loss: 0.4727 - val_accuracy: 0.8995
Epoch 48/80
185/185 [==============================] - 74s 397ms/step - loss: 0.0818 - accuracy: 0.9758 - val_loss: 0.4425 - val_accuracy: 0.9056
Epoch 49/80
185/185 [==============================] - 74s 400ms/step - loss: 0.0498 - accuracy: 0.9841 - val_loss: 0.5135 - val_accuracy: 0.9087
Epoch 50/80
185/185 [==============================] - 73s 397ms/step - loss: 0.0859 - accuracy: 0.9773 - val_loss: 0.4634 - val_accuracy: 0.9041
Epoch 51/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1160 - accuracy: 0.9677 - val_loss: 0.5030 - val_accuracy: 0.9056
Epoch 52/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0871 - accuracy: 0.9729 - val_loss: 0.4724 - val_accuracy: 0.9072
Epoch 53/80
185/185 [==============================] - 74s 400ms/step - loss: 0.1071 - accuracy: 0.9699 - val_loss: 0.3978 - val_accuracy: 0.9056
Epoch 54/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1184 - accuracy: 0.9665 - val_loss: 0.3201 - val_accuracy: 0.9011
Epoch 55/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1758 - accuracy: 0.9425 - val_loss: 0.4085 - val_accuracy: 0.9011
Epoch 56/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1047 - accuracy: 0.9702 - val_loss: 0.4150 - val_accuracy: 0.9087
Epoch 57/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0969 - accuracy: 0.9724 - val_loss: 0.5565 - val_accuracy: 0.8980
Epoch 58/80
185/185 [==============================] - 74s 397ms/step - loss: 0.0933 - accuracy: 0.9724 - val_loss: 0.3967 - val_accuracy: 0.9056
Epoch 59/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0488 - accuracy: 0.9863 - val_loss: 0.4043 - val_accuracy: 0.9209
Epoch 60/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0784 - accuracy: 0.9770 - val_loss: 0.3475 - val_accuracy: 0.9239
Epoch 61/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0803 - accuracy: 0.9773 - val_loss: 0.5874 - val_accuracy: 0.8858
Epoch 62/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0959 - accuracy: 0.9741 - val_loss: 0.4002 - val_accuracy: 0.9178
Epoch 63/80
185/185 [==============================] - 74s 399ms/step - loss: 0.2151 - accuracy: 0.9445 - val_loss: 0.6461 - val_accuracy: 0.8539
Epoch 64/80
185/185 [==============================] - 74s 400ms/step - loss: 0.2404 - accuracy: 0.9342 - val_loss: 0.5181 - val_accuracy: 0.8874
Epoch 65/80
185/185 [==============================] - 74s 398ms/step - loss: 0.1326 - accuracy: 0.9611 - val_loss: 0.5222 - val_accuracy: 0.8980
Epoch 66/80
185/185 [==============================] - 74s 400ms/step - loss: 0.0738 - accuracy: 0.9788 - val_loss: 0.4651 - val_accuracy: 0.8950
Epoch 67/80
185/185 [==============================] - 74s 401ms/step - loss: 0.0674 - accuracy: 0.9805 - val_loss: 0.5902 - val_accuracy: 0.8965
Epoch 68/80
185/185 [==============================] - 74s 400ms/step - loss: 0.1032 - accuracy: 0.9699 - val_loss: 0.5278 - val_accuracy: 0.8935
Epoch 69/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0850 - accuracy: 0.9741 - val_loss: 0.4135 - val_accuracy: 0.9193
Epoch 70/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1197 - accuracy: 0.9661 - val_loss: 0.4128 - val_accuracy: 0.8995
Epoch 71/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0547 - accuracy: 0.9827 - val_loss: 0.3913 - val_accuracy: 0.9285
Epoch 72/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0490 - accuracy: 0.9875 - val_loss: 0.4058 - val_accuracy: 0.9178
Epoch 73/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0503 - accuracy: 0.9856 - val_loss: 0.3428 - val_accuracy: 0.9193
Epoch 74/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1042 - accuracy: 0.9721 - val_loss: 0.4430 - val_accuracy: 0.9148
Epoch 75/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1182 - accuracy: 0.9687 - val_loss: 0.5872 - val_accuracy: 0.8874
Epoch 76/80
185/185 [==============================] - 74s 397ms/step - loss: 0.1150 - accuracy: 0.9685 - val_loss: 0.3884 - val_accuracy: 0.9132
Epoch 77/80
185/185 [==============================] - 74s 399ms/step - loss: 0.0625 - accuracy: 0.9846 - val_loss: 0.5521 - val_accuracy: 0.8919
Epoch 78/80
185/185 [==============================] - 74s 400ms/step - loss: 0.0590 - accuracy: 0.9817 - val_loss: 0.5145 - val_accuracy: 0.9269
Epoch 79/80
185/185 [==============================] - 74s 398ms/step - loss: 0.0850 - accuracy: 0.9773 - val_loss: 0.5050 - val_accuracy: 0.9056
Epoch 80/80
185/185 [==============================] - 74s 399ms/step - loss: 0.1308 - accuracy: 0.9656 - val_loss: 0.5593 - val_accuracy: 0.8995

history dict: {'loss': [0.10794449354520039, 0.11367213300422349, 0.11372748689036173, 0.14437522867299774, 0.0866317790582998, 0.15860205275632727, 0.14337870004960793, 0.09911031126042799, 0.09515870322700976, 0.16002284965465904, 0.1391281298586825, 0.08627077458412484, 0.08286393584148201, 0.13883733253461955, 0.12749408350938385, 0.11359479344912131, 0.11519377465128455, 0.07475710265258341, 0.09612504169521456, 0.08543855454714384, 0.18470205922930116, 0.14945800838387932, 0.1072561571323461, 0.08054441776746217, 0.08849940376517665, 0.31701299122937615, 0.1443063236505661, 0.07632986588571308, 0.082015892093498, 0.06661080354436874, 0.06425574689566933, 0.10051947244450565, 0.09629204650154147, 0.0824626094974406, 0.09861345547246084, 0.147659057538172, 0.09943445190817582, 0.14226304032110287, 0.11680134461364111, 0.05870513916707654, 0.04983011154337649, 0.04779479512793092, 0.09032773740756557, 0.148255796042462, 0.10984585708014782, 0.1256217116610757, 0.11806999154820201, 0.08194024402102154, 0.049918916033268465, 0.08570629784890331, 0.11583813390402614, 0.08680744795317868, 0.10596543477300224, 0.1186771264242055, 0.1761457193390265, 0.10483366113847163, 0.0965427844982989, 0.09329783887562665, 0.048875886830353474, 0.07850174507156439, 0.08032123992566882, 0.0959865396641215, 0.21531771397194147, 0.24085699450212028, 0.1327980833459764, 0.0739247266456331, 0.06747899888101545, 0.10343294086876406, 0.08478889118638612, 0.11991714977816292, 0.054812455417735904, 0.04911152872746557, 0.050296095536574596, 0.10442598195514499, 0.11840585764728379, 0.114986950617672, 0.06258155101241535, 0.058966442122485405, 0.0851278474300761, 0.1310367754087298], 'accuracy': [0.9625931, 0.9637779, 0.9607312, 0.95176035, 0.9702099, 0.9483751, 0.95328367, 0.96817875, 0.97241026, 0.9482058, 0.9558226, 0.97207177, 0.97393364, 0.9571767, 0.9617468, 0.96242386, 0.9617468, 0.97393364, 0.96987134, 0.97342587, 0.9458361, 0.95480704, 0.96547055, 0.9741029, 0.97359514, 0.90352064, 0.9526066, 0.9759648, 0.9744414, 0.9800271, 0.9773189, 0.9686865, 0.9697021, 0.975457, 0.9710562, 0.9529452, 0.9680095, 0.95886934, 0.9632701, 0.98324305, 0.9840894, 0.9834123, 0.9757955, 0.95886934, 0.967671, 0.9620853, 0.9646242, 0.9757955, 0.9840894, 0.9773189, 0.967671, 0.9729181, 0.96987134, 0.9664861, 0.94245094, 0.9702099, 0.97241026, 0.97241026, 0.9862898, 0.9769804, 0.9773189, 0.9741029, 0.944482, 0.9341571, 0.96106976, 0.97884226, 0.98053485, 0.96987134, 0.9741029, 0.9661476, 0.9827353, 0.9874746, 0.98561275, 0.97207177, 0.9686865, 0.96851724, 0.98459715, 0.9817197, 0.9773189, 0.9656398], 'val_loss': [0.4776649588630313, 0.3529567423587044, 0.4826188502567155, 0.6213081927881354, 0.33763687773829415, 0.40873548565875917, 0.4167756537596385, 0.4361502400466374, 0.6027386926469349, 0.4659432584331149, 0.3354816644319466, 0.43413046144303824, 0.3763476869180089, 0.39726745017937254, 0.4504277774443229, 0.3886901722067878, 0.38383680147429305, 0.509106708424432, 0.3595152209025054, 0.7719261991747078, 0.343172981270722, 0.3870081488220465, 0.34479374899750664, 0.3904384768434933, 0.540497957595757, 0.5579357565868468, 0.26724013357999776, 0.3256135526157561, 0.4328776133202371, 0.40430591716652825, 0.355649495053859, 0.49858942202159334, 0.46920303752024967, 0.4567904406715007, 0.42871546284073875, 0.5506452921600569, 0.5337852975797086, 0.3747068127351148, 0.5542621836066246, 0.39232996599804165, 0.44731653441808056, 0.48504090025311425, 0.539291018531436, 0.3463074287310952, 0.44818030546108883, 0.49883324440036503, 0.47268666326999664, 0.4425454203571592, 0.5134755727790651, 0.46343561208673884, 0.5029824436420486, 0.47239810237217517, 0.3977500617703689, 0.32013404635446413, 0.40849604244743076, 0.41500662320426535, 0.5565300234371707, 0.3966606091707945, 0.40432838045637165, 0.34750651852006004, 0.5874341591483071, 0.4002368427269782, 0.6461100152560643, 0.5180840960570744, 0.5222208181484824, 0.46507582707064493, 0.590192915783042, 0.5278145712356836, 0.4134961210546039, 0.4127597046040353, 0.39128695020363446, 0.40578722603996065, 0.3428055912788425, 0.44297594266633195, 0.5871719810224715, 0.38843061522181543, 0.5521424444658416, 0.5145177399251788, 0.5049854317413909, 0.5593459369348628], 'val_accuracy': [0.8660578, 0.9041096, 0.8828006, 0.8660578, 0.9162862, 0.8995434, 0.8964992, 0.8843227, 0.8538813, 0.8812785, 0.90106547, 0.8980213, 0.8934551, 0.88584477, 0.8767123, 0.87062407, 0.89041096, 0.891933, 0.9117199, 0.8584475, 0.9041096, 0.8980213, 0.9117199, 0.9162862, 0.8934551, 0.86453575, 0.9162862, 0.9117199, 0.90106547, 0.9086758, 0.9071537, 0.891933, 0.89041096, 0.90563166, 0.88584477, 0.88736683, 0.913242, 0.89041096, 0.8843227, 0.9162862, 0.9162862, 0.91780823, 0.87519026, 0.9117199, 0.8812785, 0.85235924, 0.8995434, 0.90563166, 0.9086758, 0.9041096, 0.90563166, 0.9071537, 0.90563166, 0.90106547, 0.90106547, 0.9086758, 0.8980213, 0.90563166, 0.92085236, 0.9238965, 0.88584477, 0.91780823, 0.8538813, 0.88736683, 0.8980213, 0.89497715, 0.8964992, 0.8934551, 0.9193303, 0.8995434, 0.9284627, 0.91780823, 0.9193303, 0.9147641, 0.88736683, 0.913242, 0.891933, 0.9269406, 0.90563166, 0.8995434]}
Saved model to disk

In [0]:
##Model Accuracy
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()


##Model Accuracy

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()


Test to predict model


In [0]:
CATEGORIES = ['bart_simpson' , 'homer_simpson', 'lisa_simpson', 'moe_szyslak', 'ned_flanders' ]
num_classes = 5
test_data = []
test_labels = []
image_data = []
TESTDIR = '/content/drive/My Drive/MAI/DL/Lab3/simpsons_testset'

for category in CATEGORIES:
    print("Loading test images for category: ", category, " ...")
    path = os.path.join(TESTDIR, category) 
    class_num = CATEGORIES.index(category)
    print("Class: ",  class_num)
    for img in os.listdir(path):
        if img != '.DS_Store':
            # print(img)
            img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_COLOR)
            # new_array = cv2.resize(img_array, (224, 224))
            new_array = cv2.resize(img_array, (224, 224)).astype('float32') / 255.
            final_img = cv2.cvtColor(new_array, cv2.COLOR_BGR2RGB)
            test_data.append(final_img)
            test_labels.append(class_num)
            image_data.append(img)
    X_test = np.array(test_data)
    y_test = np.array(test_labels)
    y_test = to_categorical(y_test, num_classes)
    print("Test set", X_test.shape, y_test.shape)

# X_test, y_test

probabilities = model.predict(X_test)
y_pred = probabilities > 0.5
y_pred = np.argmax(y_pred, axis=1)

y_test = np.argmax(y_test, axis=1)


Loading test images for category:  bart_simpson  ...
Class:  0
Test set (50, 224, 224, 3) (50, 5)
Loading test images for category:  homer_simpson  ...
Class:  1
Test set (100, 224, 224, 3) (100, 5)
Loading test images for category:  lisa_simpson  ...
Class:  2
Test set (150, 224, 224, 3) (150, 5)
Loading test images for category:  moe_szyslak  ...
Class:  3
Test set (200, 224, 224, 3) (200, 5)
Loading test images for category:  ned_flanders  ...
Class:  4
Test set (249, 224, 224, 3) (249, 5)

In [0]:
y_pred


Out[0]:
array([1, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 0, 0, 3, 0, 0, 1, 1, 0, 0, 0, 0,
       0, 0, 3, 0, 0, 0, 0, 3, 3, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0,
       2, 4, 0, 0, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
       3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
       3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3,
       3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
       4, 4, 4, 4, 1, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4, 4, 4, 4, 1, 4, 2,
       4, 0, 4, 4, 4, 4, 4])

In [0]:
y_test


Out[0]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
       3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
       3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
       3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
       4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
       4, 4, 4, 4, 4, 4, 4])

In [0]:
image_data


Out[0]:
['bart_simpson_48.jpg',
 'bart_simpson_49.jpg',
 'bart_simpson_39.jpg',
 'bart_simpson_11.jpg',
 'bart_simpson_10.jpg',
 'bart_simpson_38.jpg',
 'bart_simpson_12.jpg',
 'bart_simpson_13.jpg',
 'bart_simpson_17.jpg',
 'bart_simpson_16.jpg',
 'bart_simpson_28.jpg',
 'bart_simpson_14.jpg',
 'bart_simpson_8.jpg',
 'bart_simpson_29.jpg',
 'bart_simpson_15.jpg',
 'bart_simpson_9.jpg',
 'bart_simpson_24.jpg',
 'bart_simpson_30.jpg',
 'bart_simpson_18.jpg',
 'bart_simpson_19.jpg',
 'bart_simpson_5.jpg',
 'bart_simpson_31.jpg',
 'bart_simpson_4.jpg',
 'bart_simpson_25.jpg',
 'bart_simpson_33.jpg',
 'bart_simpson_27.jpg',
 'bart_simpson_26.jpg',
 'bart_simpson_7.jpg',
 'bart_simpson_6.jpg',
 'bart_simpson_2.jpg',
 'bart_simpson_32.jpg',
 'bart_simpson_3.jpg',
 'bart_simpson_36.jpg',
 'bart_simpson_37.jpg',
 'bart_simpson_23.jpg',
 'bart_simpson_22.jpg',
 'bart_simpson_1.jpg',
 'bart_simpson_21.jpg',
 'bart_simpson_0.jpg',
 'bart_simpson_34.jpg',
 'bart_simpson_20.jpg',
 'bart_simpson_35.jpg',
 'bart_simpson_47.jpg',
 'bart_simpson_46.jpg',
 'bart_simpson_45.jpg',
 'bart_simpson_44.jpg',
 'bart_simpson_40.jpg',
 'bart_simpson_41.jpg',
 'bart_simpson_42.jpg',
 'bart_simpson_43.jpg',
 'homer_simpson_9.jpg',
 'homer_simpson_8.jpg',
 'homer_simpson_48.jpg',
 'homer_simpson_49.jpg',
 'homer_simpson_13.jpg',
 'homer_simpson_12.jpg',
 'homer_simpson_10.jpg',
 'homer_simpson_38.jpg',
 'homer_simpson_11.jpg',
 'homer_simpson_39.jpg',
 'homer_simpson_15.jpg',
 'homer_simpson_29.jpg',
 'homer_simpson_28.jpg',
 'homer_simpson_14.jpg',
 'homer_simpson_16.jpg',
 'homer_simpson_17.jpg',
 'homer_simpson_32.jpg',
 'homer_simpson_26.jpg',
 'homer_simpson_27.jpg',
 'homer_simpson_33.jpg',
 'homer_simpson_19.jpg',
 'homer_simpson_31.jpg',
 'homer_simpson_18.jpg',
 'homer_simpson_24.jpg',
 'homer_simpson_30.jpg',
 'homer_simpson_25.jpg',
 'homer_simpson_34.jpg',
 'homer_simpson_20.jpg',
 'homer_simpson_21.jpg',
 'homer_simpson_35.jpg',
 'homer_simpson_23.jpg',
 'homer_simpson_37.jpg',
 'homer_simpson_36.jpg',
 'homer_simpson_22.jpg',
 'homer_simpson_45.jpg',
 'homer_simpson_1.jpg',
 'homer_simpson_44.jpg',
 'homer_simpson_0.jpg',
 'homer_simpson_3.jpg',
 'homer_simpson_46.jpg',
 'homer_simpson_2.jpg',
 'homer_simpson_47.jpg',
 'homer_simpson_7.jpg',
 'homer_simpson_43.jpg',
 'homer_simpson_42.jpg',
 'homer_simpson_6.jpg',
 'homer_simpson_4.jpg',
 'homer_simpson_40.jpg',
 'homer_simpson_5.jpg',
 'homer_simpson_41.jpg',
 'lisa_simpson_46.jpg',
 'lisa_simpson_47.jpg',
 'lisa_simpson_8.jpg',
 'lisa_simpson_45.jpg',
 'lisa_simpson_44.jpg',
 'lisa_simpson_9.jpg',
 'lisa_simpson_40.jpg',
 'lisa_simpson_41.jpg',
 'lisa_simpson_43.jpg',
 'lisa_simpson_42.jpg',
 'lisa_simpson_19.jpg',
 'lisa_simpson_31.jpg',
 'lisa_simpson_24.jpg',
 'lisa_simpson_30.jpg',
 'lisa_simpson_25.jpg',
 'lisa_simpson_18.jpg',
 'lisa_simpson_32.jpg',
 'lisa_simpson_33.jpg',
 'lisa_simpson_26.jpg',
 'lisa_simpson_27.jpg',
 'lisa_simpson_22.jpg',
 'lisa_simpson_37.jpg',
 'lisa_simpson_23.jpg',
 'lisa_simpson_36.jpg',
 'lisa_simpson_20.jpg',
 'lisa_simpson_34.jpg',
 'lisa_simpson_21.jpg',
 'lisa_simpson_35.jpg',
 'lisa_simpson_10.jpg',
 'lisa_simpson_38.jpg',
 'lisa_simpson_11.jpg',
 'lisa_simpson_39.jpg',
 'lisa_simpson_12.jpg',
 'lisa_simpson_13.jpg',
 'lisa_simpson_16.jpg',
 'lisa_simpson_17.jpg',
 'lisa_simpson_15.jpg',
 'lisa_simpson_28.jpg',
 'lisa_simpson_29.jpg',
 'lisa_simpson_14.jpg',
 'lisa_simpson_2.jpg',
 'lisa_simpson_1.jpg',
 'lisa_simpson_3.jpg',
 'lisa_simpson_0.jpg',
 'lisa_simpson_4.jpg',
 'lisa_simpson_48.jpg',
 'lisa_simpson_49.jpg',
 'lisa_simpson_5.jpg',
 'lisa_simpson_7.jpg',
 'lisa_simpson_6.jpg',
 'moe_szyslak_49.jpg',
 'moe_szyslak_48.jpg',
 'moe_szyslak_38.jpg',
 'moe_szyslak_10.jpg',
 'moe_szyslak_3.jpg',
 'moe_szyslak_11.jpg',
 'moe_szyslak_39.jpg',
 'moe_szyslak_0.jpg',
 'moe_szyslak_2.jpg',
 'moe_szyslak_13.jpg',
 'moe_szyslak_5.jpg',
 'moe_szyslak_1.jpg',
 'moe_szyslak_12.jpg',
 'moe_szyslak_16.jpg',
 'moe_szyslak_17.jpg',
 'moe_szyslak_15.jpg',
 'moe_szyslak_4.jpg',
 'moe_szyslak_6.jpg',
 'moe_szyslak_29.jpg',
 'moe_szyslak_14.jpg',
 'moe_szyslak_28.jpg',
 'moe_szyslak_7.jpg',
 'moe_szyslak_19.jpg',
 'moe_szyslak_31.jpg',
 'moe_szyslak_25.jpg',
 'moe_szyslak_30.jpg',
 'moe_szyslak_24.jpg',
 'moe_szyslak_9.jpg',
 'moe_szyslak_32.jpg',
 'moe_szyslak_18.jpg',
 'moe_szyslak_26.jpg',
 'moe_szyslak_8.jpg',
 'moe_szyslak_33.jpg',
 'moe_szyslak_27.jpg',
 'moe_szyslak_37.jpg',
 'moe_szyslak_23.jpg',
 'moe_szyslak_36.jpg',
 'moe_szyslak_20.jpg',
 'moe_szyslak_22.jpg',
 'moe_szyslak_34.jpg',
 'moe_szyslak_35.jpg',
 'moe_szyslak_46.jpg',
 'moe_szyslak_21.jpg',
 'moe_szyslak_47.jpg',
 'moe_szyslak_45.jpg',
 'moe_szyslak_44.jpg',
 'moe_szyslak_40.jpg',
 'moe_szyslak_41.jpg',
 'moe_szyslak_43.jpg',
 'moe_szyslak_42.jpg',
 'ned_flanders_28.jpg',
 'ned_flanders_29.jpg',
 'ned_flanders_15.jpg',
 'ned_flanders_14.jpg',
 'ned_flanders_17.jpg',
 'ned_flanders_12.jpg',
 'ned_flanders_9.jpg',
 'ned_flanders_16.jpg',
 'ned_flanders_13.jpg',
 'ned_flanders_39.jpg',
 'ned_flanders_8.jpg',
 'ned_flanders_11.jpg',
 'ned_flanders_38.jpg',
 'ned_flanders_10.jpg',
 'ned_flanders_48.jpg',
 'ned_flanders_49.jpg',
 'ned_flanders_42.jpg',
 'ned_flanders_43.jpg',
 'ned_flanders_41.jpg',
 'ned_flanders_40.jpg',
 'ned_flanders_44.jpg',
 'ned_flanders_47.jpg',
 'ned_flanders_46.jpg',
 'ned_flanders_35.jpg',
 'ned_flanders_6.jpg',
 'ned_flanders_21.jpg',
 'ned_flanders_20.jpg',
 'ned_flanders_7.jpg',
 'ned_flanders_34.jpg',
 'ned_flanders_36.jpg',
 'ned_flanders_22.jpg',
 'ned_flanders_5.jpg',
 'ned_flanders_4.jpg',
 'ned_flanders_0.jpg',
 'ned_flanders_33.jpg',
 'ned_flanders_27.jpg',
 'ned_flanders_23.jpg',
 'ned_flanders_37.jpg',
 'ned_flanders_26.jpg',
 'ned_flanders_1.jpg',
 'ned_flanders_18.jpg',
 'ned_flanders_3.jpg',
 'ned_flanders_24.jpg',
 'ned_flanders_30.jpg',
 'ned_flanders_32.jpg',
 'ned_flanders_2.jpg',
 'ned_flanders_19.jpg',
 'ned_flanders_31.jpg',
 'ned_flanders_25.jpg']

In [0]:
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels

# Plot normalized confusion matrix
mat = confusion_matrix(y_test, y_pred)

def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = ['B','H','L','M','N'] #classes[unique_labels(y_true, y_pred)]
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    bottom, top = ax.get_ylim()
    ax.set_ylim(bottom + 0.5, top - 0.5)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    # fig.tight_layout()
    return ax


np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plot_confusion_matrix(y_test, y_pred, classes=[0,1,2,3,4], title='Confusion matrix, without normalization')

# Plot normalized confusion matrix
plot_confusion_matrix(y_test, y_pred, classes=[0,1,2,3,4], normalize=True, title='Normalized confusion matrix')
print("   ")
plt.tight_layout(w_pad=5.5, h_pad=5.0)
plt.show()


Confusion matrix, without normalization
[[34  5  1  9  1]
 [ 0 47  0  2  1]
 [ 0  0 49  0  1]
 [ 1  0  0 49  0]
 [ 2  2  1  0 44]]
Normalized confusion matrix
[[0.68 0.1  0.02 0.18 0.02]
 [0.   0.94 0.   0.04 0.02]
 [0.   0.   0.98 0.   0.02]
 [0.02 0.   0.   0.98 0.  ]
 [0.04 0.04 0.02 0.   0.9 ]]
   

In [0]:
probabilities = model.predict(X_test)
y_prednew = probabilities > 0.5

score = model.evaluate(X_test, y_prednew, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print(score)


249/1 [==============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================] - 1s 3ms/sample - loss: 0.0607 - accuracy: 0.9799
Test score: 0.03802779110440289
Test accuracy: 0.9799197
[0.03802779110440289, 0.9799197]