The German Traffic Sign Benchmark

Student Name 1: ...

Student Name 2: ...


In [0]:
# !wget -c http://benchmark.ini.rub.de/Dataset_GTSDB/FullIJCNN2013.zip
# !unzip FullIJCNN2013.zip

In [0]:
import numpy as np
import cv2

IMG_HEIGHT = 600
SIGN_SIZE = (224, 224)

# Function for reading the images
def readImages(rootpath, images_range, signs_range):
    '''Reads traffic sign data from the German Traffic Sign Detection Benchmark (GTSDB).
    Arguments: rootpath     -- path to the traffic sign data, for example 'FullIJCNN2013'
               images_range -- indices of the full-frame .ppm images to load
               signs_range  -- row indices into gt.txt (one row per annotated sign)
    Returns:   dict of resized frames keyed by filename, plus parallel lists of
               filenames, cropped sign images, bounding boxes, and class labels'''
    images = {} # original image
    scales = {} # original scale
    for num in images_range:
        filename = rootpath + '/' + "{:05d}".format(num) + '.ppm'
        img = cv2.imread(filename, cv2.IMREAD_COLOR)
        scale = IMG_HEIGHT / float(img.shape[0])
        img_resized = cv2.resize(img, (int(img.shape[1]*scale),int(img.shape[0]*scale)))
        images.setdefault(filename,[]).append(img_resized)
        scales.setdefault(filename,[]).append(scale)

    files = [] # filenames
    signs = [] # traffic sign image
    bboxes = [] # corresponding box detection
    labels = [] # traffic sign type
    data = np.genfromtxt(rootpath + '/' + 'gt.txt', delimiter=';', dtype=str, usecols=range(0, 6))
    for elem in signs_range:
        filename = rootpath + '/' + data[elem][0]
        img = images.get(filename)[0]
        scale = scales.get(filename)[0]
        bbox = np.array([int(data[elem][1]), int(data[elem][2]), int(data[elem][3]), int(data[elem][4])]) * scale
        sign = img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
        sign_resized = cv2.resize(sign, SIGN_SIZE)
        files.append(filename)
        signs.append(sign_resized)
        bboxes.append(bbox)
        labels.append(data[elem][5])
    return images, files, signs, bboxes, labels
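
To make the rescaling arithmetic concrete, here is a minimal sketch of the scale-and-crop step (the frame height and box coordinates below are hypothetical, not taken from gt.txt): every frame is resized to a height of IMG_HEIGHT pixels, so the annotated box must be scaled by the same factor before cropping.

In [ ]:
# Hypothetical example of the scale-and-crop step used in readImages
orig_height = 800
scale = IMG_HEIGHT / float(orig_height)       # 600 / 800 = 0.75
bbox = np.array([100, 200, 180, 280]) * scale  # (x1, y1, x2, y2) from gt.txt
print(bbox)                                    # [ 75. 150. 135. 210.]
# The crop then indexes rows first: img[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]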

In [0]:
# Load the benchmark: frames 0-599 carry signs 0-851 (training), frames 600-899 carry signs 852-1212 (testing)
train_images, train_files, train_signs, train_bboxes, train_labels = readImages('FullIJCNN2013', range(0,600), range(0,852))
test_images, test_files, test_signs, test_bboxes, test_labels = readImages('FullIJCNN2013', range(600,900), range(852,1213))

In [4]:
import matplotlib.pyplot as plt
%matplotlib inline 

# Show examples from each class
class_names = np.unique(train_labels)
num_classes = len(class_names)
fig = plt.figure(figsize=(8,8))
for i in range(num_classes):
    ax = fig.add_subplot(6, 9, 1 + i, xticks=[], yticks=[])
    ax.set_title(class_names[i])
    indices = np.where(np.isin(train_labels, class_names[i]))[0]
    plt.imshow(cv2.cvtColor(train_signs[np.random.choice(indices)], cv2.COLOR_BGR2RGB))
plt.show()



In [5]:
from sklearn.utils import shuffle
train_files, train_signs, train_bboxes, train_labels = shuffle(train_files, train_signs, train_bboxes, train_labels)
# plt.imshow(cv2.cvtColor(train_images.get(train_files[0])[0], cv2.COLOR_BGR2RGB))
# plt.show()
# plt.imshow(cv2.cvtColor(train_signs[0], cv2.COLOR_BGR2RGB))
# plt.show()
# print(train_bboxes[0])
# print(train_labels[0])

# Data pre-processing: split the 852 shuffled training signs into
# 600 for training and 252 for validation
tr_signs = np.array(train_signs)[0:600]
tr_labels = np.array(train_labels)[0:600]
va_signs = np.array(train_signs)[600:852]
va_labels = np.array(train_labels)[600:852]
te_signs = np.array(test_signs)
te_labels = np.array(test_labels)

# Scale pixel intensities to [0, 1]
tr_signs = tr_signs.astype('float32') / 255.0
va_signs = va_signs.astype('float32') / 255.0
te_signs = te_signs.astype('float32') / 255.0

# One-hot encode the 43 class labels
from keras.utils import np_utils
tr_labels = np_utils.to_categorical(tr_labels, num_classes)
va_labels = np_utils.to_categorical(va_labels, num_classes)
te_labels = np_utils.to_categorical(te_labels, num_classes)


Using TensorFlow backend.
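
As a quick sanity check, to_categorical maps an integer class index to a one-hot row; a minimal illustration with a toy 5-class label (illustrative values, not from the dataset):

In [ ]:
# One-hot encoding sketch: class 2 out of 5 classes -> [0. 0. 1. 0. 0.]
print(np_utils.to_categorical([2], 5))
print(tr_labels.shape)  # (600, 43) after the real encoding above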

In [0]:
# Tensorboard
from time import time
from keras.callbacks import TensorBoard
tensorboard = TensorBoard(log_dir='logs/{}'.format(time()))
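
The callback above only writes event files under logs/; to actually view the curves you would point TensorBoard at that directory (assuming TensorBoard is installed in the environment), for example from a shell:

In [ ]:
# Launch the TensorBoard UI against the logs written by the callback
# (run in a terminal, or as a shell command in the notebook)
# !tensorboard --logdir logs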

Assignment 1: Multi-Layer Perceptron


In [7]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras import optimizers

mlp = Sequential()
# Note: Dense(16) placed before Flatten acts on the channel axis only, i.e. a
# shared 3->16 linear map applied independently at every pixel (64 parameters)
mlp.add(Dense(16, input_shape=(SIGN_SIZE[0], SIGN_SIZE[1], 3)))
mlp.add(Flatten())
mlp.add(Activation('relu'))
mlp.add(Dropout(0.15))
mlp.add(Dense(num_classes))
mlp.add(Activation('softmax'))

opt = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
mlp.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
mlp.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_1 (Dense)              (None, 224, 224, 16)      64        
_________________________________________________________________
flatten_1 (Flatten)          (None, 802816)            0         
_________________________________________________________________
activation_1 (Activation)    (None, 802816)            0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 802816)            0         
_________________________________________________________________
dense_2 (Dense)              (None, 43)                34521131  
_________________________________________________________________
activation_2 (Activation)    (None, 43)                0         
=================================================================
Total params: 34,521,195
Trainable params: 34,521,195
Non-trainable params: 0
_________________________________________________________________
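
The parameter counts in the summary can be verified by hand: dense_1 maps the 3 input channels to 16 units per pixel, and dense_2 maps the 802,816 flattened activations to 43 classes (weights plus biases in each case):

In [ ]:
# Hand-check of the parameter counts reported by mlp.summary()
print(3 * 16 + 16)        # dense_1: 64
print(224 * 224 * 16)     # size after Flatten: 802816
print(802816 * 43 + 43)   # dense_2: 34521131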

In [12]:
history = mlp.fit(tr_signs, tr_labels, batch_size=16, epochs=70, verbose=2, validation_data=(va_signs, va_labels), callbacks=[tensorboard])

start = time()
loss, acc = mlp.evaluate(te_signs, te_labels, verbose=0)
end = time()
print('MLP evaluation took ' + str(end - start) + ' seconds')
print('Test loss: ' + str(loss) + ' - Accuracy: ' + str(acc))


Train on 600 samples, validate on 252 samples
Epoch 1/70
 - 3s - loss: 0.1983 - acc: 0.9783 - val_loss: 2.3484 - val_acc: 0.6984
Epoch 2/70
 - 3s - loss: 0.1901 - acc: 0.9783 - val_loss: 2.1405 - val_acc: 0.7460
Epoch 3/70
 - 3s - loss: 0.1579 - acc: 0.9867 - val_loss: 2.1346 - val_acc: 0.7540
Epoch 4/70
 - 3s - loss: 0.1440 - acc: 0.9933 - val_loss: 2.1502 - val_acc: 0.7540
Epoch 5/70
 - 3s - loss: 0.1430 - acc: 0.9933 - val_loss: 2.1830 - val_acc: 0.7500
[... epochs 6-68 omitted: train loss drifts from 0.1402 down to 0.1121 while val_acc plateaus between 0.74 and 0.77 ...]
Epoch 69/70
 - 3s - loss: 0.1118 - acc: 0.9933 - val_loss: 2.4381 - val_acc: 0.7659
Epoch 70/70
 - 3s - loss: 0.1118 - acc: 0.9933 - val_loss: 2.4488 - val_acc: 0.7619
MLP evaluation took 0.3103933334350586 seconds
Test loss: 1.4093975310840765 - Accuracy: 0.8421052633230045
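
Beyond the single accuracy figure, the predictions can be cross-checked by hand with standard Keras calls (a minimal sketch, not part of the assignment code): take the argmax of the softmax outputs and compare against the one-hot ground truth.

In [ ]:
# Recompute the test accuracy from raw predictions as a cross-check
pred = np.argmax(mlp.predict(te_signs), axis=1)
true = np.argmax(te_labels, axis=1)
print(np.mean(pred == true))  # should match the accuracy from evaluate()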

In [13]:
acc


Out[13]:
0.8421052633230045

In [14]:
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()



In [15]:
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()