In [3]:
import os, cv2, random, json
import numpy as np
import pandas as pd
np.random.seed(23)

from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

import matplotlib.pyplot as plt
from matplotlib import ticker
import seaborn as sns
%matplotlib inline 

from keras.models import Sequential, Model
from keras.layers import Input, Dropout, Flatten, Convolution2D, MaxPooling2D, ZeroPadding2D, Dense, Activation
from keras.layers import merge, Convolution1D, BatchNormalization, Reshape, Permute
from keras.optimizers import RMSprop, Adam, Adamax, Nadam, SGD, Adadelta
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
from keras.utils import np_utils
from keras.regularizers import l2
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator


Using TensorFlow backend.

In [4]:
# Image dimensions AFTER cropping in read_image below: 120x320, 3-channel RGB.
ROWS = 120
COLS = 320
CHANNELS = 3
# Directory containing the recorded camera frames referenced by the log.
DIR = 'data/IMG/'

Parsing the Data Log

First, I load driving_log.csv into pandas and extract the center-image paths and steering-angle labels.


In [5]:
# Load the simulator driving log. The CSV has no header row, so column
# names are supplied explicitly. ('break' appears to be a typo for
# 'brake' in the original schema; kept as-is so existing column
# references still resolve.)
data = pd.read_csv('data/driving_log.csv', header=None, 
                   names=['center', 'left', 'right', 'angle', 'throttle', 'break', 'speed'])
# .ix was deprecated and removed from pandas; .iloc is the exact
# positional equivalent here (default RangeIndex from header=None).
print(data.iloc[0].center)
data.sample()


/home/jeffd23/data/sdc/projects/behavioral-cloning/data/IMG/center_2016_12_10_13_38_06_848.jpg
Out[5]:
center left right angle throttle break speed
1503 /home/jeffd23/data/sdc/projects/behavioral-clo... /home/jeffd23/data/sdc/projects/behavioral-cl... /home/jeffd23/data/sdc/projects/behavioral-cl... -0.269265 1.0 0 30.17192

In [6]:
def img_id(path):
    """Extract the bare image file name from a logged absolute path.

    The driving log stores absolute paths from the recording machine;
    only the portion after the '/IMG/' directory is meaningful locally.
    Falls back to the path's basename when no '/IMG/' segment is present
    (the original raised IndexError in that case).
    """
    parts = path.split('/IMG/')
    if len(parts) > 1:
        return parts[1]
    return os.path.basename(path)
# Map every center-camera path in the log to its bare file name.
image_paths = [img_id(p) for p in data.center]
image_paths[:5]


Out[6]:
['center_2016_12_10_13_38_06_848.jpg',
 'center_2016_12_10_13_38_06_950.jpg',
 'center_2016_12_10_13_38_07_050.jpg',
 'center_2016_12_10_13_38_07_150.jpg',
 'center_2016_12_10_13_38_07_251.jpg']

In [7]:
# Labels: steering angle only (a joint angle+throttle target was
# considered but dropped).
y_all = data.angle.values
n_samples = len(y_all)
print("Training Model with {} Samples".format(n_samples))


Training Model with 2422 Samples

Reading and Preprocessing the Images with OpenCV


In [8]:
def read_image(path):
    """Read one camera frame and preprocess it for the network.

    - Loads the image in BGR color (OpenCV's default ordering).
    - Crops off the top 40 rows (sky/horizon noise), leaving 120x320.
    - Reverses the channel axis to convert BGR -> RGB.

    Raises:
        IOError: if the file is missing or undecodable. cv2.imread
            signals failure by returning None rather than raising, and
            the original code then failed with an opaque TypeError on
            the slice below.
    """
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    if img is None:
        raise IOError("Could not read image: {}".format(path))
    img = img[40:160, 0:320]  # crop top of frame: mostly sky, useless noise
    return img[:, :, ::-1]    # BGR -> RGB

# Pre-allocate the full image tensor up front; uint8 keeps memory at
# n_samples * 120 * 320 * 3 bytes.
X_all = np.ndarray((n_samples, ROWS, COLS, CHANNELS), dtype=np.uint8)

# (The original loop contained a dead, no-op expression statement
# `DIR+path` before the read; removed.)
for i, path in enumerate(image_paths):
    X_all[i] = read_image(DIR + path)

print(X_all.shape)


(2422, 120, 320, 3)

In [9]:
# Sanity-check the preprocessing by rendering the first three frames
# (should appear cropped and in correct RGB color).
for img in X_all[:3]:
    plt.imshow(img)
    plt.show()


Building a Convnet in Keras

  1. Split the data in train/test sets.
  2. Build a Keras model for regression.

In [10]:
# Hold out 20% of the samples for validation; the fixed random_state
# makes the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(
    X_all, y_all, test_size=0.20, random_state=23)

In [11]:
def fit_gen(data, batch_size):
    """Infinite generator yielding (images, angles) batches from the log.

    Images are re-read from disk for every batch. Any trailing partial
    batch at the end of a pass over `data` is carried into the next
    pass's counter reset, i.e. it is never yielded on its own.
    """
    while True:
        batch_x = np.ndarray((batch_size, ROWS, COLS, CHANNELS), dtype=np.uint8)
        batch_y = np.zeros(batch_size)
        fill = 0
        for _, row in data.iterrows():
            file_name = row.center.split('/IMG/')[1]
            batch_x[fill] = read_image(DIR + file_name)
            batch_y[fill] = row.angle
            fill += 1
            if fill == batch_size:
                fill = 0
                yield (batch_x, batch_y)
                batch_x = np.ndarray((batch_size, ROWS, COLS, CHANNELS), dtype=np.uint8)
                batch_y = np.zeros(batch_size)

In [12]:
def rmse(y_true, y_pred):
    """Root-mean-squared error, expressed with Keras backend ops."""
    squared_err = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_err, axis=-1))

def get_model():
    """Build and compile the steering-angle regression convnet (Keras 1.x API).

    Architecture (NVIDIA-style, scaled down):
        BatchNorm -> 4 x (3x3 Conv + ReLU + 2x2 MaxPool)
        -> FC 80 -> FC 20 -> Dropout(0.25) -> linear 1-unit output.

    Returns:
        A compiled Sequential model (RMSprop, lr=1e-4, MSE loss).
    """
    lr = 0.0001
    weight_init = 'glorot_normal'
    opt = RMSprop(lr)
    # MSE loss; the `rmse` helper defined above is currently unused and
    # could be supplied as a metric for a scale-matched readout.
    loss = 'mean_squared_error'

    model = Sequential()

    # NOTE(review): axis=1 normalizes along image rows; for channels-last
    # input (ROWS, COLS, CHANNELS) the channel axis would be -1. Confirm
    # this was intentional before changing — it alters trained weights.
    model.add(BatchNormalization(mode=2, axis=1, input_shape=(ROWS, COLS, CHANNELS)))
    # Redundant input_shape argument removed here: only the first layer
    # of a Sequential model needs it.
    model.add(Convolution2D(3, 3, 3, init=weight_init, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(24, 3, 3, init=weight_init, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(36, 3, 3, init=weight_init, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(48, 3, 3, init=weight_init, border_mode='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(80, activation='relu', init=weight_init))
    model.add(Dense(20, activation='relu', init=weight_init))

    model.add(Dropout(0.25))
    model.add(Dense(1, init=weight_init, activation='linear'))

    model.compile(optimizer=opt, loss=loss)

    return model

model = get_model()
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
batchnormalization_1 (BatchNormal(None, 120, 320, 3)   6           batchnormalization_input_1[0][0] 
____________________________________________________________________________________________________
convolution2d_1 (Convolution2D)  (None, 120, 320, 3)   84          batchnormalization_1[0][0]       
____________________________________________________________________________________________________
maxpooling2d_1 (MaxPooling2D)    (None, 60, 160, 3)    0           convolution2d_1[0][0]            
____________________________________________________________________________________________________
convolution2d_2 (Convolution2D)  (None, 60, 160, 24)   672         maxpooling2d_1[0][0]             
____________________________________________________________________________________________________
maxpooling2d_2 (MaxPooling2D)    (None, 30, 80, 24)    0           convolution2d_2[0][0]            
____________________________________________________________________________________________________
convolution2d_3 (Convolution2D)  (None, 30, 80, 36)    7812        maxpooling2d_2[0][0]             
____________________________________________________________________________________________________
maxpooling2d_3 (MaxPooling2D)    (None, 15, 40, 36)    0           convolution2d_3[0][0]            
____________________________________________________________________________________________________
convolution2d_4 (Convolution2D)  (None, 15, 40, 48)    15600       maxpooling2d_3[0][0]             
____________________________________________________________________________________________________
maxpooling2d_4 (MaxPooling2D)    (None, 7, 20, 48)     0           convolution2d_4[0][0]            
____________________________________________________________________________________________________
flatten_1 (Flatten)              (None, 6720)          0           maxpooling2d_4[0][0]             
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 80)            537680      flatten_1[0][0]                  
____________________________________________________________________________________________________
dense_2 (Dense)                  (None, 20)            1620        dense_1[0][0]                    
____________________________________________________________________________________________________
dropout_1 (Dropout)              (None, 20)            0           dense_2[0][0]                    
____________________________________________________________________________________________________
dense_3 (Dense)                  (None, 1)             21          dropout_1[0][0]                  
====================================================================================================
Total params: 563495
____________________________________________________________________________________________________

In [13]:
nb_epoch = 50
batch_size = 64

# Callbacks: stop after 8 epochs without val_loss improvement, and keep
# only the best-val_loss weights on disk.
early_stopping = EarlyStopping(monitor='val_loss', patience=8, verbose=1, mode='auto')
save_weights = ModelCheckpoint('new_model.h5', monitor='val_loss', save_best_only=True)

# The whole dataset fits in memory (X_all above), so plain fit() is used
# rather than the fit_gen generator.
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          validation_data=(X_test, y_test), verbose=1, shuffle=True, callbacks=[save_weights, early_stopping])

preds = model.predict(X_test, verbose=1)

# Compute MSE once and derive RMSE from it (the original called
# mean_squared_error twice on the same arrays).
test_mse = mean_squared_error(y_test, preds)
print("Test MSE: {}".format(test_mse))
print("Test RMSE: {}".format(np.sqrt(test_mse)))


Train on 1937 samples, validate on 485 samples
Epoch 1/50
1937/1937 [==============================] - 3s - loss: 0.0428 - val_loss: 0.0414
Epoch 2/50
1937/1937 [==============================] - 2s - loss: 0.0370 - val_loss: 0.0346
Epoch 3/50
1937/1937 [==============================] - 2s - loss: 0.0328 - val_loss: 0.0314
Epoch 4/50
1937/1937 [==============================] - 2s - loss: 0.0293 - val_loss: 0.0469
Epoch 5/50
1937/1937 [==============================] - 2s - loss: 0.0297 - val_loss: 0.0306
Epoch 6/50
1937/1937 [==============================] - 2s - loss: 0.0268 - val_loss: 0.0302
Epoch 7/50
1937/1937 [==============================] - 2s - loss: 0.0260 - val_loss: 0.0285
Epoch 8/50
1937/1937 [==============================] - 2s - loss: 0.0254 - val_loss: 0.0276
Epoch 9/50
1937/1937 [==============================] - 2s - loss: 0.0253 - val_loss: 0.0282
Epoch 10/50
1937/1937 [==============================] - 2s - loss: 0.0235 - val_loss: 0.0260
Epoch 11/50
1937/1937 [==============================] - 2s - loss: 0.0219 - val_loss: 0.0239
Epoch 12/50
1937/1937 [==============================] - 2s - loss: 0.0222 - val_loss: 0.0245
Epoch 13/50
1937/1937 [==============================] - 2s - loss: 0.0215 - val_loss: 0.0239
Epoch 14/50
1937/1937 [==============================] - 2s - loss: 0.0205 - val_loss: 0.0232
Epoch 15/50
1937/1937 [==============================] - 2s - loss: 0.0208 - val_loss: 0.0224
Epoch 16/50
1937/1937 [==============================] - 2s - loss: 0.0196 - val_loss: 0.0241
Epoch 17/50
1937/1937 [==============================] - 2s - loss: 0.0191 - val_loss: 0.0352
Epoch 18/50
1937/1937 [==============================] - 2s - loss: 0.0186 - val_loss: 0.0318
Epoch 19/50
1937/1937 [==============================] - 2s - loss: 0.0192 - val_loss: 0.0227
Epoch 20/50
1937/1937 [==============================] - 2s - loss: 0.0172 - val_loss: 0.0226
Epoch 21/50
1937/1937 [==============================] - 2s - loss: 0.0172 - val_loss: 0.0198
Epoch 22/50
1937/1937 [==============================] - 2s - loss: 0.0160 - val_loss: 0.0221
Epoch 23/50
1937/1937 [==============================] - 2s - loss: 0.0162 - val_loss: 0.0192
Epoch 24/50
1937/1937 [==============================] - 2s - loss: 0.0157 - val_loss: 0.0198
Epoch 25/50
1937/1937 [==============================] - 2s - loss: 0.0139 - val_loss: 0.0235
Epoch 26/50
1937/1937 [==============================] - 2s - loss: 0.0154 - val_loss: 0.0187
Epoch 27/50
1937/1937 [==============================] - 2s - loss: 0.0143 - val_loss: 0.0257
Epoch 28/50
1937/1937 [==============================] - 2s - loss: 0.0141 - val_loss: 0.0185
Epoch 29/50
1937/1937 [==============================] - 2s - loss: 0.0136 - val_loss: 0.0198
Epoch 30/50
1937/1937 [==============================] - 2s - loss: 0.0142 - val_loss: 0.0198
Epoch 31/50
1937/1937 [==============================] - 2s - loss: 0.0134 - val_loss: 0.0188
Epoch 32/50
1937/1937 [==============================] - 2s - loss: 0.0131 - val_loss: 0.0173
Epoch 33/50
1937/1937 [==============================] - 2s - loss: 0.0126 - val_loss: 0.0184
Epoch 34/50
1937/1937 [==============================] - 2s - loss: 0.0124 - val_loss: 0.0186
Epoch 35/50
1937/1937 [==============================] - 2s - loss: 0.0120 - val_loss: 0.0193
Epoch 36/50
1937/1937 [==============================] - 2s - loss: 0.0125 - val_loss: 0.0206
Epoch 37/50
1937/1937 [==============================] - 2s - loss: 0.0120 - val_loss: 0.0193
Epoch 38/50
1937/1937 [==============================] - 2s - loss: 0.0119 - val_loss: 0.0239
Epoch 39/50
1937/1937 [==============================] - 2s - loss: 0.0111 - val_loss: 0.0184
Epoch 40/50
1937/1937 [==============================] - 2s - loss: 0.0108 - val_loss: 0.0169
Epoch 41/50
1937/1937 [==============================] - 2s - loss: 0.0108 - val_loss: 0.0185
Epoch 42/50
1937/1937 [==============================] - 2s - loss: 0.0107 - val_loss: 0.0167
Epoch 43/50
1937/1937 [==============================] - 2s - loss: 0.0106 - val_loss: 0.0165
Epoch 44/50
1937/1937 [==============================] - 2s - loss: 0.0108 - val_loss: 0.0196
Epoch 45/50
1937/1937 [==============================] - 2s - loss: 0.0105 - val_loss: 0.0168
Epoch 46/50
1937/1937 [==============================] - 2s - loss: 0.0101 - val_loss: 0.0185
Epoch 47/50
1937/1937 [==============================] - 2s - loss: 0.0101 - val_loss: 0.0214
Epoch 48/50
1937/1937 [==============================] - 2s - loss: 0.0100 - val_loss: 0.0165
Epoch 49/50
1937/1937 [==============================] - 2s - loss: 0.0106 - val_loss: 0.0162
Epoch 50/50
1937/1937 [==============================] - 2s - loss: 0.0107 - val_loss: 0.0185
485/485 [==============================] - 0s     
Test MSE: 0.018325869529454464
Test RMSE: 0.13537307534903115

In [15]:
# Serialize the model architecture to model.json.
# NOTE(review): model.to_json() already returns a JSON string, so
# json.dump() double-encodes it — the file holds a quoted, escaped
# string, and the consumer must json.load() it back to a string before
# model_from_json(). Confirm the loader expects that; otherwise
# outfile.write(js) would be the plain fix. Loader not visible here.
js = model.to_json()
with open('model.json', 'w') as outfile:
    json.dump(js, outfile)

In [ ]: