Preprocess data


In [1]:
import csv
import cv2
import gzip
import matplotlib.pyplot as plt
import numpy as np
import os
import random

from os.path import expanduser
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

home_path = expanduser("~")

In [2]:
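# NOTE: the cell that defined data_generator is missing from this notebook;
# the sketch below is reconstructed from how the function is called in the
# cells that follow (same signature, same output shapes). The augmentation
# details -- random camera choice, angle jitter, and flipping of hard
# turns -- are assumptions, not the original implementation.
def data_generator(samples, shape=(160, 320, 3), batch_size=32,
                   correction=0.25, sensitivity=0.03, angle_threshold=0.85):
    """Yield (X, y) batches of camera frames and steering angles forever."""
    num_samples = len(samples)
    while True:
        samples = shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch = samples[offset:offset + batch_size]
            images, angles = [], []
            for row in batch:
                # pick the centre (0), left (1) or right (2) camera and
                # offset the steering angle for the side cameras
                camera = np.random.randint(3)
                angle = float(row[3])
                if camera == 1:
                    angle += correction
                elif camera == 2:
                    angle -= correction
                # small random jitter to smooth the recorded angles
                angle += np.random.uniform(-sensitivity, sensitivity)
                image = cv2.imread(row[camera].strip())
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, (shape[1], shape[0]))
                # mirror hard turns to balance left and right examples
                if abs(angle) > angle_threshold:
                    image, angle = np.fliplr(image), -angle
                images.append(image)
                angles.append(angle)
            yield np.array(images), np.array(angles)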

In [3]:
# read in driving log
samples = []
with open('../data/interim/img/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        samples.append(line)

# drop the header row
samples = samples[1:]

In [4]:
# samples = random.sample(samples, 1000)  # subsampling to 1000 drove as far as the dirt corner

In [5]:
# smoke-test the generator on a handful of samples
test_generator = data_generator(
    samples[0:10], 
    shape=(160,320,3), 
    batch_size=6, 
    correction=0.38,
    sensitivity=0.02,
    angle_threshold=0.6)

X_batch, y_batch = next(test_generator)
print('x shape = {}\ny shape = {}'.format(X_batch.shape, y_batch.shape))
print(y_batch)
plt.imshow(X_batch[0])
plt.show()


x shape = (6, 160, 320, 3)
y shape = (6,)
[ 0.01588996  0.00457633 -0.78095355  0.79565751  0.7607427  -0.03824149]

Keras modelling


In [6]:
from keras.callbacks import TensorBoard
from keras.layers import Lambda, Cropping2D, MaxPooling2D, Flatten, Dense, SpatialDropout2D
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.optimizers import Adam


Using TensorFlow backend.

In [7]:
# training hyper-parameters
epochs = 20
batch_size = 6

# camera hyper-parameters
correction = 0.25
sensitivity = 0.03
angle_threshold = 0.85

# model hyper-parameters (dropout only takes effect if the SpatialDropout2D
# layers below are uncommented)
dropout = 0.15

In [8]:
# set known image attributes
shape = (160, 320, 3)

# set train and validate sets
train_samples, validation_samples = train_test_split(samples, test_size=0.3)
# train_samples, validation_samples = train_test_split(
#     train_samples, test_size=0.2)

# set generator functions
train_generator = data_generator(
    train_samples, 
    shape=shape, 
    batch_size=batch_size, 
    correction=correction,
    sensitivity=sensitivity,
    angle_threshold=angle_threshold)

validation_generator = data_generator(
    validation_samples,
    shape=shape,
    batch_size=batch_size,
    correction=correction,
    sensitivity=sensitivity,
    angle_threshold=angle_threshold)

In [9]:
# model architecture based on
# -> https://arxiv.org/pdf/1604.07316.pdf
model = Sequential()

# region of interest: crop 60 px of sky from the top and 20 px of bonnet
# from the bottom; Cropping2D(cropping=((top, bottom), (left, right)))
model.add(Cropping2D(cropping=((60, 20), (0, 0)), input_shape=shape))

# normalise pixel values to the range [-0.5, 0.5]
model.add(Lambda(lambda x: x / 255.0 - 0.5))

# Convolution2D(nb_filter, nb_row, nb_col, subsample=stride)
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='elu'))
model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='elu'))
model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='elu'))
# model.add(SpatialDropout2D(dropout))
# model.add(Convolution2D(64, 3, 3, activation='elu'))
# model.add(Convolution2D(64, 3, 3, activation='elu'))
# model.add(SpatialDropout2D(dropout))

# flatten
model.add(Flatten())

# fully connected
model.add(Dense(100, activation='elu'))
# model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))

# output
model.add(Dense(1))

In [10]:
model.compile(optimizer=Adam(lr=1e-04), loss='mean_squared_error')

t_board = TensorBoard(
    log_dir='../log', histogram_freq=1, write_graph=True, write_images=True)
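# view the logged training curves by running: tensorboard --logdir=../log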

history_object = model.fit_generator(
    train_generator,
    samples_per_epoch=len(train_samples),
    validation_data=validation_generator,
    nb_val_samples=len(validation_samples),
    nb_epoch=epochs,
    callbacks=[t_board],
    verbose=1)


Epoch 1/20
1638/9325 [====>.........................] - ETA: 116s - loss: 0.2131
---------------------------------------------------------------------------
KeyboardInterrupt: training interrupted manually (library traceback truncated)

In [ ]:
model.save('../model.h5')
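
# round-trip check (an addition, not from the original notebook): confirm
# the saved file loads back cleanly before handing it to the simulator
from keras.models import load_model
_ = load_model('../model.h5')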

In [ ]:
# print the keys contained in the history object
print(history_object.history.keys())

In [ ]:
# plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()

In [ ]:
model.summary()