In [1]:
import os
import sys
import csv
from PIL import Image
import numpy as np
import cv2
from sklearn.model_selection import train_test_split
import json
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from scipy.misc import toimage
In [2]:
# Pre-processing images
# Resize images down to a quarter of the original size to speed up training
def resize(img):
    img = img.resize((80, 40), Image.ANTIALIAS)
    return img

# Crop the image to the section that holds the road information
def cut_top_portion_of_images(image):
    array_Image = np.array(image)
    array_Cut = array_Image[15:]
    return array_Cut

# Convert the RGB image to an HLS image
def convert_to_HLS(img):
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    return hls

# Normalize the input image to the range [-0.5, 0.5]
def normalize(image_data):
    max_value = 255.  # np.max(img)
    return (image_data / max_value) - 0.5
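As a quick sanity check, the four helpers above can be chained on a single frame. This is only a sketch; the file name center_sample.jpg is hypothetical and stands in for any image from the ./IMG/ folder.
# Sketch: run one example image through the full preprocessing chain
sample = Image.open("./IMG/center_sample.jpg")   # hypothetical file name
sample = resize(sample)                          # PIL image, 80x40
sample = cut_top_portion_of_images(sample)       # NumPy array, top 15 rows removed -> (25, 80, 3)
sample = convert_to_HLS(sample)                  # RGB -> HLS colour space
sample = normalize(sample)                       # values now in [-0.5, 0.5]
print(sample.shape, sample.min(), sample.max())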
In [3]:
# Read the driving log to match steering information to images
with open('./driving_log.csv', 'r') as f:
    reader = csv.reader(f)
    driving_list = list(reader)

X_train = []
y_train = []
print(len(driving_list))

# Preprocess all images: resize, crop, convert to HLS, normalize
for i, row in enumerate(driving_list):
    # Skip the CSV header row
    if i == 0:
        continue
    # The log stores full paths; keep only the file name and load from ./IMG/
    groups = row[0].split('/')
    image = Image.open("./IMG/" + groups[-1])
    image = resize(image)
    image = cut_top_portion_of_images(image)
    image = convert_to_HLS(image)
    image = normalize(image)
    # Optional: toimage(image).show() to inspect any intermediate step
    X_train.append(image)
    # Column 3 of the driving log is the steering angle
    y_train.append(float(row[3]))

X_train = np.array(X_train)
y_train = np.array(y_train)

# Shuffle and split the data into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(
    X_train,
    y_train,
    test_size=0.2)
print("done")
from keras.models import Sequential
# Create the Sequential model
model = Sequential()
The keras.models.Sequential class is a wrapper for the neural network model. Just like many of the class models in scikit-learn, it provides common functions like fit(), evaluate(), and compile(). We'll cover these functions as we get to them. Let's start looking at the layers of the model.
A Keras layer is just like a neural network layer. It can be fully connected, max pool, activation, etc. You can add a layer to the model using the model's add() function. For example, a simple model would look like this:
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
# Create the Sequential model
model = Sequential()
# 1st Layer - Add a flatten layer
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a fully connected layer
model.add(Dense(100))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer
model.add(Dense(60))
# 5th Layer - Add a ReLU activation layer
model.add(Activation('relu'))
Keras will automatically infer the shape of all layers after the first layer. This means you only have to set the input dimensions for the first layer.
The first layer from above, model.add(Flatten(input_shape=(32, 32, 3))), sets the input dimension to (32, 32, 3) and the output dimension to 3072 (32 * 32 * 3). The second layer takes in the output of the first layer and sets its output dimension to 100. This chain of passing output to the next layer continues until the last layer, which is the output of the model.
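One way to see this inference in action is to build the small example model and print its layer shapes; the sketch below simply calls model.summary(), which lists the output shape Keras inferred for every layer (it reuses the imports from the snippet above).
# Sketch: inspect the shapes Keras infers for the example model above
demo = Sequential()
demo.add(Flatten(input_shape=(32, 32, 3)))   # output shape: (None, 3072)
demo.add(Dense(100))                         # output shape: (None, 100)
demo.add(Activation('relu'))
demo.add(Dense(60))                          # output shape: (None, 60)
demo.add(Activation('relu'))
demo.summary()                               # prints the inferred shape of each layer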
In [4]:
batch_size = 100
nb_epoch = 15
pool_size = (2, 2)

X_train = X_train.astype('float32')
X_val = X_val.astype('float32')
print(X_train.shape[0], 'train samples')
print(X_val.shape[0], 'validation samples')

input_shape = X_train.shape[1:]
print(input_shape)
In [5]:
model = Sequential()
model.add(BatchNormalization(input_shape=input_shape))
# Convolutional Layer 1 and Dropout
model.add(Convolution2D(64, 3, 3, border_mode='valid', subsample=(1,1)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
# Conv Layer 2
model.add(Convolution2D(32, 3, 3, border_mode='valid', subsample=(1,1)))
model.add(Activation('relu'))
# Conv Layer 3
model.add(Convolution2D(16, 3, 3, border_mode='valid', subsample=(1,1)))
model.add(Activation('relu'))
# Conv Layer 4
model.add(Convolution2D(8, 3, 3, border_mode='valid', subsample=(1,1)))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=pool_size))
# Flatten and Dropout
model.add(Flatten())
model.add(Dropout(0.5))
# Fully Connected Layer 1 and Dropout
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# FC Layer 2
model.add(Dense(64))
model.add(Activation('relu'))
# FC Layer 3
model.add(Dense(32))
model.add(Activation('relu'))
# Final FC Layer - just one output - steering angle
model.add(Dense(1))
# Compiling and training the model
model.compile(metrics=['mean_squared_error'], optimizer='Nadam', loss='mean_squared_error')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_data=(X_val, y_val))
# Save model architecture and weights
model_json = model.to_json()
with open("./model.json", "w") as json_file:
    json.dump(model_json, json_file)
model.save_weights('./model.h5')
# Show summary of model
model.summary()
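For completeness, here is a sketch of how the saved files could be read back for inference. It mirrors the json.dump call above (the architecture string is stored inside a JSON file) and uses Keras's model_from_json and load_weights; the prediction line is illustrative and assumes a single preprocessed frame.
# Sketch: reload the saved architecture and weights for inference
from keras.models import model_from_json

with open("./model.json", "r") as json_file:
    loaded_json = json.load(json_file)   # matches the json.dump above
model = model_from_json(loaded_json)
model.load_weights('./model.h5')
model.compile(optimizer='Nadam', loss='mean_squared_error')

# Predict a steering angle for one preprocessed image (batch dimension added):
# steering_angle = float(model.predict(image[None, :, :, :], batch_size=1))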