In [1]:
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
import pandas as pd

path = '/data1/udacity/simulator/data'
img_path = path +'/IMG'
csv_file = path +'/driving_log.csv'

csv_array=pd.read_csv(csv_file)

print("Number of lines in CSV: " + str(csv_array.shape))

import matplotlib.pyplot as plt
%matplotlib inline


plt.hist(csv_array.steering, bins=50)
print("Min Steering Angle:" + str(min(csv_array.steering)))
print("Max Steering Angle:" + str(max(csv_array.steering)))


Number of lines in CSV: (8036, 7)
Min Steering Angle:-0.9426954
Max Steering Angle:1.0

In [2]:
# CSV columns: center, left, right, steering angle, throttle, brake, speed

# preprocess the data

X_full_name = []
y_full_angle= []

line = csv_array.iloc[0]
  #print(line)


def plotCameraImages(line):
  """Show the left/center/right camera frames of one CSV row side by side.

  The center panel's title also carries the recorded steering angle so the
  three viewpoints can be related to the label.
  """
  fig, axes = plt.subplots(1, 3)
  fig.set_figwidth(10)
  fig.set_figheight(20)

  i = 0
  for camera in ['left', 'center', 'right']:
    print(i)
    # Paths in the CSV are relative to the dataset root; strip stray spaces.
    axes.flat[i].imshow(Image.open(path + '/' + line[camera].decode('UTF-8').strip()))
    axes.flat[i].tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
    if (camera == 'center'):
       axes.flat[i].set_title(camera + ' ' + str(line['steering']))
    else:
       axes.flat[i].set_title(camera)

    i = i + 1

plotCameraImages(csv_array.iloc[0])
plotCameraImages(csv_array.iloc[1900])
# Use a single combined boolean mask instead of chained csv_array[a][b]
# indexing, which raised a "Boolean Series key will be reindexed" UserWarning
# and only worked by accidental index alignment.
plotCameraImages(csv_array[(csv_array.steering < 0.26) & (csv_array.steering > 0.24)].iloc[4])


0
1
2
0
1
2
/home/alans/anaconda2/envs/cancer_tutorial_tf/lib/python2.7/site-packages/ipykernel/__main__.py:31: UserWarning: Boolean Series key will be reindexed to match DataFrame index.
0
1
2

In [3]:
# Peek at the first rows to confirm column names and the relative path format.
csv_array.head()


Out[3]:
center left right steering throttle brake speed
0 IMG/center_2016_12_01_13_30_48_287.jpg IMG/left_2016_12_01_13_30_48_287.jpg IMG/right_2016_12_01_13_30_48_287.jpg 0.0 0.0 0.0 22.148290
1 IMG/center_2016_12_01_13_30_48_404.jpg IMG/left_2016_12_01_13_30_48_404.jpg IMG/right_2016_12_01_13_30_48_404.jpg 0.0 0.0 0.0 21.879630
2 IMG/center_2016_12_01_13_31_12_937.jpg IMG/left_2016_12_01_13_31_12_937.jpg IMG/right_2016_12_01_13_31_12_937.jpg 0.0 0.0 0.0 1.453011
3 IMG/center_2016_12_01_13_31_13_037.jpg IMG/left_2016_12_01_13_31_13_037.jpg IMG/right_2016_12_01_13_31_13_037.jpg 0.0 0.0 0.0 1.438419
4 IMG/center_2016_12_01_13_31_13_177.jpg IMG/left_2016_12_01_13_31_13_177.jpg IMG/right_2016_12_01_13_31_13_177.jpg 0.0 0.0 0.0 1.418236

In [4]:
import driving_data
import cv2

# Compare the raw frame against the preprocessing variants used for training.
filename = driving_data.val_xs[0]
img = []
img.append(np.array(Image.open(filename)))
#img.append( driving_data.process_image_comma(filename).transpose( 1, 2, 0) )
img.append(driving_data.process_image_gray(filename))
img.append(driving_data.process_image_sully(filename))


fig, axes = plt.subplots(1, 3)
fig.set_figwidth(10)
fig.set_figheight(20)

# NOTE: label fixed — img[1] is the grayscale variant (the comma append above
# is commented out), so the middle panel was previously mislabelled 'comma'.
for i, label in enumerate(['raw', 'gray', 'sully']):
    print(i)
    print(img[i].shape)
    axes.flat[i].imshow(img[i])
    axes.flat[i].tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
    axes.flat[i].set_title(label)
plt.show()

# Brightness augmentation is random — show two independent draws.
for _ in range(2):
    print('brightness')
    img3 = np.array(Image.open(filename))
    img3 = driving_data.augment_brightness_camera_images(img3)
    plt.imshow(img3)
    plt.show()
# Resize the last draw down to the network input size.
img3 = cv2.resize(img3, (200, 66))
plt.imshow(img3)
plt.show()

print(filename)
# cv2.imread returns BGR; convert so matplotlib (which expects RGB) shows
# true colours instead of a blue-shifted image.
image = cv2.imread(filename)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()

print('early process')
image = np.array(Image.open(filename))
image = driving_data.process_image_sully_pixels(image)
plt.imshow(image)
plt.show()

print('process')
# Rescale the (presumably zero-centred — TODO confirm) output back to 0-255
# for display, then show three more independent draws of the sully pipeline.
img2 = driving_data.process_image_sully(filename)
img4 = np.add(img2, 0.5)
img4 = np.multiply(img4, 255)
plt.imshow(img4)
plt.show()
for _ in range(3):
    img2 = driving_data.process_image_sully(filename)
    plt.imshow(img2)
    plt.show()


# Five independent draws of the comma pipeline (channels-first -> HWC to show).
for _ in range(5):
    img2 = driving_data.process_image_comma(filename).transpose(1, 2, 0)
    plt.imshow(img2)
    plt.show()


0
(160, 320, 3)
1
(160, 320)
2
(66, 200, 3)
brightness
brightness
/data1/udacity/simulator/data/IMG/center_2016_12_01_13_38_49_496.jpg
early process
process

In [5]:
# Steering-angle distribution of the validation split built by driving_data.
plt.hist(driving_data.val_ys, bins=50)


Out[5]:
(array([   2.,    0.,    0.,    4.,    0.,    0.,    1.,    1.,    0.,
           4.,    5.,    7.,   13.,   10.,   32.,   28.,   26.,   82.,
          74.,  136.,  157.,  155.,  235.,  103.,  294.,  162.,  165.,
         233.,  160.,   89.,   66.,  107.,   36.,   18.,   20.,   20.,
           3.,    8.,    5.,    2.,    0.,    2.,    0.,    0.,    0.,
           0.,    0.,    0.,    0.,    1.]),
 array([-1.0832381 , -1.03857334, -0.99390858, -0.94924381, -0.90457905,
        -0.85991429, -0.81524953, -0.77058477, -0.72592   , -0.68125524,
        -0.63659048, -0.59192572, -0.54726096, -0.50259619, -0.45793143,
        -0.41326667, -0.36860191, -0.32393715, -0.27927238, -0.23460762,
        -0.18994286, -0.1452781 , -0.10061334, -0.05594857, -0.01128381,
         0.03338095,  0.07804571,  0.12271047,  0.16737524,  0.21204   ,
         0.25670476,  0.30136952,  0.34603428,  0.39069905,  0.43536381,
         0.48002857,  0.52469333,  0.56935809,  0.61402286,  0.65868762,
         0.70335238,  0.74801714,  0.7926819 ,  0.83734667,  0.88201143,
         0.92667619,  0.97134095,  1.01600571,  1.06067048,  1.10533524,
         1.15      ]),
 <a list of 50 Patch objects>)

In [6]:
from keras.layers import Convolution2D, MaxPooling2D, Activation
from keras.models import Sequential
# NOTE(review): Activation is imported twice (also on the first line), and
# Merge only exists in Keras 1.x — this notebook targets the Keras 1 API.
from keras.layers import Dense, Activation, Reshape, Merge

import numpy as np
import matplotlib.pyplot as plt
import cv2  # only used for loading the image, you can use anything that returns the image as a np.ndarray

%matplotlib inline


Using TensorFlow backend.

In [7]:
# Grayscale-preprocessed frame to feed through the visualization model below.
img = driving_data.process_image_gray(filename)

In [8]:
# Sanity-check the grayscale input (rendered with the default colormap).
plt.imshow(img)


Out[8]:
<matplotlib.image.AxesImage at 0x7fb7e89a94d0>

In [ ]:
# Convolution-only stack (Keras 1.x API: Convolution2D(n_filters, rows, cols)
# with border_mode) used purely to inspect intermediate feature maps via
# predict() — it has no dense head and is never compiled or fit.
# NOTE(review): the inline HxW comments look off by one for 'valid' convs —
# expected sizes are 76x156, 36x76, 16x36, 7x17; confirm with model.summary().
model = Sequential ([
        # Add a channel axis: (160, 320) grayscale -> (160, 320, 1).
        Reshape ((160, 320, 1), input_shape=(160, 320)),

        Convolution2D (24, 8, 8, border_mode='valid'),
        MaxPooling2D (pool_size=(2, 2)),
        Activation ('relu'),
            #77x157
        Convolution2D (36, 5, 5, border_mode='valid'),
        MaxPooling2D (pool_size=(2, 2)),
        
        Activation ('relu'),
        
        #37x77
        Convolution2D (48, 5, 5, border_mode='valid'),
        MaxPooling2D (pool_size=(2, 2)),
        Activation ('relu'),

        #17x37
        Convolution2D (64, 3, 3, border_mode='valid'),
        MaxPooling2D (pool_size=(2, 2)),
        Activation ('relu'),

        #8x18
        Convolution2D (64, 2, 2, border_mode='valid'),
        MaxPooling2D (pool_size=(2, 2)),
        Activation ('relu'),
    ])

In [ ]:
from keras import backend as K
print(model.layers)
# Build a backend function mapping the model input to the output of layer 3
# (the first ReLU). The original declared three placeholder inputs
# ([layers[0].input, layers[2].input, K.learning_phase()]) but then called it
# with a single-element list, which fails at run time. In a Sequential model
# layers[0].input already feeds every later layer, so only the model input
# plus the learning-phase flag are needed.
get_3rd_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[3].output])
print(get_3rd_layer_output)
img_batch = np.expand_dims(img, axis=0)
# 0 = test mode for the learning-phase placeholder.
layer_output = get_3rd_layer_output([img_batch, 0])[0]
print(layer_output.shape)

In [ ]:
#model = Sequential()
#model.add(Convolution2D(3,    # number of filter layers
#                        3,    # y dimension of kernel (we're going for a 3x3 kernel)
#                        3,    # x dimension of kernel
#                        input_shape=cat.shape))

In [ ]:
# Keras predicts on batches: prepend a batch axis -> (1, 160, 320).
img_batch = np.expand_dims(img,axis=0)

In [ ]:
# Run the grayscale frame through the conv stack; the final conv layer has
# 64 filters, so conv_img is a (1, H, W, 64) batch of feature maps.
conv_img = model.predict(img_batch)
print(conv_img.shape)

In [ ]:
# here we get rid of that added dimension and plot the image
def visualize_img(model, img):
    # Keras expects batches of images, so we have to add a dimension to trick it into being nice
    img_batch = np.expand_dims(img,axis=0)
    conv_img = model.predict(img_batch)
    conv_img = np.squeeze(conv_img, axis=0)
    print conv_img.shape
    plt.imshow(conv_img)

In [ ]:
#visualize_img(model, img)
end = conv_img.shape[3]
for i in range(0,end):
    a = conv_img[0,:,:,i]
    plt.imshow(a)
    plt.show()

In [ ]: