In [24]:
# Import used libraries
import tensorflow as tf
import keras

from keras.models import Sequential, Model
from keras.layers import Dense, Flatten, Conv2D, Dropout, Lambda, Cropping2D, Input, Activation, MaxPool2D
from keras.layers import add
from keras.layers.convolutional import Convolution2D
from keras.layers.normalization import BatchNormalization

import cv2
import datetime
import pickle
import json
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

In [18]:
# Load and display driving log
# config.json keeps machine-specific paths out of the notebook; it must
# provide 'data_path', 'driving_log_file' and (used later) 'pickle_file'.
with open('config.json') as config_file:    
    config_data = json.load(config_file)
print("Data path: {}".format(config_data['data_path']))


path = config_data['data_path']
file = config_data['driving_log_file']
# NOTE(review): assumes 'data_path' already ends with a path separator — confirm.
driving_log = pd.read_csv(path + file)
# Last expression: rich display of the full driving log (center/left/right
# image paths plus steering, throttle, brake, speed columns).
driving_log


Data path: /home/timo/Documents/mldata/car_sim_video_images/
Out[18]:
center left right steering throttle brake speed
0 IMG/center_2016_12_01_13_30_48_287.jpg IMG/left_2016_12_01_13_30_48_287.jpg IMG/right_2016_12_01_13_30_48_287.jpg 0.0 0.0 0.0 22.148290
1 IMG/center_2016_12_01_13_30_48_404.jpg IMG/left_2016_12_01_13_30_48_404.jpg IMG/right_2016_12_01_13_30_48_404.jpg 0.0 0.0 0.0 21.879630
2 IMG/center_2016_12_01_13_31_12_937.jpg IMG/left_2016_12_01_13_31_12_937.jpg IMG/right_2016_12_01_13_31_12_937.jpg 0.0 0.0 0.0 1.453011
3 IMG/center_2016_12_01_13_31_13_037.jpg IMG/left_2016_12_01_13_31_13_037.jpg IMG/right_2016_12_01_13_31_13_037.jpg 0.0 0.0 0.0 1.438419
4 IMG/center_2016_12_01_13_31_13_177.jpg IMG/left_2016_12_01_13_31_13_177.jpg IMG/right_2016_12_01_13_31_13_177.jpg 0.0 0.0 0.0 1.418236
5 IMG/center_2016_12_01_13_31_13_279.jpg IMG/left_2016_12_01_13_31_13_279.jpg IMG/right_2016_12_01_13_31_13_279.jpg 0.0 0.0 0.0 1.403993
6 IMG/center_2016_12_01_13_31_13_381.jpg IMG/left_2016_12_01_13_31_13_381.jpg IMG/right_2016_12_01_13_31_13_381.jpg 0.0 0.0 0.0 1.389892
7 IMG/center_2016_12_01_13_31_13_482.jpg IMG/left_2016_12_01_13_31_13_482.jpg IMG/right_2016_12_01_13_31_13_482.jpg 0.0 0.0 0.0 1.375934
8 IMG/center_2016_12_01_13_31_13_584.jpg IMG/left_2016_12_01_13_31_13_584.jpg IMG/right_2016_12_01_13_31_13_584.jpg 0.0 0.0 0.0 1.362115
9 IMG/center_2016_12_01_13_31_13_686.jpg IMG/left_2016_12_01_13_31_13_686.jpg IMG/right_2016_12_01_13_31_13_686.jpg 0.0 0.0 0.0 1.348435
10 IMG/center_2016_12_01_13_31_13_786.jpg IMG/left_2016_12_01_13_31_13_786.jpg IMG/right_2016_12_01_13_31_13_786.jpg 0.0 0.0 0.0 1.334892
11 IMG/center_2016_12_01_13_31_13_890.jpg IMG/left_2016_12_01_13_31_13_890.jpg IMG/right_2016_12_01_13_31_13_890.jpg 0.0 0.0 0.0 1.318820
12 IMG/center_2016_12_01_13_31_13_991.jpg IMG/left_2016_12_01_13_31_13_991.jpg IMG/right_2016_12_01_13_31_13_991.jpg 0.0 0.0 0.0 1.305575
13 IMG/center_2016_12_01_13_31_14_092.jpg IMG/left_2016_12_01_13_31_14_092.jpg IMG/right_2016_12_01_13_31_14_092.jpg 0.0 0.0 0.0 1.292579
14 IMG/center_2016_12_01_13_31_14_194.jpg IMG/left_2016_12_01_13_31_14_194.jpg IMG/right_2016_12_01_13_31_14_194.jpg 0.0 0.0 0.0 1.279884
15 IMG/center_2016_12_01_13_31_14_295.jpg IMG/left_2016_12_01_13_31_14_295.jpg IMG/right_2016_12_01_13_31_14_295.jpg 0.0 0.0 0.0 1.267316
16 IMG/center_2016_12_01_13_31_14_398.jpg IMG/left_2016_12_01_13_31_14_398.jpg IMG/right_2016_12_01_13_31_14_398.jpg 0.0 0.0 0.0 1.254873
17 IMG/center_2016_12_01_13_31_14_500.jpg IMG/left_2016_12_01_13_31_14_500.jpg IMG/right_2016_12_01_13_31_14_500.jpg 0.0 0.0 0.0 1.242555
18 IMG/center_2016_12_01_13_31_14_602.jpg IMG/left_2016_12_01_13_31_14_602.jpg IMG/right_2016_12_01_13_31_14_602.jpg 0.0 0.0 0.0 1.230361
19 IMG/center_2016_12_01_13_31_14_702.jpg IMG/left_2016_12_01_13_31_14_702.jpg IMG/right_2016_12_01_13_31_14_702.jpg 0.0 0.0 0.0 1.218291
20 IMG/center_2016_12_01_13_31_14_803.jpg IMG/left_2016_12_01_13_31_14_803.jpg IMG/right_2016_12_01_13_31_14_803.jpg 0.0 0.0 0.0 1.206346
21 IMG/center_2016_12_01_13_31_14_904.jpg IMG/left_2016_12_01_13_31_14_904.jpg IMG/right_2016_12_01_13_31_14_904.jpg 0.0 0.0 0.0 1.194345
22 IMG/center_2016_12_01_13_31_15_005.jpg IMG/left_2016_12_01_13_31_15_005.jpg IMG/right_2016_12_01_13_31_15_005.jpg 0.0 0.0 0.0 1.182352
23 IMG/center_2016_12_01_13_31_15_106.jpg IMG/left_2016_12_01_13_31_15_106.jpg IMG/right_2016_12_01_13_31_15_106.jpg 0.0 0.0 0.0 1.170474
24 IMG/center_2016_12_01_13_31_15_208.jpg IMG/left_2016_12_01_13_31_15_208.jpg IMG/right_2016_12_01_13_31_15_208.jpg 0.0 0.0 0.0 1.156378
25 IMG/center_2016_12_01_13_31_15_308.jpg IMG/left_2016_12_01_13_31_15_308.jpg IMG/right_2016_12_01_13_31_15_308.jpg 0.0 0.0 0.0 1.144761
26 IMG/center_2016_12_01_13_31_15_411.jpg IMG/left_2016_12_01_13_31_15_411.jpg IMG/right_2016_12_01_13_31_15_411.jpg 0.0 0.0 0.0 1.133260
27 IMG/center_2016_12_01_13_31_15_513.jpg IMG/left_2016_12_01_13_31_15_513.jpg IMG/right_2016_12_01_13_31_15_513.jpg 0.0 0.0 0.0 1.121875
28 IMG/center_2016_12_01_13_32_35_588.jpg IMG/left_2016_12_01_13_32_35_588.jpg IMG/right_2016_12_01_13_32_35_588.jpg 0.0 0.0 0.0 1.087376
29 IMG/center_2016_12_01_13_32_39_212.jpg IMG/left_2016_12_01_13_32_39_212.jpg IMG/right_2016_12_01_13_32_39_212.jpg 0.0 0.0 0.0 0.755065
... ... ... ... ... ... ... ...
8006 IMG/center_2016_12_01_13_46_37_480.jpg IMG/left_2016_12_01_13_46_37_480.jpg IMG/right_2016_12_01_13_46_37_480.jpg 0.0 0.0 0.0 1.592460
8007 IMG/center_2016_12_01_13_46_37_511.jpg IMG/left_2016_12_01_13_46_37_511.jpg IMG/right_2016_12_01_13_46_37_511.jpg 0.0 0.0 0.0 1.586049
8008 IMG/center_2016_12_01_13_46_37_581.jpg IMG/left_2016_12_01_13_46_37_581.jpg IMG/right_2016_12_01_13_46_37_581.jpg 0.0 0.0 0.0 1.576481
8009 IMG/center_2016_12_01_13_46_37_613.jpg IMG/left_2016_12_01_13_46_37_613.jpg IMG/right_2016_12_01_13_46_37_613.jpg 0.0 0.0 0.0 1.570135
8010 IMG/center_2016_12_01_13_46_37_682.jpg IMG/left_2016_12_01_13_46_37_682.jpg IMG/right_2016_12_01_13_46_37_682.jpg 0.0 0.0 0.0 1.560663
8011 IMG/center_2016_12_01_13_46_37_714.jpg IMG/left_2016_12_01_13_46_37_714.jpg IMG/right_2016_12_01_13_46_37_714.jpg 0.0 0.0 0.0 1.554380
8012 IMG/center_2016_12_01_13_46_37_784.jpg IMG/left_2016_12_01_13_46_37_784.jpg IMG/right_2016_12_01_13_46_37_784.jpg 0.0 0.0 0.0 1.545003
8013 IMG/center_2016_12_01_13_46_37_815.jpg IMG/left_2016_12_01_13_46_37_815.jpg IMG/right_2016_12_01_13_46_37_815.jpg 0.0 0.0 0.0 1.538783
8014 IMG/center_2016_12_01_13_46_37_887.jpg IMG/left_2016_12_01_13_46_37_887.jpg IMG/right_2016_12_01_13_46_37_887.jpg 0.0 0.0 0.0 1.529500
8015 IMG/center_2016_12_01_13_46_37_916.jpg IMG/left_2016_12_01_13_46_37_916.jpg IMG/right_2016_12_01_13_46_37_916.jpg 0.0 0.0 0.0 1.523343
8016 IMG/center_2016_12_01_13_46_37_989.jpg IMG/left_2016_12_01_13_46_37_989.jpg IMG/right_2016_12_01_13_46_37_989.jpg 0.0 0.0 0.0 1.511102
8017 IMG/center_2016_12_01_13_46_38_033.jpg IMG/left_2016_12_01_13_46_38_033.jpg IMG/right_2016_12_01_13_46_38_033.jpg 0.0 0.0 0.0 1.505018
8018 IMG/center_2016_12_01_13_46_38_090.jpg IMG/left_2016_12_01_13_46_38_090.jpg IMG/right_2016_12_01_13_46_38_090.jpg 0.0 0.0 0.0 1.495939
8019 IMG/center_2016_12_01_13_46_38_135.jpg IMG/left_2016_12_01_13_46_38_135.jpg IMG/right_2016_12_01_13_46_38_135.jpg 0.0 0.0 0.0 1.489917
8020 IMG/center_2016_12_01_13_46_38_191.jpg IMG/left_2016_12_01_13_46_38_191.jpg IMG/right_2016_12_01_13_46_38_191.jpg 0.0 0.0 0.0 1.480929
8021 IMG/center_2016_12_01_13_46_38_237.jpg IMG/left_2016_12_01_13_46_38_237.jpg IMG/right_2016_12_01_13_46_38_237.jpg 0.0 0.0 0.0 1.474967
8022 IMG/center_2016_12_01_13_46_38_294.jpg IMG/left_2016_12_01_13_46_38_294.jpg IMG/right_2016_12_01_13_46_38_294.jpg 0.0 0.0 0.0 1.466069
8023 IMG/center_2016_12_01_13_46_38_339.jpg IMG/left_2016_12_01_13_46_38_339.jpg IMG/right_2016_12_01_13_46_38_339.jpg 0.0 0.0 0.0 1.460167
8024 IMG/center_2016_12_01_13_46_38_395.jpg IMG/left_2016_12_01_13_46_38_395.jpg IMG/right_2016_12_01_13_46_38_395.jpg 0.0 0.0 0.0 1.451358
8025 IMG/center_2016_12_01_13_46_38_440.jpg IMG/left_2016_12_01_13_46_38_440.jpg IMG/right_2016_12_01_13_46_38_440.jpg 0.0 0.0 0.0 1.445515
8026 IMG/center_2016_12_01_13_46_38_497.jpg IMG/left_2016_12_01_13_46_38_497.jpg IMG/right_2016_12_01_13_46_38_497.jpg 0.0 0.0 0.0 1.436795
8027 IMG/center_2016_12_01_13_46_38_543.jpg IMG/left_2016_12_01_13_46_38_543.jpg IMG/right_2016_12_01_13_46_38_543.jpg 0.0 0.0 0.0 1.431010
8028 IMG/center_2016_12_01_13_46_38_599.jpg IMG/left_2016_12_01_13_46_38_599.jpg IMG/right_2016_12_01_13_46_38_599.jpg 0.0 0.0 0.0 1.422377
8029 IMG/center_2016_12_01_13_46_38_644.jpg IMG/left_2016_12_01_13_46_38_644.jpg IMG/right_2016_12_01_13_46_38_644.jpg 0.0 0.0 0.0 1.416651
8030 IMG/center_2016_12_01_13_46_38_700.jpg IMG/left_2016_12_01_13_46_38_700.jpg IMG/right_2016_12_01_13_46_38_700.jpg 0.0 0.0 0.0 1.408105
8031 IMG/center_2016_12_01_13_46_38_745.jpg IMG/left_2016_12_01_13_46_38_745.jpg IMG/right_2016_12_01_13_46_38_745.jpg 0.0 0.0 0.0 1.402436
8032 IMG/center_2016_12_01_13_46_38_802.jpg IMG/left_2016_12_01_13_46_38_802.jpg IMG/right_2016_12_01_13_46_38_802.jpg 0.0 0.0 0.0 1.393976
8033 IMG/center_2016_12_01_13_46_38_846.jpg IMG/left_2016_12_01_13_46_38_846.jpg IMG/right_2016_12_01_13_46_38_846.jpg 0.0 0.0 0.0 1.388364
8034 IMG/center_2016_12_01_13_46_38_922.jpg IMG/left_2016_12_01_13_46_38_922.jpg IMG/right_2016_12_01_13_46_38_922.jpg 0.0 0.0 0.0 1.377208
8035 IMG/center_2016_12_01_13_46_38_947.jpg IMG/left_2016_12_01_13_46_38_947.jpg IMG/right_2016_12_01_13_46_38_947.jpg 0.0 0.0 0.0 1.374433

8036 rows × 7 columns


In [19]:
# Store all images as a numpy array
# NOTE(review): nb_images=10 looks like a debugging leftover — the commented
# len(driving_log) suggests the full dataset was intended. Confirm before a
# real training run.
nb_images = 10 # len(driving_log)
images = []
labels = []
camera_names     = ('center', 'left', 'right')
# Initialize offsets of the steering angle for center, left and right images:
# the side cameras see the road from an offset, so their steering label is
# shifted (+0.2 for left, -0.2 for right) to teach recovery behavior.
steering_offsets = dict({key:val for key,val in zip(camera_names, [0, 0.2, -0.2])})

# Build the dataset only once; afterwards load the cached pickle file.
if not os.path.exists(path + config_data['pickle_file']):
    for camera in camera_names:
        print("Load '{}' images".format(camera))
        # One label block per camera, kept in the same order as the images
        # appended below so labels[i] corresponds to images[i] after stacking.
        labels.append((driving_log['steering'][0:nb_images].values.reshape(-1,1) + steering_offsets[camera]))
        for row in tqdm(range(0, nb_images), unit=' images'):
            # Log entries may contain stray spaces in the relative image path.
            img = cv2.imread(path + str.replace(driving_log[camera][row], ' ', ''))
            images.append(np.array(img))

    # Augmentation: append a horizontally mirrored copy of every image and
    # negate the matching steering labels. Iterate over .copy() so the lists
    # are not extended while being traversed.
    for image in images.copy():
        images.append(np.fliplr(image))
    for label in labels.copy():
        labels.append(-1*label)
        
    images  = np.stack(images)#.reshape(-1, 160, 320, 3)
    labels  = np.concatenate(labels)
    image_shape = images.shape[1::]
    images, labels = shuffle(images, labels, random_state=42)
    
    # Split dataset to train, test
    X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.3, random_state=42)
    # Save as pickle file
    print("Save data to file: '{}'.".format(config_data['pickle_file']))
    with open(path + config_data['pickle_file'], 'wb') as f: 
        pickle.dump([X_train, X_test, y_train, y_test], f)
    print("Done.")
else:
    # NOTE(review): pickle.load executes arbitrary code from the file — only
    # load pickles you created yourself.
    print("Load data from file: '{}'.".format(config_data['pickle_file']))
    with open(path + config_data['pickle_file'], 'rb') as f: 
        X_train, X_test, y_train, y_test = pickle.load(f)
    print("Done.")


  0%|          | 0/10 [00:00<?, ? images/s]
Load 'center' images
100%|██████████| 10/10 [00:00<00:00, 27.01 images/s]
100%|██████████| 10/10 [00:00<00:00, 205.53 images/s]
100%|██████████| 10/10 [00:00<00:00, 483.25 images/s]
Load 'left' images
Load 'right' images
Save data to file: 'images.p'.
Done.

In [20]:
# Visualize labels: steering angles (with camera offsets and sign flips from
# augmentation) after shuffling, so the curve should look like noise.
plt.plot(y_train)
plt.show()



In [21]:
# Number of pixels in a single image (height * width).
size_of = lambda x: x.shape[0] * x.shape[1] 
# Visualize random image
print("Number of images: 3x{} (center, left, right)".format(len(driving_log)))

# Show the first training image and its horizontally flipped counterpart
# (the same flip used for augmentation during dataset preparation).
img = X_train[0,::]
plt.imshow(img)
plt.show()
plt.imshow(np.fliplr(img))
plt.show()


# Target size for the downscaling experiment as (width, height).
# (A previously computed aspect-preserving size was dead code and removed.)
dim = (64, 32)
# perform the actual resizing of the image and show it
resized = cv2.resize(img, dim, interpolation=cv2.INTER_CUBIC)
print(resized.shape)
plt.imshow(resized)
plt.show()
# Compare pixel counts before/after resizing.
print(size_of(img))
print(size_of(resized))
print("Ratio: {}".format((size_of(img)/size_of(resized))))


Number of images: 3x8036 (center, left, right)
(32, 64, 3)
51200
2048
Ratio: 25.0

Implementation of an Inception-style network in Keras, following the Inception-v4 / Inception-ResNet paper (Szegedy et al., 2016).


In [22]:
from keras.layers import concatenate

def conv_block(x, nb_filters, kernel_size=(3,3), strides=1):
    """Same-padded 2D convolution followed by a ReLU activation.

    Args:
        x: input Keras tensor.
        nb_filters: number of convolution filters.
        kernel_size: spatial extent of the kernel.
        strides: convolution stride.

    Returns:
        The activated output tensor.
    """
    conv = Convolution2D(nb_filters, kernel_size, strides=strides, padding='same')(x)
    # Batch normalization intentionally disabled in these experiments:
    #conv = BatchNormalization()(conv)
    return Activation('relu')(conv)

def res_conv_block(x, kernel_size=(3,3)):
    """Residual block: a 1x1 then kxk convolution branch added onto the input.

    The branch keeps the input's channel count (read from the tensor's
    static shape) so the element-wise addition is shape-compatible.
    """
    channels = keras.backend.int_shape(x)[3]
    branch = Convolution2D(channels, kernel_size=(1,1), padding='same')(x)
    branch = Convolution2D(channels, kernel_size, padding='same')(branch)
    # Batch normalization intentionally disabled in these experiments:
    #branch = BatchNormalization()(branch)
    merged = add([x, branch])
    return Activation('relu')(merged)

def inception_A(x):
    """Inception-A style block with a residual connection.

    Three parallel branches (1x1, 1x1-3x3, 1x1-3x3-3x3) are concatenated,
    projected back to the input's channel count with a 1x1 convolution, and
    added onto the input, followed by a ReLU.
    """
    branch_filters = 32
    in_channels = keras.backend.int_shape(x)[3]

    branch_1 = Convolution2D(branch_filters, kernel_size=(1,1), padding='same')(x)

    branch_2 = Convolution2D(branch_filters, kernel_size=(1,1), padding='same')(x)
    branch_2 = Convolution2D(branch_filters, kernel_size=(3,3), padding='same')(branch_2)

    branch_3 = Convolution2D(branch_filters, kernel_size=(1,1), padding='same')(x)
    branch_3 = Convolution2D(branch_filters, kernel_size=(3,3), padding='same')(branch_3)
    branch_3 = Convolution2D(branch_filters, kernel_size=(3,3), padding='same')(branch_3)

    merged = concatenate([branch_1, branch_2, branch_3])
    # Project back to the input depth so the residual addition is valid.
    merged = Convolution2D(in_channels, kernel_size=(1,1), padding='same')(merged)

    return Activation('relu')(add([x, merged]))

def reduction_A(x, filter_bank=None):
    """Inception-v4 style reduction block: halves the spatial resolution.

    Three parallel branches — a 1x1/3x3/strided-3x3 stack, a single strided
    3x3 convolution, and a strided max-pool — are concatenated along the
    channel axis.

    Args:
        x: input Keras tensor.
        filter_bank: dict with keys 'k', 'l', 'm', 'n' giving the branch
            filter counts. Defaults to the Inception-v4 paper values
            {'k': 192, 'l': 224, 'm': 256, 'n': 384}.

    Returns:
        The concatenated, downsampled tensor.
    """
    # A dict literal as a default argument is shared across calls (mutable
    # default pitfall); use a None sentinel instead.
    if filter_bank is None:
        filter_bank = {'k': 192, 'l': 224, 'm': 256, 'n': 384}

    x1 = Convolution2D(filter_bank['k'], kernel_size=(1,1), padding='same')(x)
    x1 = Convolution2D(filter_bank['l'], kernel_size=(3,3), padding='same')(x1)
    x1 = Convolution2D(filter_bank['m'], kernel_size=(3,3), padding='same', strides=2)(x1)
    
    x2 = Convolution2D(filter_bank['n'], kernel_size=(3,3), padding='same', strides=2)(x)
    
    x3 = MaxPool2D(strides=2)(x)
    
    return concatenate([x1, x2, x3])

In [23]:
# Assemble the steering-angle regression model from the blocks defined above.
# Input: raw simulator frames, 160x320 RGB.
init = Input((160, 320, 3))
x      = conv_block(init, 32)
x      = res_conv_block(x)
x      = conv_block(x, 64)
x      = inception_A(x)
# Reduced filter bank compared to the paper defaults to keep the model small.
x      = reduction_A(x, filter_bank={'k':24,'l':32,'m':48,'n':64})
# Four strided conv blocks downsample 80x160 -> 5x10 before the dense head.
x      = conv_block(x, 32, strides=2)
x      = conv_block(x, 32, strides=2)
x      = conv_block(x, 32, strides=2)
x      = conv_block(x, 32, strides=2)
x      = Flatten()(x)
x      = Dense(activation='relu', units=10)(x)
# Single linear output unit: the predicted steering angle.
out    = Dense(units=1, use_bias=True)(x)
model1 = Model(init, out, name='Inception-v4')
model1.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_1 (InputLayer)             (None, 160, 320, 3)   0                                            
____________________________________________________________________________________________________
conv2d_1 (Conv2D)                (None, 160, 320, 32)  896                                          
____________________________________________________________________________________________________
activation_1 (Activation)        (None, 160, 320, 32)  0                                            
____________________________________________________________________________________________________
conv2d_2 (Conv2D)                (None, 160, 320, 32)  1056                                         
____________________________________________________________________________________________________
conv2d_3 (Conv2D)                (None, 160, 320, 32)  9248                                         
____________________________________________________________________________________________________
add_1 (Add)                      (None, 160, 320, 32)  0                                            
____________________________________________________________________________________________________
activation_2 (Activation)        (None, 160, 320, 32)  0                                            
____________________________________________________________________________________________________
conv2d_4 (Conv2D)                (None, 160, 320, 64)  18496                                        
____________________________________________________________________________________________________
activation_3 (Activation)        (None, 160, 320, 64)  0                                            
____________________________________________________________________________________________________
conv2d_8 (Conv2D)                (None, 160, 320, 32)  2080                                         
____________________________________________________________________________________________________
conv2d_6 (Conv2D)                (None, 160, 320, 32)  2080                                         
____________________________________________________________________________________________________
conv2d_9 (Conv2D)                (None, 160, 320, 32)  9248                                         
____________________________________________________________________________________________________
conv2d_5 (Conv2D)                (None, 160, 320, 32)  2080                                         
____________________________________________________________________________________________________
conv2d_7 (Conv2D)                (None, 160, 320, 32)  9248                                         
____________________________________________________________________________________________________
conv2d_10 (Conv2D)               (None, 160, 320, 32)  9248                                         
____________________________________________________________________________________________________
concatenate_1 (Concatenate)      (None, 160, 320, 96)  0                                            
____________________________________________________________________________________________________
conv2d_11 (Conv2D)               (None, 160, 320, 64)  6208                                         
____________________________________________________________________________________________________
add_2 (Add)                      (None, 160, 320, 64)  0                                            
____________________________________________________________________________________________________
activation_4 (Activation)        (None, 160, 320, 64)  0                                            
____________________________________________________________________________________________________
conv2d_12 (Conv2D)               (None, 160, 320, 24)  1560                                         
____________________________________________________________________________________________________
conv2d_13 (Conv2D)               (None, 160, 320, 32)  6944                                         
____________________________________________________________________________________________________
conv2d_14 (Conv2D)               (None, 80, 160, 48)   13872                                        
____________________________________________________________________________________________________
conv2d_15 (Conv2D)               (None, 80, 160, 64)   36928                                        
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)   (None, 80, 160, 64)   0                                            
____________________________________________________________________________________________________
concatenate_2 (Concatenate)      (None, 80, 160, 176)  0                                            
____________________________________________________________________________________________________
conv2d_16 (Conv2D)               (None, 40, 80, 32)    50720                                        
____________________________________________________________________________________________________
activation_5 (Activation)        (None, 40, 80, 32)    0                                            
____________________________________________________________________________________________________
conv2d_17 (Conv2D)               (None, 20, 40, 32)    9248                                         
____________________________________________________________________________________________________
activation_6 (Activation)        (None, 20, 40, 32)    0                                            
____________________________________________________________________________________________________
conv2d_18 (Conv2D)               (None, 10, 20, 32)    9248                                         
____________________________________________________________________________________________________
activation_7 (Activation)        (None, 10, 20, 32)    0                                            
____________________________________________________________________________________________________
conv2d_19 (Conv2D)               (None, 5, 10, 32)     9248                                         
____________________________________________________________________________________________________
activation_8 (Activation)        (None, 5, 10, 32)     0                                            
____________________________________________________________________________________________________
flatten_1 (Flatten)              (None, 1600)          0                                            
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 10)            16010                                        
____________________________________________________________________________________________________
dense_2 (Dense)                  (None, 1)             11                                           
====================================================================================================
Total params: 223,677.0
Trainable params: 223,677.0
Non-trainable params: 0.0
____________________________________________________________________________________________________

In [ ]:
now = datetime.datetime.now
batch_size = 3
epochs = 1

# Steering-angle prediction is a regression task, so classification
# 'accuracy' is meaningless here; track mean absolute error instead.
model1.compile(loss='mse',
              optimizer='adadelta',
              metrics=['mae'])

# Time the training run.
t = now()
model1.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(X_test, y_test))
print('Training time: %s' % (now() - t))
# evaluate() returns [loss, metric] in compile order.
score = model1.evaluate(X_test, y_test, verbose=0)
print('Test loss (MSE):', score[0])
print('Test MAE:', score[1])

In [ ]: