In [1]:
%matplotlib inline  

# Imports
import h5py
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
import matplotlib
from pylab import imshow, show, cm
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from skimage import io
import math
import random
from skimage.transform import resize
import os.path


from keras.models import Model
from keras.layers import Dense, Activation, Reshape, AveragePooling2D, MaxPooling2D, Input, Flatten, merge, Convolution2D, Dropout, LocallyConnected2D
from keras.regularizers import l1, l2
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import TensorBoard


Using TensorFlow backend.

In [2]:
# CONSTANTS
imgSize = 54

In [3]:
# Define some useful Functions
def get_box_data(index, hdf5_data):
    """
    get `left, top, width, height` of each picture
    :param index:
    :param hdf5_data:
    :return:
    """
    meta_data = dict()
    meta_data['height'] = []
    meta_data['label'] = []
    meta_data['left'] = []
    meta_data['top'] = []
    meta_data['width'] = []

    def collect_attrs(name, obj):
        # A single digit is stored as a scalar; multiple digits are stored
        # as an array of HDF5 references that must be dereferenced.
        vals = []
        if obj.shape[0] == 1:
            vals.append(obj[0][0])
        else:
            for k in range(obj.shape[0]):
                vals.append(int(hdf5_data[obj[k][0]][0][0]))
        meta_data[name] = vals

    box = hdf5_data['/digitStruct/bbox'][index]
    hdf5_data[box[0]].visititems(collect_attrs)
    return meta_data

def get_name(index, hdf5_data):
    """Return the image file name stored at `index` (saved as an array of character codes)."""
    name = hdf5_data['/digitStruct/name']
    return ''.join([chr(v[0]) for v in hdf5_data[name[index][0]].value])

def accuracy(predictions, labels):
    """Percentage of samples whose argmax prediction matches the argmax label."""
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])

def oneHot(num, length):
    """One-hot encode `num` into a vector of size `length` (the 1 goes at index num-1)."""
    arr = np.zeros(length)
    arr[num - 1] = 1
    return arr
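
Note that `oneHot` puts the 1 at index `num-1`. With SVHN's labelling convention (digits 1-9 are labelled 1-9 and the digit '0' is labelled 10), the digit '0' therefore lands in the last slot. A quick illustration of this assumed mapping (not part of the original run):

print(oneHot(1, 10))   # digit 1 -> 1 at index 0
print(oneHot(10, 10))  # digit 0 (SVHN label 10) -> 1 at index 9
print(oneHot(2, 5))    # a 2-digit number -> 1 at index 1 of the length vector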

def maybeLoadData(folder, force=False, variations=False):
    """Load the pre-processed arrays for `folder`, using a pickle cache when available."""
    # Note: with no trailing slash on `folder` this becomes e.g. '.../traindata.pk'
    file_path = folder + 'data.pk'
    if force or not os.path.exists(file_path):
        imageData, imageLengths, imageDigits = loadData(folder, variations)
        data = {'imageData': imageData, 'imageLengths': imageLengths, 'imageDigits': imageDigits}
        pickle.dump(data, open(file_path, "wb"))

    data = pickle.load(open(file_path, "rb"))
    return data['imageData'], data['imageLengths'], data['imageDigits']


def loadData(folder, variations=False):
    # First load the digitStruct metadata using h5py
    f = h5py.File(folder + '/' + 'digitStruct.mat')
    # Number of images to iterate through
    length = len(f['/digitStruct/name'])

    #length = 10   # uncomment to test on a small subset

    imageData = np.zeros([length, imgSize, imgSize, 3]).astype(np.float32)
    imageLengths = np.zeros([length, 5]).astype(np.int)
    imageDigits = np.zeros([length, 5, 10]).astype(np.int)

    # Iterate through the images
    for i in range(length):
        if i % 500 == 0:   # progress indicator (comment out if it causes problems)
            print("Loaded {} out of {}".format(i, length))

        #Read the image
        imageFile = folder + '/' + get_name(i,f)
        img = io.imread(imageFile)
        #Read the box data & get the bounding box for all characters (using first and last digit)
        boxData=get_box_data(i, f)

        firstTop = int(boxData['top'][0])
        firstLeft = int(boxData['left'][0])
        firstRight = int(boxData['left'][0]) + int(boxData['width'][0])
        firstBottom = int(boxData['top'][0]) + int(boxData['height'][0])

        l = len(boxData['top'])
        lastTop = int(boxData['top'][l-1])
        lastLeft = int(boxData['left'][l-1])
        lastRight = int(boxData['left'][l-1]) + int(boxData['width'][l-1])
        lastBottom = int(boxData['top'][l-1]) + int(boxData['height'][l-1])

        top = min(firstTop, lastTop)
        left = min(firstLeft, lastLeft)
        right = max(firstRight, lastRight)
        bottom = max(firstBottom, lastBottom)

        height = bottom-top
        width = right-left
        vertMiddle = (bottom+top)//2
        horCenter = (left+right)//2

        if variations:
            # Expand the crop box by 30% around its centre so that a random
            # 54x54 crop (taken below) can jitter around the digits
            top = vertMiddle - ((1.3*height)//2)
            bottom = vertMiddle + ((1.3*height)//2)
            left = horCenter - ((1.3*width)//2)
            right = horCenter + ((1.3*width)//2)

        top = int(max(top, 0))
        left = int(max(left, 0))
        right = int(min(right, img.shape[1]))
        bottom = int(min(bottom, img.shape[0]))


        #Extract only the RoI for faster pre-processing
        img = img[top:bottom, left:right, :]


        #Length of digits
        numberOfDigits = len(boxData['label'])

        if variations:
            # Resize to 64x64, then take a random 54x54 crop as augmentation
            img = resize(img, (64, 64, 3))
            leftStart = random.randint(0, 9)
            topStart = random.randint(0, 9)
            img = img[topStart:(topStart+imgSize), leftStart:(leftStart+imgSize)]
        else:
            img = resize(img, (imgSize, imgSize, 3))

        
        #Copy the data
        oneImageData = np.resize(img, (imgSize, imgSize, 3)).astype(np.float32)
        #plt.imshow(oneImageData)
        #plt.show()

        # Note: skimage's resize already returns floats in [0, 1], so this extra
        # division shrinks the value range further; the display cell below
        # multiplies by 255 again to compensate.
        oneImageData = oneImageData/255.0
        imageData[i] = oneImageData
        first = 0
        if numberOfDigits > 5:
            # The network only handles up to 5 digits: clamp the length and
            # skip the first digit of longer numbers
            numberOfDigits = 5
            print(boxData['label'])
            first = 1

        imageLengths[i] = oneHot(numberOfDigits, 5)

        for k in range(0,5):
            if(k<numberOfDigits):
                imageDigits[i,k,:]=oneHot(int(boxData['label'][int(k+first)]),10)
#             else:
#                 imageDigits[i,k,10]=1

    
    # Shuffle so that batches and the later validation/test split are not ordered by file name
    shuffledIndexes = np.arange(length)
    np.random.shuffle(shuffledIndexes)

    imageData = imageData[shuffledIndexes,:,:,:]
    imageLengths = imageLengths[shuffledIndexes,:]
    imageDigits = imageDigits[shuffledIndexes,:,:]
    return imageData,imageLengths, imageDigits

In [4]:
trainPath = '/home/carnd/data/svhn/train'
testPath = '/home/carnd/data/svhn/test'

print("Loading data")
trainImageData, trainImageLengths,trainImageDigits = maybeLoadData(trainPath, force=False, variations=False)
print("Training data images: {}".format(trainImageData.shape))
print("              length: {}".format(trainImageLengths.shape))
print("              digits: {}".format(trainImageDigits.shape))

print("Loading test & validation data")
folderImageData, folderImageLengths,folderImageDigits = maybeLoadData(testPath, force=False, variations=False)
print("Folder test data images: {}".format(folderImageData.shape))
print("          length: {}".format(folderImageLengths.shape))
print("          digits: {}".format(folderImageDigits.shape))

half = len(folderImageData)//2
validationImageData = folderImageData[0:half,:]
validationImageLengths = folderImageLengths[0:half,:]
validationImageDigits= folderImageDigits[0:half,:,:]
print("Validation data images: {}".format(validationImageData.shape))
print("                length: {}".format(validationImageLengths.shape))
print("                digits: {}".format(validationImageDigits.shape))

testImageData = folderImageData[half:,:]
testImageLengths = folderImageLengths[half:,:]
testImageDigits= folderImageDigits[half:,:,:]
print("Test data images: {}".format(testImageData.shape))
print("          length: {}".format(testImageLengths.shape))
print("          digits: {}".format(testImageDigits.shape))

print("Data loaded.")


Loading data
Training data images: (33402, 54, 54, 3)
              length: (33402, 5)
              digits: (33402, 5, 10)
Loading test & validation data
Folder test data images: (13068, 54, 54, 3)
          length: (13068, 5)
          digits: (13068, 5, 10)
Validation data images: (6534, 54, 54, 3)
                length: (6534, 5)
                digits: (6534, 5, 10)
Test data images: (6534, 54, 54, 3)
          length: (6534, 5)
          digits: (6534, 5, 10)
Data loaded.

In [5]:
#Confirm the data visually for one sample
d = 6135
# loadData divided the already-[0,1] floats by 255, so scale back up for display
plt.imshow(trainImageData[d]*255)
plt.show()
print(trainImageLengths[d])
print(trainImageDigits[d])

trainDigit0  = trainImageDigits[:,0,:]
trainDigit1  = trainImageDigits[:,1,:]
trainDigit2  = trainImageDigits[:,2,:]
trainDigit3  = trainImageDigits[:,3,:]
trainDigit4  = trainImageDigits[:,4,:]

print(trainDigit0[d])
print(trainDigit1[d])
print(trainDigit2[d])
print(trainDigit3[d])
print(trainDigit4[d])


[0 1 0 0 0]
[[0 1 0 0 0 0 0 0 0 0]
 [0 0 0 0 0 0 0 1 0 0]
 [0 0 0 0 0 0 0 0 0 0]
 [0 0 0 0 0 0 0 0 0 0]
 [0 0 0 0 0 0 0 0 0 0]]
[0 1 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 1 0 0]
[0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0]
[0 0 0 0 0 0 0 0 0 0]

In [6]:
inputSize=imgSize*imgSize
channels=3
num_labels=5

def getCnnModel():
    x = Input(batch_shape=(None, imgSize, imgSize,channels))

    # Four convolutional blocks: conv -> max-pool -> batch-norm -> dropout
    conv = Convolution2D(64, (5, 5), strides=(1, 1), padding='same', name = "conv1", activation='relu')(x)
    conv = MaxPooling2D(pool_size = (2,2))(conv)
    conv = BatchNormalization()(conv)
    conv = Dropout(0.2)(conv)

    conv = Convolution2D(128, (5, 5), strides=(1, 1), padding='same', name = "conv2", activation='relu')(conv)
    conv = MaxPooling2D(pool_size = (2,2))(conv)
    conv = BatchNormalization()(conv)
    conv = Dropout(0.2)(conv)

    conv = Convolution2D(256, (5, 5), strides=(1, 1),padding='same', name = "conv3", activation='relu')(conv)
    conv = MaxPooling2D(pool_size = (2,2))(conv)
    conv = BatchNormalization()(conv)
    conv = Dropout(0.2)(conv)

    conv = Convolution2D(1024, (5, 5), strides=(1, 1),padding='same', name = "conv4", activation='relu')(conv)
    conv = MaxPooling2D(pool_size = (2,2))(conv)
    conv = BatchNormalization()(conv)
    conv = Dropout(0.2)(conv)


    flat = Flatten()(conv)

    # Three fully connected layers shared by all output heads
    dense = Dense(1024, activation='relu')(flat)
    dense = BatchNormalization()(dense)
    dense = Dropout(0.3)(dense)
    dense = Dense(1024, activation='relu')(dense)
    dense = BatchNormalization()(dense)
    dense = Dropout(0.3)(dense)
    dense = Dense(1024, activation='relu')(dense)
    dense = BatchNormalization()(dense)
    dense = Dropout(0.3)(dense)
    
    # One softmax head for the sequence length (1-5 digits)
    outL = Dense(5)(dense)
    outL = Activation('softmax', name="Length")(outL)

    # Five per-position digit heads (10 classes each)
    outD0 = Dense(10)(dense)
    outD0 = Activation('sigmoid', name="Digit0")(outD0)
    outD1 = Dense(10)(dense)
    outD1 = Activation('sigmoid', name="Digit1")(outD1)
    outD2 = Dense(10)(dense)
    outD2 = Activation('sigmoid', name="Digit2")(outD2)
    outD3 = Dense(10)(dense)
    outD3 = Activation('sigmoid', name="Digit3")(outD3)
    outD4 = Dense(10)(dense)
    outD4 = Activation('sigmoid', name="Digit4")(outD4)
    model = Model(input=x, output=[outL, outD0, outD1, outD2, outD3, outD4])
    return model
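
A quick, illustrative sanity check of the six output heads (not part of the recorded run): calling `predict` on a dummy batch should return one (1, 5) array for the length head and five (1, 10) arrays for the digit heads. The all-zero `dummy` input below is a hypothetical placeholder.

m = getCnnModel()
dummy = np.zeros((1, imgSize, imgSize, 3), dtype=np.float32)  # hypothetical all-zero input
preds = m.predict(dummy)          # list of six arrays, in the order the outputs were declared
print([p.shape for p in preds])   # expected: [(1, 5), (1, 10), (1, 10), (1, 10), (1, 10), (1, 10)]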

In [7]:
batch_size = 256
modelFile = '/home/carnd/data/svhn/model.h5'
force = True
epochs = 25
if force or not os.path.isfile(modelFile):
    tbCallBack = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)


    #model = strongLengthBias()
    model = getCnnModel()
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['accuracy'])

    trainDigit0  = trainImageDigits[:,0,:]
    trainDigit1  = trainImageDigits[:,1,:]
    trainDigit2  = trainImageDigits[:,2,:]
    trainDigit3  = trainImageDigits[:,3,:]
    trainDigit4  = trainImageDigits[:,4,:]

    validationDigit0  = validationImageDigits[:,0,:]
    validationDigit1  = validationImageDigits[:,1,:]
    validationDigit2  = validationImageDigits[:,2,:]
    validationDigit3  = validationImageDigits[:,3,:]
    validationDigit4  = validationImageDigits[:,4,:]


    model.fit(trainImageData, [trainImageLengths, trainDigit0, trainDigit1, trainDigit2, trainDigit3, trainDigit4], nb_epoch=epochs, batch_size=batch_size, validation_data=(validationImageData,[validationImageLengths,validationDigit0,validationDigit1,validationDigit2,validationDigit3,validationDigit4]), callbacks=[tbCallBack], verbose=2)
    model.save(modelFile)
else:
    print('Loading saved model')
    from keras.models import load_model
    model = load_model(modelFile)

testDigit0  = testImageDigits[:,0,:]
testDigit1  = testImageDigits[:,1,:]
testDigit2  = testImageDigits[:,2,:]
testDigit3  = testImageDigits[:,3,:]
testDigit4  = testImageDigits[:,4,:]

score = model.evaluate(testImageData, [testImageLengths, testDigit0, testDigit1, testDigit2, testDigit3, testDigit4], batch_size=32)
print('Test score:', score)


/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/ipykernel/__main__.py:54: UserWarning: Update your `Model` call to the Keras 2 API: `Model(outputs=[<tf.Tenso..., inputs=Tensor("in...)`
/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/ipykernel/__main__.py:29: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.
____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_1 (InputLayer)             (None, 54, 54, 3)     0                                            
____________________________________________________________________________________________________
conv1 (Conv2D)                   (None, 54, 54, 64)    4864        input_1[0][0]                    
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)   (None, 27, 27, 64)    0           conv1[0][0]                      
____________________________________________________________________________________________________
batch_normalization_1 (BatchNorm (None, 27, 27, 64)    256         max_pooling2d_1[0][0]            
____________________________________________________________________________________________________
dropout_1 (Dropout)              (None, 27, 27, 64)    0           batch_normalization_1[0][0]      
____________________________________________________________________________________________________
conv2 (Conv2D)                   (None, 27, 27, 128)   204928      dropout_1[0][0]                  
____________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)   (None, 13, 13, 128)   0           conv2[0][0]                      
____________________________________________________________________________________________________
batch_normalization_2 (BatchNorm (None, 13, 13, 128)   512         max_pooling2d_2[0][0]            
____________________________________________________________________________________________________
dropout_2 (Dropout)              (None, 13, 13, 128)   0           batch_normalization_2[0][0]      
____________________________________________________________________________________________________
conv3 (Conv2D)                   (None, 13, 13, 256)   819456      dropout_2[0][0]                  
____________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)   (None, 6, 6, 256)     0           conv3[0][0]                      
____________________________________________________________________________________________________
batch_normalization_3 (BatchNorm (None, 6, 6, 256)     1024        max_pooling2d_3[0][0]            
____________________________________________________________________________________________________
dropout_3 (Dropout)              (None, 6, 6, 256)     0           batch_normalization_3[0][0]      
____________________________________________________________________________________________________
conv4 (Conv2D)                   (None, 6, 6, 1024)    6554624     dropout_3[0][0]                  
____________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)   (None, 3, 3, 1024)    0           conv4[0][0]                      
____________________________________________________________________________________________________
batch_normalization_4 (BatchNorm (None, 3, 3, 1024)    4096        max_pooling2d_4[0][0]            
____________________________________________________________________________________________________
dropout_4 (Dropout)              (None, 3, 3, 1024)    0           batch_normalization_4[0][0]      
____________________________________________________________________________________________________
flatten_1 (Flatten)              (None, 9216)          0           dropout_4[0][0]                  
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 1024)          9438208     flatten_1[0][0]                  
____________________________________________________________________________________________________
batch_normalization_5 (BatchNorm (None, 1024)          4096        dense_1[0][0]                    
____________________________________________________________________________________________________
dropout_5 (Dropout)              (None, 1024)          0           batch_normalization_5[0][0]      
____________________________________________________________________________________________________
dense_2 (Dense)                  (None, 1024)          1049600     dropout_5[0][0]                  
____________________________________________________________________________________________________
batch_normalization_6 (BatchNorm (None, 1024)          4096        dense_2[0][0]                    
____________________________________________________________________________________________________
dropout_6 (Dropout)              (None, 1024)          0           batch_normalization_6[0][0]      
____________________________________________________________________________________________________
dense_3 (Dense)                  (None, 1024)          1049600     dropout_6[0][0]                  
____________________________________________________________________________________________________
batch_normalization_7 (BatchNorm (None, 1024)          4096        dense_3[0][0]                    
____________________________________________________________________________________________________
dropout_7 (Dropout)              (None, 1024)          0           batch_normalization_7[0][0]      
____________________________________________________________________________________________________
dense_4 (Dense)                  (None, 5)             5125        dropout_7[0][0]                  
____________________________________________________________________________________________________
dense_5 (Dense)                  (None, 10)            10250       dropout_7[0][0]                  
____________________________________________________________________________________________________
dense_6 (Dense)                  (None, 10)            10250       dropout_7[0][0]                  
____________________________________________________________________________________________________
dense_7 (Dense)                  (None, 10)            10250       dropout_7[0][0]                  
____________________________________________________________________________________________________
dense_8 (Dense)                  (None, 10)            10250       dropout_7[0][0]                  
____________________________________________________________________________________________________
dense_9 (Dense)                  (None, 10)            10250       dropout_7[0][0]                  
____________________________________________________________________________________________________
Length (Activation)              (None, 5)             0           dense_4[0][0]                    
____________________________________________________________________________________________________
Digit0 (Activation)              (None, 10)            0           dense_5[0][0]                    
____________________________________________________________________________________________________
Digit1 (Activation)              (None, 10)            0           dense_6[0][0]                    
____________________________________________________________________________________________________
Digit2 (Activation)              (None, 10)            0           dense_7[0][0]                    
____________________________________________________________________________________________________
Digit3 (Activation)              (None, 10)            0           dense_8[0][0]                    
____________________________________________________________________________________________________
Digit4 (Activation)              (None, 10)            0           dense_9[0][0]                    
====================================================================================================
Total params: 19,195,831
Trainable params: 19,186,743
Non-trainable params: 9,088
____________________________________________________________________________________________________
Train on 33402 samples, validate on 6534 samples
Epoch 1/25
132s - loss: 1.9380 - Length_loss: 0.4075 - Digit0_loss: 0.4192 - Digit1_loss: 0.4171 - Digit2_loss: 0.2825 - Digit3_loss: 0.2139 - Digit4_loss: 0.1977 - Length_acc: 0.8413 - Digit0_acc: 0.8304 - Digit1_acc: 0.8430 - Digit2_acc: 0.8940 - Digit3_acc: 0.9175 - Digit4_acc: 0.9232 - val_loss: 1.6966 - val_Length_loss: 0.9416 - val_Digit0_loss: 0.3189 - val_Digit1_loss: 0.2882 - val_Digit2_loss: 0.1118 - val_Digit3_loss: 0.0288 - val_Digit4_loss: 0.0074 - val_Length_acc: 0.8000 - val_Digit0_acc: 0.9000 - val_Digit1_acc: 0.9192 - val_Digit2_acc: 0.9832 - val_Digit3_acc: 0.9987 - val_Digit4_acc: 1.0000
Epoch 2/25
123s - loss: 0.7834 - Length_loss: 0.1326 - Digit0_loss: 0.2411 - Digit1_loss: 0.2705 - Digit2_loss: 0.1126 - Digit3_loss: 0.0227 - Digit4_loss: 0.0039 - Length_acc: 0.9481 - Digit0_acc: 0.9160 - Digit1_acc: 0.9165 - Digit2_acc: 0.9693 - Digit3_acc: 0.9955 - Digit4_acc: 0.9999 - val_loss: 1.8152 - val_Length_loss: 1.0760 - val_Digit0_loss: 0.3124 - val_Digit1_loss: 0.3221 - val_Digit2_loss: 0.0924 - val_Digit3_loss: 0.0107 - val_Digit4_loss: 0.0016 - val_Length_acc: 0.6766 - val_Digit0_acc: 0.8566 - val_Digit1_acc: 0.9192 - val_Digit2_acc: 0.9832 - val_Digit3_acc: 0.9987 - val_Digit4_acc: 1.0000
Epoch 3/25
123s - loss: 0.5295 - Length_loss: 0.0818 - Digit0_loss: 0.1458 - Digit1_loss: 0.1834 - Digit2_loss: 0.0974 - Digit3_loss: 0.0195 - Digit4_loss: 0.0015 - Length_acc: 0.9694 - Digit0_acc: 0.9467 - Digit1_acc: 0.9362 - Digit2_acc: 0.9700 - Digit3_acc: 0.9955 - Digit4_acc: 1.0000 - val_loss: 2.7131 - val_Length_loss: 1.6911 - val_Digit0_loss: 0.4771 - val_Digit1_loss: 0.4225 - val_Digit2_loss: 0.1121 - val_Digit3_loss: 0.0100 - val_Digit4_loss: 4.1049e-04 - val_Length_acc: 0.6766 - val_Digit0_acc: 0.8559 - val_Digit1_acc: 0.9192 - val_Digit2_acc: 0.9832 - val_Digit3_acc: 0.9987 - val_Digit4_acc: 1.0000
Epoch 4/25
123s - loss: 0.3743 - Length_loss: 0.0599 - Digit0_loss: 0.0977 - Digit1_loss: 0.1205 - Digit2_loss: 0.0775 - Digit3_loss: 0.0177 - Digit4_loss: 9.6344e-04 - Length_acc: 0.9781 - Digit0_acc: 0.9654 - Digit1_acc: 0.9579 - Digit2_acc: 0.9736 - Digit3_acc: 0.9954 - Digit4_acc: 0.9999 - val_loss: 2.4042 - val_Length_loss: 1.3864 - val_Digit0_loss: 0.4880 - val_Digit1_loss: 0.4089 - val_Digit2_loss: 0.1100 - val_Digit3_loss: 0.0106 - val_Digit4_loss: 2.8328e-04 - val_Length_acc: 0.6968 - val_Digit0_acc: 0.8660 - val_Digit1_acc: 0.9195 - val_Digit2_acc: 0.9832 - val_Digit3_acc: 0.9987 - val_Digit4_acc: 1.0000
Epoch 5/25
123s - loss: 0.2833 - Length_loss: 0.0426 - Digit0_loss: 0.0739 - Digit1_loss: 0.0898 - Digit2_loss: 0.0603 - Digit3_loss: 0.0161 - Digit4_loss: 6.5787e-04 - Length_acc: 0.9844 - Digit0_acc: 0.9746 - Digit1_acc: 0.9687 - Digit2_acc: 0.9788 - Digit3_acc: 0.9955 - Digit4_acc: 1.0000 - val_loss: 0.9059 - val_Length_loss: 0.3478 - val_Digit0_loss: 0.2406 - val_Digit1_loss: 0.2382 - val_Digit2_loss: 0.0713 - val_Digit3_loss: 0.0076 - val_Digit4_loss: 3.5858e-04 - val_Length_acc: 0.8453 - val_Digit0_acc: 0.9230 - val_Digit1_acc: 0.9326 - val_Digit2_acc: 0.9845 - val_Digit3_acc: 0.9987 - val_Digit4_acc: 1.0000
Epoch 6/25
123s - loss: 0.2286 - Length_loss: 0.0338 - Digit0_loss: 0.0606 - Digit1_loss: 0.0703 - Digit2_loss: 0.0486 - Digit3_loss: 0.0149 - Digit4_loss: 4.2412e-04 - Length_acc: 0.9877 - Digit0_acc: 0.9793 - Digit1_acc: 0.9756 - Digit2_acc: 0.9827 - Digit3_acc: 0.9955 - Digit4_acc: 1.0000 - val_loss: 2.9993 - val_Length_loss: 1.9894 - val_Digit0_loss: 0.3601 - val_Digit1_loss: 0.5296 - val_Digit2_loss: 0.1097 - val_Digit3_loss: 0.0103 - val_Digit4_loss: 2.2210e-04 - val_Length_acc: 0.6776 - val_Digit0_acc: 0.8832 - val_Digit1_acc: 0.9192 - val_Digit2_acc: 0.9832 - val_Digit3_acc: 0.9987 - val_Digit4_acc: 1.0000
Epoch 7/25
123s - loss: 0.1911 - Length_loss: 0.0260 - Digit0_loss: 0.0514 - Digit1_loss: 0.0588 - Digit2_loss: 0.0406 - Digit3_loss: 0.0139 - Digit4_loss: 3.7759e-04 - Length_acc: 0.9910 - Digit0_acc: 0.9825 - Digit1_acc: 0.9792 - Digit2_acc: 0.9857 - Digit3_acc: 0.9956 - Digit4_acc: 1.0000 - val_loss: 0.6678 - val_Length_loss: 0.1388 - val_Digit0_loss: 0.2113 - val_Digit1_loss: 0.2545 - val_Digit2_loss: 0.0567 - val_Digit3_loss: 0.0062 - val_Digit4_loss: 3.5064e-04 - val_Length_acc: 0.9511 - val_Digit0_acc: 0.9255 - val_Digit1_acc: 0.9240 - val_Digit2_acc: 0.9850 - val_Digit3_acc: 0.9987 - val_Digit4_acc: 1.0000
Epoch 8/25
123s - loss: 0.1646 - Length_loss: 0.0222 - Digit0_loss: 0.0444 - Digit1_loss: 0.0498 - Digit2_loss: 0.0348 - Digit3_loss: 0.0131 - Digit4_loss: 3.1614e-04 - Length_acc: 0.9925 - Digit0_acc: 0.9846 - Digit1_acc: 0.9825 - Digit2_acc: 0.9874 - Digit3_acc: 0.9956 - Digit4_acc: 1.0000 - val_loss: 0.5488 - val_Length_loss: 0.1931 - val_Digit0_loss: 0.1388 - val_Digit1_loss: 0.1604 - val_Digit2_loss: 0.0493 - val_Digit3_loss: 0.0068 - val_Digit4_loss: 3.3471e-04 - val_Length_acc: 0.9216 - val_Digit0_acc: 0.9491 - val_Digit1_acc: 0.9452 - val_Digit2_acc: 0.9863 - val_Digit3_acc: 0.9988 - val_Digit4_acc: 1.0000
Epoch 9/25
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-a8660fc1b617> in <module>()
     27 
     28 
---> 29     model.fit(trainImageData, [trainImageLengths, trainDigit0, trainDigit1, trainDigit2, trainDigit3, trainDigit4], nb_epoch=epochs, batch_size=batch_size, validation_data=(validationImageData,[validationImageLengths,validationDigit0,validationDigit1,validationDigit2,validationDigit3,validationDigit4]), callbacks=[tbCallBack], verbose=2)
     30     model.save(modelFile)
     31 else:

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
   1428                               val_f=val_f, val_ins=val_ins, shuffle=shuffle,
   1429                               callback_metrics=callback_metrics,
-> 1430                               initial_epoch=initial_epoch)
   1431 
   1432     def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch)
   1077                 batch_logs['size'] = len(batch_ids)
   1078                 callbacks.on_batch_begin(batch_index, batch_logs)
-> 1079                 outs = f(ins_batch)
   1080                 if not isinstance(outs, list):
   1081                     outs = [outs]

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2266         updated = session.run(self.outputs + [self.updates_op],
   2267                               feed_dict=feed_dict,
-> 2268                               **self.session_kwargs)
   2269         return updated[:len(self.outputs)]
   2270 

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    765     try:
    766       result = self._run(None, fetches, feed_dict, options_ptr,
--> 767                          run_metadata_ptr)
    768       if run_metadata:
    769         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    963     if final_fetches or final_targets:
    964       results = self._do_run(handle, final_targets, final_fetches,
--> 965                              feed_dict_string, options, run_metadata)
    966     else:
    967       results = []

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1013     if handle is None:
   1014       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1015                            target_list, options, run_metadata)
   1016     else:
   1017       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1020   def _do_call(self, fn, *args):
   1021     try:
-> 1022       return fn(*args)
   1023     except errors.OpError as e:
   1024       message = compat.as_text(e.message)

/home/carnd/anaconda3/envs/dl/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1002         return tf_session.TF_Run(session, options,
   1003                                  feed_dict, fetch_list, target_list,
-> 1004                                  status, run_metadata)
   1005 
   1006     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 
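
Once a model is available (partially trained here, or loaded from `modelFile`), its six outputs can be turned back into a house-number string by taking the argmax of each head and keeping only as many digits as the length head predicts. The sketch below is illustrative only and was not part of the recorded run; it assumes `model` and `testImageData` from the cells above and relies on the one-hot convention used in `oneHot` (index 9 stands for the digit '0').

def decodePrediction(model, image):
    # model.predict returns [length, digit0, ..., digit4] for a batch of one image
    length, d0, d1, d2, d3, d4 = model.predict(image[np.newaxis, ...])
    nDigits = int(np.argmax(length)) + 1           # length head is one-hot over 1..5 digits
    digits = []
    for head in (d0, d1, d2, d3, d4)[:nDigits]:
        idx = int(np.argmax(head))                 # indices 0..8 -> digits 1..9, index 9 -> '0'
        digits.append('0' if idx == 9 else str(idx + 1))
    return ''.join(digits)

# Hypothetical usage: decode the first test image
print(decodePrediction(model, testImageData[0]))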
