Attack Examples on GTSRB


In [1]:
# Pin the notebook to a single GPU so it does not grab every device on the host
import os

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # enumerate GPUs by PCI bus id
os.environ["CUDA_VISIBLE_DEVICES"] = "0"         # expose only GPU 0 to TensorFlow

In [2]:
%matplotlib inline

# Auto-reload edited project modules so changes under lib/ are picked up live
%reload_ext autoreload
%autoreload 2

# NOTE(review): star imports pull constants (WEIGTHS_PATH, N_CHANNEL,
# NUM_LABELS, ...) and model/attack helpers into the notebook namespace;
# the origin of each individual name is not visible from here.
from parameters import *
from lib.utils import *
from lib.attacks import *
from lib.keras_utils import *
from lib.RandomTransform import *
from lib.OptCarlini import *
from lib.OptTransform import *


Using TensorFlow backend.

Initialize Model


In [3]:
# Build the multi-scale CNN and load pre-trained weights
# (WEIGTHS_PATH spelling follows the constant defined in parameters.py)
model = built_mltscl()
model.load_weights(WEIGTHS_PATH)

# Load the GTSRB train/validation/test splits
x_train, y_train, x_val, y_val, x_test, y_test = load_dataset_GTSRB(
    n_channel=N_CHANNEL)

# Convert integer labels to one-hot encoding (NUM_LABELS = 43 classes,
# per the model summary below)
y_train = keras.utils.to_categorical(y_train, NUM_LABELS)
y_test = keras.utils.to_categorical(y_test, NUM_LABELS)
y_val = keras.utils.to_categorical(y_val, NUM_LABELS)

# Read human-readable sign names; column 1 of signnames.csv maps
# label id -> class name
signnames = read_csv("./input_data/signnames.csv").values[:, 1]

In [4]:
# Print the layer-by-layer architecture and parameter counts
model.summary()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
input_1 (InputLayer)             (None, 32, 32, 3)     0                                            
____________________________________________________________________________________________________
conv2d_1 (Conv2D)                (None, 32, 32, 32)    2432        input_1[0][0]                    
____________________________________________________________________________________________________
dropout_1 (Dropout)              (None, 32, 32, 32)    0           conv2d_1[0][0]                   
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)   (None, 16, 16, 32)    0           dropout_1[0][0]                  
____________________________________________________________________________________________________
conv2d_2 (Conv2D)                (None, 16, 16, 64)    51264       max_pooling2d_1[0][0]            
____________________________________________________________________________________________________
dropout_2 (Dropout)              (None, 16, 16, 64)    0           conv2d_2[0][0]                   
____________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)   (None, 8, 8, 64)      0           dropout_2[0][0]                  
____________________________________________________________________________________________________
conv2d_3 (Conv2D)                (None, 8, 8, 128)     204928      max_pooling2d_2[0][0]            
____________________________________________________________________________________________________
dropout_3 (Dropout)              (None, 8, 8, 128)     0           conv2d_3[0][0]                   
____________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)   (None, 4, 4, 32)      0           max_pooling2d_1[0][0]            
____________________________________________________________________________________________________
max_pooling2d_5 (MaxPooling2D)   (None, 4, 4, 64)      0           max_pooling2d_2[0][0]            
____________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)   (None, 4, 4, 128)     0           dropout_3[0][0]                  
____________________________________________________________________________________________________
flatten_1 (Flatten)              (None, 512)           0           max_pooling2d_4[0][0]            
____________________________________________________________________________________________________
flatten_2 (Flatten)              (None, 1024)          0           max_pooling2d_5[0][0]            
____________________________________________________________________________________________________
flatten_3 (Flatten)              (None, 2048)          0           max_pooling2d_3[0][0]            
____________________________________________________________________________________________________
concatenate_1 (Concatenate)      (None, 3584)          0           flatten_1[0][0]                  
                                                                   flatten_2[0][0]                  
                                                                   flatten_3[0][0]                  
____________________________________________________________________________________________________
dense_1 (Dense)                  (None, 1024)          3671040     concatenate_1[0][0]              
____________________________________________________________________________________________________
dropout_4 (Dropout)              (None, 1024)          0           dense_1[0][0]                    
____________________________________________________________________________________________________
dense_2 (Dense)                  (None, 43)            44075       dropout_4[0][0]                  
====================================================================================================
Total params: 3,973,739
Trainable params: 3,973,739
Non-trainable params: 0
____________________________________________________________________________________________________

Load data


In [5]:
# Directory of sample sign images and the file listing their ground-truth labels
SAMPLE_IMG_DIR = './traffic_sign_samples'
SAMPLE_LABEL = './traffic_sign_samples/samples_label.txt'

In [6]:
# Load sample images, labels and masks (both resized and full-resolution
# variants, per the paired *_full outputs)
x_smp, x_smp_full, y_smp, masks, masks_full = load_samples(SAMPLE_IMG_DIR, SAMPLE_LABEL)

In [31]:
# Set target class to attack
tg = 10
print "Target class: " + signnames[tg]
# Set number of samples
size = 10

y_target = np.zeros((len(x_test))) + tg
y_target = keras.utils.to_categorical(y_target, NUM_LABELS)

# Filter samples (originally misclassified, originally classified as target)
x_fil, y_fil, del_id = filter_samples(model, x_smp, y_smp, y_target=y_target)
x_fil_full = np.delete(x_smp_full, del_id, axis=0)
masks_fil = np.delete(masks, del_id, axis=0)
masks_fil_full = np.delete(masks_full, del_id, axis=0)

# Set samples to attack (choose some samples by random)
ind = np.random.choice(range(len(y_fil)), size=size)
x_ben = np.copy(x_fil[ind])
x_ben_full = np.copy(x_fil_full[ind])
y_ben = np.copy(y_fil[ind])
y_tg = np.copy(y_target[ind])
masks_ben = np.copy(masks_fil[ind])
masks_ben_full = np.copy(masks_fil_full[ind])


Target class: No passing for vechiles over 3.5 metric tons

Attack Examples

Fast Gradient


In [48]:
# Fast gradient attack: one adversarial batch per perturbation magnitude
# in mag_list (6 magnitudes from 1.0 to 2.0), restricted to the sign area
# via masks_ben
mag_list = np.linspace(1.0, 2.0, 6)
x_fg = fg(model, x_ben, y_tg, mag_list, target=True, mask=masks_ben)

In [61]:
im = x_ben[0]
print "Original class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()

im = x_fg[5, 0]
print "Adversarial class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()


['Original class: No vechiles']
['Adversarial class: Speed limit (120km/h)']

Iterative Attack

Iterative steps in gradient direction


In [53]:
# Iterative gradient attack: 32 steps of size 0.05 toward the target class.
# NOTE(review): the RuntimeWarnings in the output come from lib/utils.py
# normalizing a gradient whose norm is zero — presumably harmless here,
# but worth confirming in lib/utils.py.
x_it = iterative(model, x_ben, y_tg, n_step=32, step_size=0.05, target=True, mask=masks_ben)


lib/utils.py:421: RuntimeWarning: divide by zero encountered in divide
  grad /= np.linalg.norm(grad)
lib/utils.py:421: RuntimeWarning: invalid value encountered in divide
  grad /= np.linalg.norm(grad)

In [54]:
im = x_ben[0]
print "Original class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()

im = x_it[0]
print "Adversarial class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()


['Original class: No vechiles']
['Adversarial class: Speed limit (100km/h)']

Optimization-Based Attack


In [55]:
# Initialize optimizer
# Initialize the optimization-based (Carlini-style) attack on a single image
opt = OptCarlini(model, c=1, lr=0.01, target=True, use_bound=False, init_scl=0.1,
                 loss_op=0, var_change=True, k=5)
# Run optimizer on sample (only takes one sample at a time)
x_adv, norm = opt.optimize(x_ben[0], y_tg[0], n_step=5000, prog=True, mask=masks_ben[0])
# Alternative: run optimizer with a search over the constant c
#x_adv, norm = opt.optimize_search(x_ben[0], y_tg[0], n_step=5000, search_step=10, prog=True, mask=masks_ben[0])


Step: 0, norm=12.276, loss=57.971, obj=70.247
Step: 50, norm=12.316, loss=-5.000, obj=7.316
Step: 100, norm=12.073, loss=-5.000, obj=7.073
Step: 150, norm=11.769, loss=-5.000, obj=6.769
Step: 200, norm=11.428, loss=-5.000, obj=6.428
Step: 250, norm=11.060, loss=-5.000, obj=6.060
Step: 300, norm=10.676, loss=-5.000, obj=5.676
Step: 350, norm=10.414, loss=-5.000, obj=5.414
Step: 400, norm=10.087, loss=-5.000, obj=5.087
Step: 450, norm=9.719, loss=-5.000, obj=4.719
Step: 500, norm=9.350, loss=-5.000, obj=4.350
Step: 550, norm=8.982, loss=-5.000, obj=3.982
Step: 600, norm=8.617, loss=-5.000, obj=3.617
Step: 650, norm=8.256, loss=-5.000, obj=3.256
Step: 700, norm=7.900, loss=-5.000, obj=2.900
Step: 750, norm=7.550, loss=-5.000, obj=2.550
Step: 800, norm=7.310, loss=-5.000, obj=2.310
Step: 850, norm=6.988, loss=-5.000, obj=1.988
Step: 900, norm=6.671, loss=-5.000, obj=1.671
Step: 950, norm=6.361, loss=-5.000, obj=1.361
Step: 1000, norm=6.184, loss=-5.000, obj=1.184
Step: 1050, norm=5.903, loss=-5.000, obj=0.903
Step: 1100, norm=5.678, loss=-5.000, obj=0.678
Step: 1150, norm=5.594, loss=-5.000, obj=0.594
Step: 1200, norm=5.349, loss=-5.000, obj=0.349
Step: 1250, norm=5.110, loss=-5.000, obj=0.110
Step: 1300, norm=4.878, loss=-5.000, obj=-0.122
Step: 1350, norm=4.652, loss=-5.000, obj=-0.348
Step: 1400, norm=4.433, loss=-5.000, obj=-0.567
Step: 1450, norm=4.220, loss=-5.000, obj=-0.780
Step: 1500, norm=4.085, loss=-5.000, obj=-0.915
Step: 1550, norm=3.948, loss=-5.000, obj=-1.052
Step: 1600, norm=3.755, loss=-5.000, obj=-1.245
Step: 1650, norm=3.568, loss=-5.000, obj=-1.432
Step: 1700, norm=3.495, loss=-5.000, obj=-1.505
Step: 1750, norm=3.485, loss=-5.000, obj=-1.515
Step: 1800, norm=3.323, loss=-5.000, obj=-1.677
Step: 1850, norm=3.166, loss=-5.000, obj=-1.834
Step: 1900, norm=3.014, loss=-5.000, obj=-1.986
Step: 1950, norm=3.028, loss=-5.000, obj=-1.972
Step: 2000, norm=2.960, loss=-5.000, obj=-2.040
Step: 2050, norm=2.824, loss=-5.000, obj=-2.176
Step: 2100, norm=2.693, loss=-5.000, obj=-2.307
Step: 2150, norm=2.588, loss=-5.000, obj=-2.412
Step: 2200, norm=2.573, loss=-5.000, obj=-2.427
Step: 2250, norm=2.454, loss=-5.000, obj=-2.546
Step: 2300, norm=2.338, loss=-5.000, obj=-2.662
Step: 2350, norm=2.477, loss=-5.000, obj=-2.523
Step: 2400, norm=2.385, loss=-5.000, obj=-2.615
Step: 2450, norm=2.280, loss=-5.000, obj=-2.720
Step: 2500, norm=2.179, loss=-5.000, obj=-2.821
Step: 2550, norm=2.082, loss=-5.000, obj=-2.918
Step: 2600, norm=2.098, loss=-5.000, obj=-2.902
Step: 2650, norm=2.009, loss=-5.000, obj=-2.991
Step: 2700, norm=2.055, loss=-5.000, obj=-2.945
Step: 2750, norm=2.109, loss=-5.000, obj=-2.891
Step: 2800, norm=2.019, loss=-5.000, obj=-2.981
Step: 2850, norm=1.933, loss=-5.000, obj=-3.067
Step: 2900, norm=1.851, loss=-5.000, obj=-3.149
Step: 2950, norm=1.853, loss=-5.000, obj=-3.147
Step: 3000, norm=1.789, loss=-5.000, obj=-3.211
Step: 3050, norm=1.711, loss=-4.776, obj=-3.065
Step: 3100, norm=1.890, loss=-5.000, obj=-3.110
Step: 3150, norm=1.812, loss=-5.000, obj=-3.188
Step: 3200, norm=1.738, loss=-5.000, obj=-3.262
Step: 3250, norm=1.666, loss=-5.000, obj=-3.334
Step: 3300, norm=1.769, loss=-5.000, obj=-3.231
Step: 3350, norm=1.735, loss=-5.000, obj=-3.265
Step: 3400, norm=1.664, loss=-5.000, obj=-3.336
Step: 3450, norm=1.597, loss=-5.000, obj=-3.403
Step: 3500, norm=1.754, loss=-5.000, obj=-3.246
Step: 3550, norm=1.691, loss=-5.000, obj=-3.309
Step: 3600, norm=1.626, loss=-5.000, obj=-3.374
Step: 3650, norm=1.564, loss=-5.000, obj=-3.436
Step: 3700, norm=1.591, loss=-5.000, obj=-3.409
Step: 3750, norm=1.544, loss=-5.000, obj=-3.456
Step: 3800, norm=1.648, loss=-5.000, obj=-3.352
Step: 3850, norm=1.638, loss=-5.000, obj=-3.362
Step: 3900, norm=1.577, loss=-5.000, obj=-3.423
Step: 3950, norm=1.519, loss=-5.000, obj=-3.481
Step: 4000, norm=1.510, loss=-5.000, obj=-3.490
Step: 4050, norm=1.492, loss=-5.000, obj=-3.508
Step: 4100, norm=1.540, loss=-5.000, obj=-3.460
Step: 4150, norm=1.587, loss=-5.000, obj=-3.413
Step: 4200, norm=1.530, loss=-5.000, obj=-3.470
Step: 4250, norm=1.476, loss=-5.000, obj=-3.524
Step: 4300, norm=1.432, loss=-5.000, obj=-3.568
Step: 4350, norm=1.560, loss=-5.000, obj=-3.440
Step: 4400, norm=1.505, loss=-5.000, obj=-3.495
Step: 4450, norm=1.530, loss=-5.000, obj=-3.470
Step: 4500, norm=1.482, loss=-5.000, obj=-3.518
Step: 4550, norm=1.432, loss=-4.907, obj=-3.476
Step: 4600, norm=1.560, loss=-5.000, obj=-3.440
Step: 4650, norm=1.509, loss=-5.000, obj=-3.491
Step: 4700, norm=1.461, loss=-5.000, obj=-3.539
Step: 4750, norm=1.414, loss=-5.000, obj=-3.586
Step: 4800, norm=1.434, loss=-5.000, obj=-3.566
Step: 4850, norm=1.455, loss=-5.000, obj=-3.545
Step: 4900, norm=1.427, loss=-5.000, obj=-3.573
Step: 4950, norm=1.451, loss=-5.000, obj=-3.549

In [56]:
im = x_ben[0]
print "Original class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()

im = x_adv
print "Adversarial class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()


['Original class: No vechiles']
['Adversarial class: No passing for vechiles over 3.5 metric tons']

Optimize with Transformation


In [57]:
# Initialize the transformation-robust optimization attack (same interface
# as OptCarlini, plus a batch of random transforms per step)
opt = OptTransform(model, c=1, lr=0.01, target=True, use_bound=False, init_scl=0.1,
                   loss_op=0, var_change=True, k=5, batch_size=32)
# Run optimizer on sample (only takes one sample at a time)
x_adv, norm = opt.optimize(x_ben[0], y_tg[0], n_step=5000, prog=True, mask=masks_ben[0])
# Alternative: run optimizer with a search over the constant c
#x_adv, norm = opt.optimize_search(x_ben[0], y_tg[0], n_step=5000, search_step=10, prog=True, mask=masks_ben[0])


Step: 0, norm=12.247, loss=50.082, obj=62.329
Step: 50, norm=12.210, loss=-5.000, obj=7.210
Step: 100, norm=11.957, loss=-5.000, obj=6.957
Step: 150, norm=11.646, loss=-5.000, obj=6.646
Step: 200, norm=11.297, loss=-5.000, obj=6.297
Step: 250, norm=10.924, loss=-5.000, obj=5.924
Step: 300, norm=10.536, loss=-5.000, obj=5.536
Step: 350, norm=10.139, loss=-5.000, obj=5.139
Step: 400, norm=9.738, loss=-5.000, obj=4.738
Step: 450, norm=9.337, loss=-5.000, obj=4.337
Step: 500, norm=8.937, loss=-5.000, obj=3.937
Step: 550, norm=8.689, loss=-5.000, obj=3.689
Step: 600, norm=8.365, loss=-5.000, obj=3.365
Step: 650, norm=8.025, loss=-5.000, obj=3.025
Step: 700, norm=7.691, loss=-5.000, obj=2.691
Step: 750, norm=7.363, loss=-5.000, obj=2.363
Step: 800, norm=7.042, loss=-5.000, obj=2.042
Step: 850, norm=6.728, loss=-5.000, obj=1.728
Step: 900, norm=6.422, loss=-5.000, obj=1.422
Step: 950, norm=6.123, loss=-5.000, obj=1.123
Step: 1000, norm=5.832, loss=-5.000, obj=0.832
Step: 1050, norm=5.693, loss=-5.000, obj=0.693
Step: 1100, norm=5.441, loss=-5.000, obj=0.441
Step: 1150, norm=5.189, loss=-5.000, obj=0.189
Step: 1200, norm=4.944, loss=-5.000, obj=-0.056
Step: 1250, norm=4.707, loss=-5.000, obj=-0.293
Step: 1300, norm=4.716, loss=-5.000, obj=-0.284
Step: 1350, norm=4.517, loss=-5.000, obj=-0.483
Step: 1400, norm=4.321, loss=-5.000, obj=-0.679
Step: 1450, norm=4.132, loss=-5.000, obj=-0.868
Step: 1500, norm=3.949, loss=-5.000, obj=-1.051
Step: 1550, norm=3.771, loss=-5.000, obj=-1.229
Step: 1600, norm=3.598, loss=-5.000, obj=-1.402
Step: 1650, norm=3.561, loss=-5.000, obj=-1.439
Step: 1700, norm=3.601, loss=-5.000, obj=-1.399
Step: 1750, norm=3.455, loss=-5.000, obj=-1.545
Step: 1800, norm=3.312, loss=-5.000, obj=-1.688
Step: 1850, norm=3.175, loss=-5.000, obj=-1.825
Step: 1900, norm=3.041, loss=-5.000, obj=-1.959
Step: 1950, norm=3.130, loss=-5.000, obj=-1.870
Step: 2000, norm=3.043, loss=-5.000, obj=-1.957
Step: 2050, norm=2.925, loss=-5.000, obj=-2.075
Step: 2100, norm=2.812, loss=-5.000, obj=-2.188
Step: 2150, norm=2.702, loss=-5.000, obj=-2.298
Step: 2200, norm=2.595, loss=-5.000, obj=-2.405
Step: 2250, norm=2.492, loss=-4.954, obj=-2.463
Step: 2300, norm=2.601, loss=-5.000, obj=-2.399
Step: 2350, norm=2.503, loss=-5.000, obj=-2.497
Step: 2400, norm=2.406, loss=-5.000, obj=-2.594
Step: 2450, norm=2.312, loss=-5.000, obj=-2.688
Step: 2500, norm=2.220, loss=-5.000, obj=-2.780
Step: 2550, norm=2.433, loss=-5.000, obj=-2.567
Step: 2600, norm=2.349, loss=-5.000, obj=-2.651
Step: 2650, norm=2.265, loss=-5.000, obj=-2.735
Step: 2700, norm=2.184, loss=-5.000, obj=-2.816
Step: 2750, norm=2.105, loss=-5.000, obj=-2.895
Step: 2800, norm=2.039, loss=-5.000, obj=-2.961
Step: 2850, norm=2.083, loss=-5.000, obj=-2.917
Step: 2900, norm=2.007, loss=-5.000, obj=-2.993
Step: 2950, norm=1.955, loss=-5.000, obj=-3.045
Step: 3000, norm=2.115, loss=-5.000, obj=-2.885
Step: 3050, norm=2.041, loss=-5.000, obj=-2.959
Step: 3100, norm=1.969, loss=-5.000, obj=-3.031
Step: 3150, norm=1.899, loss=-5.000, obj=-3.101
Step: 3200, norm=1.832, loss=-5.000, obj=-3.168
Step: 3250, norm=1.926, loss=-5.000, obj=-3.074
Step: 3300, norm=1.858, loss=-5.000, obj=-3.142
Step: 3350, norm=1.960, loss=-5.000, obj=-3.040
Step: 3400, norm=1.899, loss=-5.000, obj=-3.101
Step: 3450, norm=1.833, loss=-5.000, obj=-3.167
Step: 3500, norm=1.771, loss=-5.000, obj=-3.229
Step: 3550, norm=1.880, loss=-5.000, obj=-3.120
Step: 3600, norm=1.881, loss=-5.000, obj=-3.119
Step: 3650, norm=1.821, loss=-5.000, obj=-3.179
Step: 3700, norm=1.762, loss=-5.000, obj=-3.238
Step: 3750, norm=1.706, loss=-5.000, obj=-3.294
Step: 3800, norm=1.651, loss=-5.000, obj=-3.349
Step: 3850, norm=1.719, loss=-5.000, obj=-3.281
Step: 3900, norm=1.664, loss=-5.000, obj=-3.336
Step: 3950, norm=1.809, loss=-5.000, obj=-3.191
Step: 4000, norm=1.756, loss=-5.000, obj=-3.244
Step: 4050, norm=1.700, loss=-5.000, obj=-3.300
Step: 4100, norm=1.646, loss=-5.000, obj=-3.354
Step: 4150, norm=1.595, loss=-5.000, obj=-3.405
Step: 4200, norm=1.636, loss=-5.000, obj=-3.364
Step: 4250, norm=1.586, loss=-5.000, obj=-3.414
Step: 4300, norm=1.733, loss=-5.000, obj=-3.267
Step: 4350, norm=1.697, loss=-5.000, obj=-3.303
Step: 4400, norm=1.644, loss=-5.000, obj=-3.356
Step: 4450, norm=1.594, loss=-5.000, obj=-3.406
Step: 4500, norm=1.632, loss=-5.000, obj=-3.368
Step: 4550, norm=1.637, loss=-5.000, obj=-3.363
Step: 4600, norm=1.588, loss=-5.000, obj=-3.412
Step: 4650, norm=1.663, loss=-5.000, obj=-3.337
Step: 4700, norm=1.654, loss=-5.000, obj=-3.346
Step: 4750, norm=1.607, loss=-5.000, obj=-3.393
Step: 4800, norm=1.561, loss=-5.000, obj=-3.439
Step: 4850, norm=1.517, loss=-5.000, obj=-3.483
Step: 4900, norm=1.559, loss=-5.000, obj=-3.441
Step: 4950, norm=1.516, loss=-5.000, obj=-3.484

In [58]:
im = x_ben[0]
print "Original class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()

im = x_adv
print "Adversarial class: " + signnames[predict(model, im)]
plt.imshow(im)
plt.axis('off')
plt.show()


['Original class: No vechiles']
['Adversarial class: No passing for vechiles over 3.5 metric tons']

In [60]:
# Evaluate each attack, return a list of adv success rate
print eval_adv(model, x_fg, y_tg, target=True)
print eval_adv(model, x_it, y_tg, target=True)


[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
0.0

Appendix

Model trainer


In [7]:
# Build model
# Build a fresh (untrained) model for training from scratch
model = built_mltscl()

# Load the extended (augmented) training set; validation and test splits
# come from the standard GTSRB files
x_train, y_train, x_val, y_val, x_test, y_test = load_dataset_GTSRB(
    n_channel=N_CHANNEL, train_file_name='train_extended.p')

# Convert integer labels to one-hot encoding
y_train = keras.utils.to_categorical(y_train, NUM_LABELS)
y_test = keras.utils.to_categorical(y_test, NUM_LABELS)
y_val = keras.utils.to_categorical(y_val, NUM_LABELS)

In [10]:
# Checkpoint every epoch (save_best_only=False), embedding epoch number and
# validation loss in the filename
filepath = './weights.{epoch:02d}-{val_loss:.2f}.hdf5'
modelCheckpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, 
                                                  save_best_only=False, save_weights_only=False, 
                                                  mode='auto', period=1)
# Stop training once val_loss fails to improve by 0.001 for 5 epochs
earlyStop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.001, patience=5, 
                                          verbose=0, mode='auto')

In [14]:
# Train with checkpointing and early stopping; BATCH_SIZE and NUM_EPOCH come
# from parameters.py (the log shows NUM_EPOCH = 100; run was interrupted)
model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCH, verbose=1, 
          callbacks=[modelCheckpoint, earlyStop], validation_data=(x_val, y_val), 
          shuffle=True, initial_epoch=0)


Train on 695980 samples, validate on 4410 samples
Epoch 1/100
695980/695980 [==============================] - 240s - loss: 0.7197 - acc: 0.8864 - val_loss: 0.5842 - val_acc: 0.9404
Epoch 2/100
695980/695980 [==============================] - 235s - loss: 0.5336 - acc: 0.9393 - val_loss: 0.4748 - val_acc: 0.9574
Epoch 3/100
695980/695980 [==============================] - 235s - loss: 0.4958 - acc: 0.9509 - val_loss: 0.4832 - val_acc: 0.9565
Epoch 4/100
695980/695980 [==============================] - 239s - loss: 0.4789 - acc: 0.9572 - val_loss: 0.4538 - val_acc: 0.9719
Epoch 5/100
695980/695980 [==============================] - 235s - loss: 0.4607 - acc: 0.9607 - val_loss: 0.4437 - val_acc: 0.9671
Epoch 6/100
390752/695980 [===============>..............] - ETA: 102s - loss: 0.4546 - acc: 0.9624
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-14-883cd5b4f5fe> in <module>()
      1 model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=NUM_EPOCH, verbose=1, 
      2           callbacks=[modelCheckpoint, earlyStop], validation_data=(x_val, y_val),
----> 3           shuffle=True, initial_epoch=0)

/home/chawins/.conda/envs/tsa/lib/python2.7/site-packages/keras/engine/training.pyc in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1596                               initial_epoch=initial_epoch,
   1597                               steps_per_epoch=steps_per_epoch,
-> 1598                               validation_steps=validation_steps)
   1599 
   1600     def evaluate(self, x, y,

/home/chawins/.conda/envs/tsa/lib/python2.7/site-packages/keras/engine/training.pyc in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
   1181                     batch_logs['size'] = len(batch_ids)
   1182                     callbacks.on_batch_begin(batch_index, batch_logs)
-> 1183                     outs = f(ins_batch)
   1184                     if not isinstance(outs, list):
   1185                         outs = [outs]

/home/chawins/.conda/envs/tsa/lib/python2.7/site-packages/keras/backend/tensorflow_backend.pyc in __call__(self, inputs)
   2271         updated = session.run(self.outputs + [self.updates_op],
   2272                               feed_dict=feed_dict,
-> 2273                               **self.session_kwargs)
   2274         return updated[:len(self.outputs)]
   2275 

/home/chawins/.conda/envs/tsa/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in run(self, fetches, feed_dict, options, run_metadata)
    893     try:
    894       result = self._run(None, fetches, feed_dict, options_ptr,
--> 895                          run_metadata_ptr)
    896       if run_metadata:
    897         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/chawins/.conda/envs/tsa/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1103 
   1104           feed_dict_tensor[subfeed_t] = np_val
-> 1105           feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
   1106 
   1107     # Create a fetch handler to take care of the structure of fetches.

/home/chawins/.conda/envs/tsa/lib/python2.7/site-packages/tensorflow/python/framework/ops.pyc in name(self)
    282     if not self._op.name:
    283       raise ValueError("Operation was not named: %s" % self._op)
--> 284     return "%s:%d" % (self._op.name, self._value_index)
    285 
    286   @property

KeyboardInterrupt: