In [1]:
# Numbers
import numpy as np
import pandas as pd

# Serialization
import pickle

# Counting (used by print_stats below)
from collections import Counter

# Neural networks
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from tensorflow.python.client import device_lib

# Plotting
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns

from mouse_challenge_utils import *
from model import *

%matplotlib inline
sns.set(color_codes=True)


Using TensorFlow backend.

In [2]:
training_sessions, validation_sessions, testing_sessions = load_mouse_dynamics_dataset()

def print_stats(sessions, name):
    average_size = np.average([len(i['data']) for i in sessions])
    print('[*] Loaded {} {} sessions with an average of {:.2f} data points'.format(
        len(sessions), name, average_size))
    user_ids = [i['user_id'] for i in sessions]
    print('{} sessions per user: {}'.format(name, dict(Counter(user_ids))))
    
print_stats(training_sessions, 'training')
print_stats(validation_sessions, 'validation')
print_stats(testing_sessions, 'testing')
print()
print('[?] User 0 represents malicious user activity while User -1 represents an unknown user (unlabeled data)')


[*] Loaded 65 training sessions with an average of 31269.46 data points
training sessions per user: {35: 5, 7: 7, 9: 7, 12: 7, 15: 6, 16: 6, 20: 7, 21: 7, 23: 6, 29: 7}
[*] Loaded 411 validation sessions with an average of 1586.27 data points
validation sessions per user: {35: 35, 7: 36, 9: 23, 12: 56, 15: 45, 16: 68, 20: 30, 21: 37, 23: 38, 29: 43}
[*] Loaded 795 testing sessions with an average of 1358.64 data points
testing sessions per user: {-1: 795}

[?] User 0 represents malicious user activity while User -1 represents an unknown user (unlabeled data)

Convert the training and validation sessions into labeled examples. Each example is a sequence of seq_size consecutive data points, and each data point keeps the 13 preprocessed features: the client timestamp delta, the x and y positions, and the one-hot encoded state and button columns. The user labels are one-hot encoded and the examples are shuffled. A rough sketch of this windowing step follows.
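
As an illustration only, here is a minimal sketch of what a windowing helper such as data_to_machine_learning_examples might do, assuming non-overlapping windows per user and Keras-style one-hot labels; the actual helper imported above may differ in details such as window overlap or class ordering.

import numpy as np
from keras.utils.np_utils import to_categorical

def data_to_examples_sketch(df, seq_size):
    # Hypothetical re-implementation, for illustration only.
    classes = sorted(df['user_id'].unique())
    xs, ys = [], []
    for user_id, group in df.groupby('user_id'):
        features = group.drop(['user_id'], axis=1).values
        # Cut each user's stream into non-overlapping windows of seq_size points.
        for start in range(0, len(features) - seq_size + 1, seq_size):
            xs.append(features[start:start + seq_size])
            ys.append(classes.index(user_id))
    x = np.stack(xs)
    y = to_categorical(ys, num_classes=len(classes))  # one-hot user labels
    # Shuffle examples and labels together.
    perm = np.random.permutation(len(x))
    return x[perm], y[perm]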


In [3]:
df_train = sessions_to_dataframe(training_sessions)
df_val = sessions_to_dataframe(validation_sessions)
df_train.head()


Out[3]:
   record_dt  client_dt    button state    x    y  user_id
0      0.000      0.000  NoButton  Move  105  252       20
1      0.000      0.016  NoButton  Move  105  253       20
2      0.109      0.015  NoButton  Move  107  256       20
3      0.000      0.016  NoButton  Move  110  261       20
4      0.000      0.031  NoButton  Move  111  268       20

In [4]:
df_train = preprocess_data(df_train)
df_val = preprocess_data(df_val)
#### SPECIAL CASE #####
# There isn't any XButton data in the validation set, so we drop this column from the training set
# to keep the same number of features in both sets
df_train = df_train.drop(['XButton'], axis = 1)
#### SPECIAL CASE #####

In [5]:
df_train.head()


Out[5]:
   client_dt         x         y  user_id  Down  Drag  Move  Pressed  Released  Up  Left  NoButton  Right  Scroll
0  -0.959984 -0.980739 -0.574522       20     0     0     1        0         0   0     0         1      0       0
1  -0.734843 -0.980739 -0.570085       20     0     0     1        0         0   0     0         1      0       0
2  -0.748914 -0.974632 -0.556775       20     0     0     1        0         0   0     0         1      0       0
3  -0.734843 -0.965470 -0.534592       20     0     0     1        0         0   0     0         1      0       0
4  -0.523772 -0.962417 -0.503536       20     0     0     1        0         0   0     0         1      0       0
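
For context, here is a minimal sketch of preprocessing that would produce a dataframe like the one above, assuming min-max scaling of the numeric columns to [-1, 1] and pandas one-hot encoding of the state and button columns; the actual preprocess_data helper may differ.

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

def preprocess_data_sketch(df):
    # One-hot encode the categorical columns; the dummy column names
    # (Down, Drag, Move, ... and Left, NoButton, Right, Scroll, XButton)
    # match the columns shown above.
    df = pd.concat([df.drop(['button', 'state'], axis=1),
                    pd.get_dummies(df['state']),
                    pd.get_dummies(df['button'])], axis=1)
    # Drop the recording timestamp and rescale the remaining numeric features.
    df = df.drop(['record_dt'], axis=1)
    scaler = MinMaxScaler(feature_range=(-1, 1))
    df[['client_dt', 'x', 'y']] = scaler.fit_transform(df[['client_dt', 'x', 'y']])
    return df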

In [6]:
seq_size = 300

train_x, train_y = data_to_machine_learning_examples(df_train, seq_size)
print('[*] Generated training examples {} and labels {}'.format(train_x.shape, train_y.shape))
val_x, val_y = data_to_machine_learning_examples(df_val, seq_size)
print('[*] Generated validation examples {} and labels {}'.format(val_x.shape, val_y.shape))


[*] Generated training examples (6770, 300, 13) and labels (6770, 10)
[*] Generated validation examples (2167, 300, 13) and labels (2167, 10)

In [7]:
def print_model(model):
    print("[*] Sequential model created with the following layers:")
    for layer in model.layers:
        print("{:30}{} -> {}".format(layer.name, layer.input_shape, layer.output_shape))

In [10]:
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import TensorBoard

epochs = 200
batch_size = 30
learning_rate = 0.0001
batch_norm_momentum = 0.2
n_classes = 10
data_point_dimensionality = 13

# model = load_model('model/model_18.h5')
model = create_model_paper(input_shape = (seq_size, data_point_dimensionality),
                     classes = n_classes,
                     batch_norm_momentum = batch_norm_momentum,
                     l2_regularization = 0.01)

optimizer = Adam(lr=learning_rate)    
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    
cb_check = ModelCheckpoint('model/checkpoint', monitor='val_loss', verbose=1, period=30)
cb_reducelr = ReduceLROnPlateau(verbose=1)
cb_tensorboard = TensorBoard(log_dir='./logs', histogram_freq=30, write_graph=True)

hist = model.fit(train_x, train_y,
                 batch_size, epochs, 2,  # positional args: batch_size, epochs, verbose=2
                 validation_data=(val_x, val_y),
                 callbacks=[cb_reducelr])
#                  callbacks =[cb_check, cb_reducelr, cb_tensorboard])


Train on 6770 samples, validate on 2167 samples
Epoch 1/200
7s - loss: 1.2794 - acc: 0.6214 - val_loss: 1.2142 - val_acc: 0.6267
Epoch 2/200
6s - loss: 1.0547 - acc: 0.7112 - val_loss: 0.9964 - val_acc: 0.7393
Epoch 3/200
6s - loss: 0.9421 - acc: 0.7479 - val_loss: 1.0233 - val_acc: 0.6922
Epoch 4/200
6s - loss: 0.9030 - acc: 0.7640 - val_loss: 0.9441 - val_acc: 0.7480
Epoch 5/200
6s - loss: 0.8561 - acc: 0.7790 - val_loss: 0.9222 - val_acc: 0.7453
Epoch 6/200
6s - loss: 0.8317 - acc: 0.7889 - val_loss: 0.9075 - val_acc: 0.7554
Epoch 7/200
6s - loss: 0.7992 - acc: 0.8049 - val_loss: 0.9119 - val_acc: 0.7522
Epoch 8/200
6s - loss: 0.7751 - acc: 0.8047 - val_loss: 0.8904 - val_acc: 0.7739
Epoch 9/200
6s - loss: 0.7557 - acc: 0.8143 - val_loss: 0.7947 - val_acc: 0.8039
Epoch 10/200
6s - loss: 0.7375 - acc: 0.8230 - val_loss: 0.8382 - val_acc: 0.7743
Epoch 11/200
6s - loss: 0.7254 - acc: 0.8235 - val_loss: 0.8329 - val_acc: 0.7771
Epoch 12/200
6s - loss: 0.7194 - acc: 0.8263 - val_loss: 0.8257 - val_acc: 0.7803
Epoch 13/200
6s - loss: 0.6910 - acc: 0.8453 - val_loss: 0.7599 - val_acc: 0.8126
Epoch 14/200
6s - loss: 0.6858 - acc: 0.8390 - val_loss: 0.9486 - val_acc: 0.7577
Epoch 15/200
6s - loss: 0.6740 - acc: 0.8467 - val_loss: 0.7957 - val_acc: 0.8145
Epoch 16/200
6s - loss: 0.6637 - acc: 0.8424 - val_loss: 0.7592 - val_acc: 0.8126
Epoch 17/200
6s - loss: 0.6606 - acc: 0.8468 - val_loss: 0.7613 - val_acc: 0.8016
Epoch 18/200
6s - loss: 0.6459 - acc: 0.8548 - val_loss: 0.7326 - val_acc: 0.8339
Epoch 19/200
6s - loss: 0.6594 - acc: 0.8446 - val_loss: 0.7319 - val_acc: 0.8080
Epoch 20/200
6s - loss: 0.6237 - acc: 0.8623 - val_loss: 0.6786 - val_acc: 0.8468
Epoch 21/200
6s - loss: 0.6240 - acc: 0.8614 - val_loss: 0.6971 - val_acc: 0.8343
Epoch 22/200
6s - loss: 0.6298 - acc: 0.8549 - val_loss: 0.7436 - val_acc: 0.8173
Epoch 23/200
6s - loss: 0.6119 - acc: 0.8685 - val_loss: 0.7187 - val_acc: 0.8210
Epoch 24/200
6s - loss: 0.6043 - acc: 0.8687 - val_loss: 0.7126 - val_acc: 0.8228
Epoch 25/200
6s - loss: 0.6027 - acc: 0.8677 - val_loss: 0.7240 - val_acc: 0.8136
Epoch 26/200
6s - loss: 0.6060 - acc: 0.8632 - val_loss: 0.7183 - val_acc: 0.8228
Epoch 27/200
6s - loss: 0.5924 - acc: 0.8716 - val_loss: 0.7197 - val_acc: 0.8348
Epoch 28/200
6s - loss: 0.5939 - acc: 0.8668 - val_loss: 0.7137 - val_acc: 0.8210
Epoch 29/200
6s - loss: 0.5846 - acc: 0.8733 - val_loss: 0.6495 - val_acc: 0.8385
Epoch 30/200
6s - loss: 0.5668 - acc: 0.8787 - val_loss: 0.6935 - val_acc: 0.8357
Epoch 31/200
6s - loss: 0.5846 - acc: 0.8728 - val_loss: 0.6869 - val_acc: 0.8288
Epoch 32/200
6s - loss: 0.5740 - acc: 0.8762 - val_loss: 0.6915 - val_acc: 0.8343
Epoch 33/200
6s - loss: 0.5894 - acc: 0.8702 - val_loss: 0.7256 - val_acc: 0.8265
Epoch 34/200
6s - loss: 0.5880 - acc: 0.8665 - val_loss: 0.6761 - val_acc: 0.8256
Epoch 35/200
6s - loss: 0.5764 - acc: 0.8768 - val_loss: 0.6747 - val_acc: 0.8389
Epoch 36/200
6s - loss: 0.5546 - acc: 0.8814 - val_loss: 0.6976 - val_acc: 0.8279
Epoch 37/200
6s - loss: 0.5572 - acc: 0.8796 - val_loss: 0.6510 - val_acc: 0.8533
Epoch 38/200
6s - loss: 0.5536 - acc: 0.8823 - val_loss: 0.6837 - val_acc: 0.8306
Epoch 39/200
6s - loss: 0.5559 - acc: 0.8734 - val_loss: 0.6219 - val_acc: 0.8588
Epoch 40/200
6s - loss: 0.5385 - acc: 0.8861 - val_loss: 0.6781 - val_acc: 0.8399
Epoch 41/200
6s - loss: 0.5488 - acc: 0.8817 - val_loss: 0.6499 - val_acc: 0.8353
Epoch 42/200
6s - loss: 0.5585 - acc: 0.8764 - val_loss: 0.7553 - val_acc: 0.8025
Epoch 43/200
6s - loss: 0.5402 - acc: 0.8812 - val_loss: 0.6629 - val_acc: 0.8339
Epoch 44/200
6s - loss: 0.5435 - acc: 0.8845 - val_loss: 0.6647 - val_acc: 0.8413
Epoch 45/200
6s - loss: 0.5465 - acc: 0.8798 - val_loss: 0.6160 - val_acc: 0.8519
Epoch 46/200
6s - loss: 0.5303 - acc: 0.8867 - val_loss: 0.6654 - val_acc: 0.8288
Epoch 47/200
6s - loss: 0.5374 - acc: 0.8809 - val_loss: 0.6674 - val_acc: 0.8320
Epoch 48/200
6s - loss: 0.5342 - acc: 0.8857 - val_loss: 0.6513 - val_acc: 0.8616
Epoch 49/200
6s - loss: 0.5306 - acc: 0.8907 - val_loss: 0.6408 - val_acc: 0.8417
Epoch 50/200
6s - loss: 0.5470 - acc: 0.8838 - val_loss: 0.6464 - val_acc: 0.8454
Epoch 51/200
6s - loss: 0.5267 - acc: 0.8877 - val_loss: 0.6224 - val_acc: 0.8565
Epoch 52/200
6s - loss: 0.5201 - acc: 0.8951 - val_loss: 0.6270 - val_acc: 0.8588
Epoch 53/200
6s - loss: 0.5124 - acc: 0.8932 - val_loss: 0.6574 - val_acc: 0.8560
Epoch 54/200
6s - loss: 0.5140 - acc: 0.8903 - val_loss: 0.6240 - val_acc: 0.8542
Epoch 55/200
6s - loss: 0.5049 - acc: 0.8962 - val_loss: 0.6441 - val_acc: 0.8459
Epoch 56/200

Epoch 00055: reducing learning rate to 9.999999747378752e-06.
6s - loss: 0.5294 - acc: 0.8842 - val_loss: 0.6302 - val_acc: 0.8417
Epoch 57/200
6s - loss: 0.4608 - acc: 0.9164 - val_loss: 0.6426 - val_acc: 0.8486
Epoch 58/200
6s - loss: 0.4461 - acc: 0.9205 - val_loss: 0.6099 - val_acc: 0.8676
Epoch 59/200
6s - loss: 0.4319 - acc: 0.9284 - val_loss: 0.5698 - val_acc: 0.8717
Epoch 60/200
6s - loss: 0.4218 - acc: 0.9312 - val_loss: 0.5640 - val_acc: 0.8671
Epoch 61/200
6s - loss: 0.4297 - acc: 0.9295 - val_loss: 0.5667 - val_acc: 0.8740
Epoch 62/200
6s - loss: 0.4203 - acc: 0.9310 - val_loss: 0.5850 - val_acc: 0.8722
Epoch 63/200
6s - loss: 0.4199 - acc: 0.9332 - val_loss: 0.5454 - val_acc: 0.8772
Epoch 64/200
6s - loss: 0.4162 - acc: 0.9353 - val_loss: 0.5638 - val_acc: 0.8772
Epoch 65/200
6s - loss: 0.4132 - acc: 0.9349 - val_loss: 0.5223 - val_acc: 0.8929
Epoch 66/200
6s - loss: 0.4112 - acc: 0.9343 - val_loss: 0.5735 - val_acc: 0.8671
Epoch 67/200
6s - loss: 0.4100 - acc: 0.9346 - val_loss: 0.5671 - val_acc: 0.8694
Epoch 68/200
6s - loss: 0.4104 - acc: 0.9347 - val_loss: 0.5713 - val_acc: 0.8740
Epoch 69/200
6s - loss: 0.4050 - acc: 0.9397 - val_loss: 0.5609 - val_acc: 0.8754
Epoch 70/200
6s - loss: 0.4027 - acc: 0.9394 - val_loss: 0.5587 - val_acc: 0.8791
Epoch 71/200
6s - loss: 0.4003 - acc: 0.9388 - val_loss: 0.5246 - val_acc: 0.8869
Epoch 72/200
6s - loss: 0.3970 - acc: 0.9396 - val_loss: 0.5282 - val_acc: 0.8865
Epoch 73/200
6s - loss: 0.3944 - acc: 0.9393 - val_loss: 0.5714 - val_acc: 0.8639
Epoch 74/200
6s - loss: 0.3980 - acc: 0.9403 - val_loss: 0.5945 - val_acc: 0.8551
Epoch 75/200
6s - loss: 0.3945 - acc: 0.9406 - val_loss: 0.5409 - val_acc: 0.8892
Epoch 76/200

Epoch 00075: reducing learning rate to 9.999999747378752e-07.
6s - loss: 0.3999 - acc: 0.9394 - val_loss: 0.5651 - val_acc: 0.8722
Epoch 77/200
6s - loss: 0.3830 - acc: 0.9434 - val_loss: 0.5294 - val_acc: 0.8943
Epoch 78/200
6s - loss: 0.3809 - acc: 0.9476 - val_loss: 0.5401 - val_acc: 0.8957
Epoch 79/200
6s - loss: 0.3809 - acc: 0.9430 - val_loss: 0.5103 - val_acc: 0.8962
Epoch 80/200
6s - loss: 0.3811 - acc: 0.9465 - val_loss: 0.5319 - val_acc: 0.8860
Epoch 81/200
6s - loss: 0.3811 - acc: 0.9483 - val_loss: 0.5713 - val_acc: 0.8791
Epoch 82/200
6s - loss: 0.3822 - acc: 0.9462 - val_loss: 0.5065 - val_acc: 0.9026
Epoch 83/200
6s - loss: 0.3756 - acc: 0.9486 - val_loss: 0.5249 - val_acc: 0.8943
Epoch 84/200
6s - loss: 0.3801 - acc: 0.9482 - val_loss: 0.5099 - val_acc: 0.8943
Epoch 85/200
6s - loss: 0.3797 - acc: 0.9458 - val_loss: 0.5146 - val_acc: 0.8906
Epoch 86/200
6s - loss: 0.3794 - acc: 0.9479 - val_loss: 0.5502 - val_acc: 0.8786
Epoch 87/200
6s - loss: 0.3783 - acc: 0.9459 - val_loss: 0.5445 - val_acc: 0.8768
Epoch 88/200
6s - loss: 0.3802 - acc: 0.9479 - val_loss: 0.5451 - val_acc: 0.8796
Epoch 89/200
6s - loss: 0.3802 - acc: 0.9468 - val_loss: 0.5272 - val_acc: 0.8874
Epoch 90/200
6s - loss: 0.3781 - acc: 0.9479 - val_loss: 0.5179 - val_acc: 0.8879
Epoch 91/200
6s - loss: 0.3778 - acc: 0.9486 - val_loss: 0.5464 - val_acc: 0.8791
Epoch 92/200
6s - loss: 0.3784 - acc: 0.9479 - val_loss: 0.5741 - val_acc: 0.8611
Epoch 93/200

Epoch 00092: reducing learning rate to 9.999999974752428e-08.
6s - loss: 0.3797 - acc: 0.9453 - val_loss: 0.5598 - val_acc: 0.8722
Epoch 94/200
6s - loss: 0.3776 - acc: 0.9484 - val_loss: 0.4976 - val_acc: 0.8957
Epoch 95/200
6s - loss: 0.3783 - acc: 0.9521 - val_loss: 0.5686 - val_acc: 0.8805
Epoch 96/200
6s - loss: 0.3774 - acc: 0.9511 - val_loss: 0.5201 - val_acc: 0.8929
Epoch 97/200
6s - loss: 0.3760 - acc: 0.9480 - val_loss: 0.5559 - val_acc: 0.8666
Epoch 98/200
6s - loss: 0.3770 - acc: 0.9465 - val_loss: 0.4951 - val_acc: 0.9012
Epoch 99/200
6s - loss: 0.3791 - acc: 0.9487 - val_loss: 0.5103 - val_acc: 0.9003
Epoch 100/200
6s - loss: 0.3738 - acc: 0.9511 - val_loss: 0.5262 - val_acc: 0.8874
Epoch 101/200
6s - loss: 0.3728 - acc: 0.9498 - val_loss: 0.9946 - val_acc: 0.6502
Epoch 102/200
6s - loss: 0.3752 - acc: 0.9484 - val_loss: 0.5649 - val_acc: 0.8772
Epoch 103/200
6s - loss: 0.3770 - acc: 0.9477 - val_loss: 0.5291 - val_acc: 0.8828
Epoch 104/200
6s - loss: 0.3734 - acc: 0.9514 - val_loss: 0.5133 - val_acc: 0.8980
Epoch 105/200
6s - loss: 0.3770 - acc: 0.9498 - val_loss: 0.5184 - val_acc: 0.8860
Epoch 106/200
6s - loss: 0.3744 - acc: 0.9498 - val_loss: 0.5182 - val_acc: 0.8906
Epoch 107/200
6s - loss: 0.3794 - acc: 0.9482 - val_loss: 0.5320 - val_acc: 0.8943
Epoch 108/200
6s - loss: 0.3778 - acc: 0.9476 - val_loss: 0.5555 - val_acc: 0.8768
Epoch 109/200

Epoch 00108: reducing learning rate to 1.0000000116860975e-08.
6s - loss: 0.3763 - acc: 0.9479 - val_loss: 0.5116 - val_acc: 0.8948
Epoch 110/200
6s - loss: 0.3740 - acc: 0.9465 - val_loss: 0.5601 - val_acc: 0.8745
Epoch 111/200
6s - loss: 0.3763 - acc: 0.9476 - val_loss: 0.5246 - val_acc: 0.8916
Epoch 112/200
6s - loss: 0.3773 - acc: 0.9489 - val_loss: 0.5042 - val_acc: 0.8920
Epoch 113/200
6s - loss: 0.3763 - acc: 0.9498 - val_loss: 0.5303 - val_acc: 0.8920
Epoch 114/200
6s - loss: 0.3774 - acc: 0.9486 - val_loss: 0.5209 - val_acc: 0.8902
Epoch 115/200
6s - loss: 0.3767 - acc: 0.9518 - val_loss: 0.5541 - val_acc: 0.8772
Epoch 116/200
6s - loss: 0.3767 - acc: 0.9504 - val_loss: 0.5228 - val_acc: 0.8860
Epoch 117/200
6s - loss: 0.3747 - acc: 0.9483 - val_loss: 0.5220 - val_acc: 0.8888
Epoch 118/200
6s - loss: 0.3761 - acc: 0.9490 - val_loss: 0.5089 - val_acc: 0.8929
Epoch 119/200

Epoch 00118: reducing learning rate to 9.999999939225292e-10.
6s - loss: 0.3755 - acc: 0.9470 - val_loss: 0.5157 - val_acc: 0.9054
Epoch 120/200
6s - loss: 0.3742 - acc: 0.9492 - val_loss: 0.5049 - val_acc: 0.8957
Epoch 121/200
6s - loss: 0.3732 - acc: 0.9518 - val_loss: 0.4983 - val_acc: 0.8985
Epoch 122/200
6s - loss: 0.3778 - acc: 0.9477 - val_loss: 0.5315 - val_acc: 0.8874
Epoch 123/200
6s - loss: 0.3743 - acc: 0.9501 - val_loss: 0.5119 - val_acc: 0.8860
Epoch 124/200
6s - loss: 0.3741 - acc: 0.9505 - val_loss: 0.5529 - val_acc: 0.8856
Epoch 125/200
6s - loss: 0.3740 - acc: 0.9504 - val_loss: 0.5106 - val_acc: 0.8971
Epoch 126/200
6s - loss: 0.3795 - acc: 0.9467 - val_loss: 0.5225 - val_acc: 0.8934
Epoch 127/200
6s - loss: 0.3741 - acc: 0.9502 - val_loss: 0.5506 - val_acc: 0.8772
Epoch 128/200
6s - loss: 0.3782 - acc: 0.9486 - val_loss: 0.5089 - val_acc: 0.8985
Epoch 129/200

Epoch 00128: reducing learning rate to 9.999999717180686e-11.
6s - loss: 0.3803 - acc: 0.9465 - val_loss: 0.5864 - val_acc: 0.8634
Epoch 130/200
6s - loss: 0.3735 - acc: 0.9482 - val_loss: 0.5220 - val_acc: 0.8837
Epoch 131/200
6s - loss: 0.3791 - acc: 0.9487 - val_loss: 0.5971 - val_acc: 0.8786
Epoch 132/200
6s - loss: 0.3755 - acc: 0.9495 - val_loss: 0.5507 - val_acc: 0.8699
Epoch 133/200
6s - loss: 0.3743 - acc: 0.9493 - val_loss: 0.5262 - val_acc: 0.8846
Epoch 134/200
6s - loss: 0.3784 - acc: 0.9470 - val_loss: 0.5309 - val_acc: 0.8943
Epoch 135/200
6s - loss: 0.3719 - acc: 0.9507 - val_loss: 0.5068 - val_acc: 0.8943
Epoch 136/200
6s - loss: 0.3766 - acc: 0.9490 - val_loss: 0.5301 - val_acc: 0.8925
Epoch 137/200
6s - loss: 0.3755 - acc: 0.9516 - val_loss: 0.5113 - val_acc: 0.8985
Epoch 138/200
6s - loss: 0.3759 - acc: 0.9479 - val_loss: 0.5429 - val_acc: 0.8846
Epoch 139/200

Epoch 00138: reducing learning rate to 9.99999943962493e-12.
6s - loss: 0.3744 - acc: 0.9501 - val_loss: 0.5265 - val_acc: 0.8842
Epoch 140/200
6s - loss: 0.3797 - acc: 0.9467 - val_loss: 0.5756 - val_acc: 0.8731
Epoch 141/200
6s - loss: 0.3769 - acc: 0.9479 - val_loss: 0.5326 - val_acc: 0.8916
Epoch 142/200
6s - loss: 0.3742 - acc: 0.9514 - val_loss: 0.5047 - val_acc: 0.9031
Epoch 143/200
6s - loss: 0.3741 - acc: 0.9510 - val_loss: 0.5033 - val_acc: 0.8952
Epoch 144/200
6s - loss: 0.3768 - acc: 0.9493 - val_loss: 0.5325 - val_acc: 0.8865
Epoch 145/200
6s - loss: 0.3769 - acc: 0.9495 - val_loss: 0.5298 - val_acc: 0.8902
Epoch 146/200
6s - loss: 0.3774 - acc: 0.9518 - val_loss: 0.5182 - val_acc: 0.8934
Epoch 147/200
6s - loss: 0.3727 - acc: 0.9508 - val_loss: 0.5376 - val_acc: 0.8763
Epoch 148/200
6s - loss: 0.3750 - acc: 0.9501 - val_loss: 0.5245 - val_acc: 0.8911
Epoch 149/200

Epoch 00148: reducing learning rate to 9.999999092680235e-13.
6s - loss: 0.3735 - acc: 0.9513 - val_loss: 0.5146 - val_acc: 0.8902
Epoch 150/200
6s - loss: 0.3758 - acc: 0.9486 - val_loss: 0.5114 - val_acc: 0.8934
Epoch 151/200
6s - loss: 0.3773 - acc: 0.9489 - val_loss: 0.5129 - val_acc: 0.8943
Epoch 152/200
6s - loss: 0.3774 - acc: 0.9492 - val_loss: 0.5096 - val_acc: 0.8994
Epoch 153/200
6s - loss: 0.3764 - acc: 0.9484 - val_loss: 0.5331 - val_acc: 0.8948
Epoch 154/200
6s - loss: 0.3771 - acc: 0.9489 - val_loss: 0.5207 - val_acc: 0.8934
Epoch 155/200
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-10-c147e19f8bbc> in <module>()
     27                  batch_size, epochs, 2,
     28                  validation_data=(val_x, val_y),
---> 29                  callbacks = [cb_reducelr])
     30 #                  callbacks =[cb_check, cb_reducelr, cb_tensorboard])

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/keras/models.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
    843                               class_weight=class_weight,
    844                               sample_weight=sample_weight,
--> 845                               initial_epoch=initial_epoch)
    846 
    847     def evaluate(self, x, y, batch_size=32, verbose=1,

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
   1483                               val_f=val_f, val_ins=val_ins, shuffle=shuffle,
   1484                               callback_metrics=callback_metrics,
-> 1485                               initial_epoch=initial_epoch)
   1486 
   1487     def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch)
   1138                 batch_logs['size'] = len(batch_ids)
   1139                 callbacks.on_batch_begin(batch_index, batch_logs)
-> 1140                 outs = f(ins_batch)
   1141                 if not isinstance(outs, list):
   1142                     outs = [outs]

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2071         session = get_session()
   2072         updated = session.run(self.outputs + [self.updates_op],
-> 2073                               feed_dict=feed_dict)
   2074         return updated[:len(self.outputs)]
   2075 

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    765     try:
    766       result = self._run(None, fetches, feed_dict, options_ptr,
--> 767                          run_metadata_ptr)
    768       if run_metadata:
    769         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    963     if final_fetches or final_targets:
    964       results = self._do_run(handle, final_targets, final_fetches,
--> 965                              feed_dict_string, options, run_metadata)
    966     else:
    967       results = []

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1013     if handle is None:
   1014       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1015                            target_list, options, run_metadata)
   1016     else:
   1017       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1020   def _do_call(self, fn, *args):
   1021     try:
-> 1022       return fn(*args)
   1023     except errors.OpError as e:
   1024       message = compat.as_text(e.message)

/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1002         return tf_session.TF_Run(session, options,
   1003                                  feed_dict, fetch_list, target_list,
-> 1004                                  status, run_metadata)
   1005 
   1006     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

In [17]:
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])


Out[17]:
[<matplotlib.lines.Line2D at 0x7fa80d04fcf8>]
[figure: training and validation loss per epoch]

In [18]:
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])


Out[18]:
[<matplotlib.lines.Line2D at 0x7fa806a432b0>]
[figure: training and validation accuracy per epoch]

t-SNE

Reload the trained model, take the 128-dimensional output of the global average pooling layer as a feature vector for each training example, and project these features to two dimensions with t-SNE to visualize how well the network separates the ten users.


In [8]:
from keras.models import load_model

model = load_model('model/model_18.h5')

def print_model(model):
    print("[*] Sequential model created with the following layers:")
    for layer in model.layers:
        print("{:30}{} -> {}".format(layer.name, layer.input_shape, layer.output_shape))

In [9]:
print_model(model)


[*] Sequential model created with the following layers:
conv1d_1                      (None, 300, 13) -> (None, 300, 128)
batch_normalization_1         (None, 300, 128) -> (None, 300, 128)
leaky_re_lu_1                 (None, 300, 128) -> (None, 300, 128)
conv1d_2                      (None, 300, 128) -> (None, 300, 256)
batch_normalization_2         (None, 300, 256) -> (None, 300, 256)
leaky_re_lu_2                 (None, 300, 256) -> (None, 300, 256)
conv1d_3                      (None, 300, 256) -> (None, 300, 128)
batch_normalization_3         (None, 300, 128) -> (None, 300, 128)
leaky_re_lu_3                 (None, 300, 128) -> (None, 300, 128)
global_average_pooling1d_1    (None, 300, 128) -> (None, 128)
dense_1                       (None, 128) -> (None, 10)
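
For reference, a minimal sketch of a Keras model that reproduces the layer shapes printed above; the kernel sizes (8, 5, 3) and the exact placement of the L2 regularizers are assumptions, so create_model_paper may differ.

from keras.models import Sequential
from keras.layers import Conv1D, BatchNormalization, LeakyReLU, GlobalAveragePooling1D, Dense
from keras.regularizers import l2

def create_model_paper_sketch(input_shape, classes, batch_norm_momentum=0.2, l2_regularization=0.01):
    model = Sequential()
    # Three Conv1D blocks; 'same' padding keeps the sequence length at 300.
    model.add(Conv1D(128, 8, padding='same', kernel_regularizer=l2(l2_regularization),
                     input_shape=input_shape))
    model.add(BatchNormalization(momentum=batch_norm_momentum))
    model.add(LeakyReLU())
    model.add(Conv1D(256, 5, padding='same', kernel_regularizer=l2(l2_regularization)))
    model.add(BatchNormalization(momentum=batch_norm_momentum))
    model.add(LeakyReLU())
    model.add(Conv1D(128, 3, padding='same', kernel_regularizer=l2(l2_regularization)))
    model.add(BatchNormalization(momentum=batch_norm_momentum))
    model.add(LeakyReLU())
    # Global average pooling collapses the time axis, then a softmax over the 10 users.
    model.add(GlobalAveragePooling1D())
    model.add(Dense(classes, activation='softmax'))
    return model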

In [10]:
from keras.models import Model

layer_name = 'global_average_pooling1d_1'
intermediate_layer_model = Model(inputs=model.input,
                                 outputs=model.get_layer(layer_name).output)

In [18]:
intermediate_output = intermediate_layer_model.predict(train_x)
y_data = model.predict(train_x)

In [19]:
intermediate_output.shape


Out[19]:
(6770, 128)

In [20]:
y_data_nums = [np.argmax(row) for row in y_data]

In [21]:
from sklearn.manifold import TSNE

tsne_model = TSNE(n_components=2, random_state=0)
np.set_printoptions(suppress=True)
result = tsne_model.fit_transform(intermediate_output)

print(result)


[[ -7.44866775   6.68743613]
 [ 11.15769077   1.97122216]
 [ -7.39057814  -0.67059351]
 ..., 
 [  2.16120788   1.82891381]
 [  8.272329     1.877645  ]
 [  7.85698169  -3.96640998]]

In [15]:
import seaborn as sns
sns.set(style="white", color_codes=True)
g = sns.jointplot(x=result[:,0], y=result[:,1])


[figure: seaborn jointplot of the two t-SNE components]

In [24]:
plt.figure(1, figsize=(12, 10))
plt.scatter(result[:,0], result[:,1], c=y_data_nums, cmap=plt.cm.get_cmap("jet"))
# plt.scatter(result[:,0], result[:,1])
plt.colorbar(ticks=range(10))
plt.clim(-0.5, 9.5)
plt.show()


[figure: t-SNE projection of the 128-dimensional features, colored by predicted user class]

In [ ]: