Time Series Movement Prediction


In [1]:
from IPython.display import Image
import pandas as pd
import numpy as np

In [13]:
# Display the MovementAAL dataset illustration
Image("MovementAAL.jpg", width=600, height=400)


Out[13]:

Data Exploration


In [16]:
# Each file holds RSS readings collected from 4 sensors, one row per second
rss_sample = pd.read_csv('dataset/MovementAAL_RSS_1.csv')
rss_sample.head()


Out[16]:
#RSS_anchor1 RSS_anchor2 RSS_anchor3 RSS_anchor4
0 -0.90476 -0.48 0.28571 0.30
1 -0.57143 -0.32 0.14286 0.30
2 -0.38095 -0.28 -0.14286 0.35
3 -0.28571 -0.20 -0.47619 0.35
4 -0.14286 -0.20 0.14286 -0.20

In [24]:
# class_label: 1 = movement, -1 = no movement.
# 314 rows, one label per MovementAAL_RSS_<i>.csv file (314 files in total).

target = pd.read_csv('dataset/MovementAAL_target.csv')
class_counts = target[' class_label'].value_counts()  # note the leading space in the column name
print(class_counts)
target.head()


 1    158
-1    156
Name:  class_label, dtype: int64
Out[24]:
#sequence_ID class_label
0 1 1
1 2 1
2 3 1
3 4 1
4 5 1

In [29]:
# `groups` separates the 314 files into 3 dataset groups, derived from the
# 6 movement-path types.
# Plan: group 2 -> training, group 1 -> validation, group 3 -> testing.

groups = pd.read_csv('groups/MovementAAL_DatasetGroup.csv')
group_counts = groups[' dataset_ID'].value_counts()  # note the leading space in the column name
print(group_counts)
groups.head()


2    106
3    104
1    104
Name:  dataset_ID, dtype: int64
Out[29]:
#sequence_ID dataset_ID
0 1 1
1 2 1
2 3 1
3 4 1
4 5 1

In [31]:
# Illustration of the 6 movement-path types
print('6 Paths')
Image("6paths.png", width=500, height=300)


6 Paths
Out[31]:

In [32]:
# Illustration of the 3 dataset groups
print('3 Groups')
Image("3groups.png", width=500, height=300)


3 Groups
Out[32]:

Data Preprocessing


In [2]:
# Load all 314 time-series files into a list of numpy arrays,
# one (timesteps x 4 sensors) array per file.
file_lst = []

ts_folder = 'dataset/'
for file_no in range(1, 315):
    file_path = ts_folder + 'MovementAAL_RSS_' + str(file_no) + '.csv'
    file_lst.append(pd.read_csv(file_path).values)

In [3]:
file_lst[0]


Out[3]:
array([[-0.90476 , -0.48    ,  0.28571 ,  0.3     ],
       [-0.57143 , -0.32    ,  0.14286 ,  0.3     ],
       [-0.38095 , -0.28    , -0.14286 ,  0.35    ],
       [-0.28571 , -0.2     , -0.47619 ,  0.35    ],
       [-0.14286 , -0.2     ,  0.14286 , -0.2     ],
       [-0.14286 , -0.2     ,  0.047619,  0.      ],
       [-0.14286 , -0.16    , -0.38095 ,  0.2     ],
       [-0.14286 , -0.04    , -0.61905 , -0.2     ],
       [-0.095238, -0.08    ,  0.14286 , -0.55    ],
       [-0.047619,  0.04    , -0.095238,  0.05    ],
       [-0.19048 , -0.04    ,  0.095238,  0.4     ],
       [-0.095238, -0.04    , -0.14286 ,  0.35    ],
       [-0.33333 , -0.08    , -0.28571 , -0.2     ],
       [-0.2381  ,  0.04    ,  0.14286 ,  0.35    ],
       [ 0.      ,  0.08    ,  0.14286 ,  0.05    ],
       [-0.095238,  0.04    ,  0.095238,  0.1     ],
       [-0.14286 , -0.2     ,  0.14286 ,  0.5     ],
       [-0.19048 ,  0.04    , -0.42857 ,  0.3     ],
       [-0.14286 , -0.08    , -0.2381  ,  0.15    ],
       [-0.33333 ,  0.16    , -0.14286 , -0.8     ],
       [-0.42857 ,  0.16    , -0.28571 , -0.1     ],
       [-0.71429 ,  0.16    , -0.28571 ,  0.2     ],
       [-0.095238, -0.08    ,  0.095238,  0.35    ],
       [-0.28571 ,  0.04    ,  0.14286 ,  0.2     ],
       [ 0.      ,  0.04    ,  0.14286 ,  0.1     ],
       [ 0.      ,  0.04    , -0.047619, -0.05    ],
       [-0.14286 , -0.6     , -0.28571 , -0.1     ]])

In [4]:
# Most annoying part: make every file the same length.
# Strategy: pad each file (repeating its last row) up to the max length,
# then truncate everything to the 90th-percentile length.

# Inspect the length distribution to pick those cut-offs.
file_len_lst = [len(f) for f in file_lst]
len_series = pd.Series(file_len_lst)
print(len_series.describe())
print(len_series.quantile(0.9))  # 90th percentile length


count    314.000000
mean      42.028662
std       16.185303
min       19.000000
25%       26.000000
50%       41.000000
75%       56.000000
max      129.000000
dtype: float64
61.0

In [5]:
# Pad every sequence up to the max length (129) by repeating its last row.
# The original version appended one row at a time with np.vstack inside a
# loop, re-copying the whole array on every iteration (O(n^2) per file);
# here each file is padded with a single vectorized np.tile + np.vstack.
max_len = int(pd.Series(file_len_lst).describe()['max'])
print(max_len)

for i, seq in enumerate(file_lst):
    pad_rows = max_len - len(seq)
    if pad_rows > 0:
        # np.tile(seq[-1], (pad_rows, 1)) builds all the repeated last rows at once
        file_lst[i] = np.vstack((seq, np.tile(seq[-1], (pad_rows, 1))))

print(len(file_lst[0]), len(file_lst[-1]))


129
(129, 129)

In [6]:
# Now truncate each padded sequence down to a fixed length near the 90th
# percentile, balancing information loss against excessive padding.
from keras.preprocessing import sequence

seq_len = 60  # NOTE(review): the 90th percentile above is 61, not 60 -- confirm 60 was intended
final_seq = sequence.pad_sequences(file_lst, maxlen=seq_len, padding='post', dtype='float', truncating='post')
print(len(final_seq), len(final_seq[0]))


Using TensorFlow backend.
(314, 60)

In [7]:
# Load the labels (1 = movement, -1 = none) and keep them as a numpy array
label_df = pd.read_csv('dataset/MovementAAL_target.csv')
labels = label_df.iloc[:, 1].values
labels


Out[7]:
array([ 1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1, -1, -1, -1, -1,
       -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
       -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
       -1, -1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1, -1, -1,
       -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
       -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1, -1, -1, -1, -1,
       -1, -1, -1, -1, -1, -1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1, -1,
       -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
       -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
       -1, -1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
        1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1, -1, -1, -1, -1, -1,
       -1, -1, -1, -1, -1, -1, -1, -1])

In [10]:
# Group assignment: group 2 -> training, group 1 -> validation, group 3 -> testing
groups = pd.read_csv('groups/MovementAAL_DatasetGroup.csv')
group_counts = groups[' dataset_ID'].value_counts()  # note the leading space in the column name
print(group_counts)
groups.head()


2    106
3    104
1    104
Name:  dataset_ID, dtype: int64
Out[10]:
#sequence_ID dataset_ID
0 1 1
1 2 1
2 3 1
3 4 1
4 5 1

In [16]:
groups.values[1]


Out[16]:
array([2, 1])

In [34]:
# Partition the sequences by dataset group (column 1 of each groups row):
# group 2 -> train, group 1 -> validation, group 3 -> test
train_data = [final_seq[i] for i, row in enumerate(groups.values) if row[1] == 2]
val_data = [final_seq[i] for i, row in enumerate(groups.values) if row[1] == 1]
test_data = [final_seq[i] for i, row in enumerate(groups.values) if row[1] == 3]

In [35]:
# Partition the labels with exactly the same group membership as the data
train_labels = [labels[i] for i, row in enumerate(groups.values) if row[1] == 2]
val_labels = [labels[i] for i, row in enumerate(groups.values) if row[1] == 1]
test_labels = [labels[i] for i, row in enumerate(groups.values) if row[1] == 3]

In [36]:
# Convert the python lists of sequences into 3-D numpy arrays
train, val, test = (np.array(d) for d in (train_data, val_data, test_data))

In [38]:
print(train.shape)  # (num_files, seq_len, num_sensors) = (106, 60, 4)
train[0, 0]  # first timestep of the first training sequence


(106, 60, 4)
Out[38]:
array([ 0.066667,  0.38462 , -1.      , -0.61905 ])

In [39]:
# Convert the label lists into numpy arrays
train_target, val_target, test_target = (
    np.array(l) for l in (train_labels, val_labels, test_labels)
)
print(train_target.shape)


(106,)

In [40]:
# Remap targets from {-1, 1} to {0, 1} so they match the sigmoid /
# binary-crossentropy output of the model
train_target, val_target, test_target = (
    (t + 1) / 2 for t in (train_target, val_target, test_target)
)

Model Prediction

  • Use an LSTM sequence model

In [41]:
from keras.preprocessing import sequence
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import ModelCheckpoint

import matplotlib.pyplot as plt
%matplotlib inline

In [42]:
# Binary classifier: one LSTM layer (256 units) over (seq_len, 4) inputs,
# followed by a single sigmoid unit for the movement probability
model = Sequential([
    LSTM(256, input_shape=(seq_len, 4)),
    Dense(1, activation='sigmoid'),
])

In [43]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
lstm_2 (LSTM)                (None, 256)               267264    
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 257       
=================================================================
Total params: 267,521
Trainable params: 267,521
Non-trainable params: 0
_________________________________________________________________

In [44]:
adam = Adam(lr=0.001)
# The checkpoint keeps only the model with the best validation accuracy,
# acting as a crude guard against overfitting during the 200 epochs.
# NOTE(review): the saved file is a Keras HDF5 model despite the '.pkl'
# extension -- consider renaming to 'best_model.h5' (would require updating
# any code that loads it).
chk = ModelCheckpoint('best_model.pkl', monitor='val_acc', save_best_only=True, mode='max', verbose=1)

model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
# batch_size=128 exceeds the 106 training samples, so each epoch is a single
# full-batch gradient update.
model.fit(train, train_target, epochs=200, batch_size=128, callbacks=[chk], validation_data=(val,val_target))


Train on 106 samples, validate on 104 samples
Epoch 1/200
106/106 [==============================] - 2s 18ms/step - loss: 0.6960 - acc: 0.4623 - val_loss: 0.6859 - val_acc: 0.5096

Epoch 00001: val_acc improved from -inf to 0.50962, saving model to best_model.pkl
Epoch 2/200
106/106 [==============================] - 0s 5ms/step - loss: 0.6835 - acc: 0.5849 - val_loss: 0.6758 - val_acc: 0.5481

Epoch 00002: val_acc improved from 0.50962 to 0.54808, saving model to best_model.pkl
Epoch 3/200
106/106 [==============================] - 0s 5ms/step - loss: 0.6723 - acc: 0.6132 - val_loss: 0.6669 - val_acc: 0.5865

Epoch 00003: val_acc improved from 0.54808 to 0.58654, saving model to best_model.pkl
Epoch 4/200
106/106 [==============================] - 1s 5ms/step - loss: 0.6618 - acc: 0.6226 - val_loss: 0.6572 - val_acc: 0.5769

Epoch 00004: val_acc did not improve from 0.58654
Epoch 5/200
106/106 [==============================] - 1s 5ms/step - loss: 0.6486 - acc: 0.6415 - val_loss: 0.6478 - val_acc: 0.5673

Epoch 00005: val_acc did not improve from 0.58654
Epoch 6/200
106/106 [==============================] - 0s 4ms/step - loss: 0.6317 - acc: 0.6698 - val_loss: 0.6433 - val_acc: 0.6058

Epoch 00006: val_acc improved from 0.58654 to 0.60577, saving model to best_model.pkl
Epoch 7/200
106/106 [==============================] - 0s 4ms/step - loss: 0.6063 - acc: 0.6792 - val_loss: 0.6777 - val_acc: 0.5962

Epoch 00007: val_acc did not improve from 0.60577
Epoch 8/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5567 - acc: 0.7453 - val_loss: 0.9922 - val_acc: 0.5769

Epoch 00008: val_acc did not improve from 0.60577
Epoch 9/200
106/106 [==============================] - 0s 5ms/step - loss: 0.4601 - acc: 0.8019 - val_loss: 1.7768 - val_acc: 0.5865

Epoch 00009: val_acc did not improve from 0.60577
Epoch 10/200
106/106 [==============================] - 0s 4ms/step - loss: 0.6330 - acc: 0.7925 - val_loss: 1.5308 - val_acc: 0.5577

Epoch 00010: val_acc did not improve from 0.60577
Epoch 11/200
106/106 [==============================] - 0s 5ms/step - loss: 0.4730 - acc: 0.8208 - val_loss: 1.0987 - val_acc: 0.5577

Epoch 00011: val_acc did not improve from 0.60577
Epoch 12/200
106/106 [==============================] - 1s 5ms/step - loss: 0.4653 - acc: 0.8302 - val_loss: 0.8604 - val_acc: 0.5673

Epoch 00012: val_acc did not improve from 0.60577
Epoch 13/200
106/106 [==============================] - 1s 9ms/step - loss: 0.4990 - acc: 0.8113 - val_loss: 0.7591 - val_acc: 0.5673

Epoch 00013: val_acc did not improve from 0.60577
Epoch 14/200
106/106 [==============================] - 1s 7ms/step - loss: 0.5109 - acc: 0.7925 - val_loss: 0.7139 - val_acc: 0.5962

Epoch 00014: val_acc did not improve from 0.60577
Epoch 15/200
106/106 [==============================] - 1s 7ms/step - loss: 0.5109 - acc: 0.7925 - val_loss: 0.7001 - val_acc: 0.5962

Epoch 00015: val_acc did not improve from 0.60577
Epoch 16/200
106/106 [==============================] - 1s 7ms/step - loss: 0.5034 - acc: 0.7925 - val_loss: 0.7166 - val_acc: 0.6250

Epoch 00016: val_acc improved from 0.60577 to 0.62500, saving model to best_model.pkl
Epoch 17/200
106/106 [==============================] - 1s 6ms/step - loss: 0.4836 - acc: 0.8019 - val_loss: 0.7673 - val_acc: 0.6058

Epoch 00017: val_acc did not improve from 0.62500
Epoch 18/200
106/106 [==============================] - 1s 7ms/step - loss: 0.4445 - acc: 0.8208 - val_loss: 0.8472 - val_acc: 0.6154

Epoch 00018: val_acc did not improve from 0.62500
Epoch 19/200
106/106 [==============================] - 1s 5ms/step - loss: 0.4028 - acc: 0.8396 - val_loss: 0.9406 - val_acc: 0.5673

Epoch 00019: val_acc did not improve from 0.62500
Epoch 20/200
106/106 [==============================] - 1s 6ms/step - loss: 0.3967 - acc: 0.8302 - val_loss: 1.0081 - val_acc: 0.5769

Epoch 00020: val_acc did not improve from 0.62500
Epoch 21/200
106/106 [==============================] - 1s 6ms/step - loss: 0.4087 - acc: 0.8113 - val_loss: 1.0275 - val_acc: 0.5385

Epoch 00021: val_acc did not improve from 0.62500
Epoch 22/200
106/106 [==============================] - 1s 7ms/step - loss: 0.3863 - acc: 0.8396 - val_loss: 1.0090 - val_acc: 0.5385

Epoch 00022: val_acc did not improve from 0.62500
Epoch 23/200
106/106 [==============================] - 1s 6ms/step - loss: 0.3733 - acc: 0.8302 - val_loss: 0.9649 - val_acc: 0.5288

Epoch 00023: val_acc did not improve from 0.62500
Epoch 24/200
106/106 [==============================] - 1s 6ms/step - loss: 0.3627 - acc: 0.8208 - val_loss: 0.9198 - val_acc: 0.5385

Epoch 00024: val_acc did not improve from 0.62500
Epoch 25/200
106/106 [==============================] - 1s 5ms/step - loss: 0.3423 - acc: 0.8491 - val_loss: 0.9249 - val_acc: 0.5385

Epoch 00025: val_acc did not improve from 0.62500
Epoch 26/200
106/106 [==============================] - 1s 5ms/step - loss: 0.3161 - acc: 0.8868 - val_loss: 0.9850 - val_acc: 0.5577

Epoch 00026: val_acc did not improve from 0.62500
Epoch 27/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2962 - acc: 0.8868 - val_loss: 1.0167 - val_acc: 0.5769

Epoch 00027: val_acc did not improve from 0.62500
Epoch 28/200
106/106 [==============================] - 1s 5ms/step - loss: 0.3122 - acc: 0.8868 - val_loss: 1.0126 - val_acc: 0.5385

Epoch 00028: val_acc did not improve from 0.62500
Epoch 29/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2941 - acc: 0.8962 - val_loss: 1.0367 - val_acc: 0.5481

Epoch 00029: val_acc did not improve from 0.62500
Epoch 30/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2931 - acc: 0.8962 - val_loss: 0.9896 - val_acc: 0.5481

Epoch 00030: val_acc did not improve from 0.62500
Epoch 31/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2711 - acc: 0.8962 - val_loss: 0.9559 - val_acc: 0.5288

Epoch 00031: val_acc did not improve from 0.62500
Epoch 32/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2730 - acc: 0.8774 - val_loss: 0.9751 - val_acc: 0.5577

Epoch 00032: val_acc did not improve from 0.62500
Epoch 33/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2421 - acc: 0.9057 - val_loss: 0.9841 - val_acc: 0.5769

Epoch 00033: val_acc did not improve from 0.62500
Epoch 34/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2551 - acc: 0.8962 - val_loss: 0.8725 - val_acc: 0.5865

Epoch 00034: val_acc did not improve from 0.62500
Epoch 35/200
106/106 [==============================] - 1s 5ms/step - loss: 0.5181 - acc: 0.8113 - val_loss: 0.8443 - val_acc: 0.5769

Epoch 00035: val_acc did not improve from 0.62500
Epoch 36/200
106/106 [==============================] - 1s 5ms/step - loss: 0.5627 - acc: 0.7453 - val_loss: 0.8617 - val_acc: 0.5769

Epoch 00036: val_acc did not improve from 0.62500
Epoch 37/200
106/106 [==============================] - 1s 6ms/step - loss: 0.3708 - acc: 0.8585 - val_loss: 0.8816 - val_acc: 0.5865

Epoch 00037: val_acc did not improve from 0.62500
Epoch 38/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2681 - acc: 0.9245 - val_loss: 0.9157 - val_acc: 0.5769

Epoch 00038: val_acc did not improve from 0.62500
Epoch 39/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2796 - acc: 0.8868 - val_loss: 0.9486 - val_acc: 0.5577

Epoch 00039: val_acc did not improve from 0.62500
Epoch 40/200
106/106 [==============================] - 1s 6ms/step - loss: 0.3497 - acc: 0.8491 - val_loss: 0.9756 - val_acc: 0.5577

Epoch 00040: val_acc did not improve from 0.62500
Epoch 41/200
106/106 [==============================] - 1s 6ms/step - loss: 0.3250 - acc: 0.8679 - val_loss: 1.0064 - val_acc: 0.5673

Epoch 00041: val_acc did not improve from 0.62500
Epoch 42/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2802 - acc: 0.8962 - val_loss: 1.0636 - val_acc: 0.5769

Epoch 00042: val_acc did not improve from 0.62500
Epoch 43/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2696 - acc: 0.8774 - val_loss: 1.1441 - val_acc: 0.5673

Epoch 00043: val_acc did not improve from 0.62500
Epoch 44/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2838 - acc: 0.8868 - val_loss: 1.2205 - val_acc: 0.5577

Epoch 00044: val_acc did not improve from 0.62500
Epoch 45/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2999 - acc: 0.8491 - val_loss: 1.2565 - val_acc: 0.5577

Epoch 00045: val_acc did not improve from 0.62500
Epoch 46/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2996 - acc: 0.8585 - val_loss: 1.2424 - val_acc: 0.5577

Epoch 00046: val_acc did not improve from 0.62500
Epoch 47/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2934 - acc: 0.8679 - val_loss: 1.1959 - val_acc: 0.5481

Epoch 00047: val_acc did not improve from 0.62500
Epoch 48/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2758 - acc: 0.8962 - val_loss: 1.1527 - val_acc: 0.5673

Epoch 00048: val_acc did not improve from 0.62500
Epoch 49/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2532 - acc: 0.9057 - val_loss: 1.1513 - val_acc: 0.5769

Epoch 00049: val_acc did not improve from 0.62500
Epoch 50/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2536 - acc: 0.8868 - val_loss: 1.1362 - val_acc: 0.5962

Epoch 00050: val_acc did not improve from 0.62500
Epoch 51/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2443 - acc: 0.9057 - val_loss: 1.1717 - val_acc: 0.5769

Epoch 00051: val_acc did not improve from 0.62500
Epoch 52/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2519 - acc: 0.8962 - val_loss: 1.2375 - val_acc: 0.5769

Epoch 00052: val_acc did not improve from 0.62500
Epoch 53/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2453 - acc: 0.8962 - val_loss: 1.3554 - val_acc: 0.5865

Epoch 00053: val_acc did not improve from 0.62500
Epoch 54/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2255 - acc: 0.9057 - val_loss: 1.3748 - val_acc: 0.6058

Epoch 00054: val_acc did not improve from 0.62500
Epoch 55/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3424 - acc: 0.8585 - val_loss: 1.4688 - val_acc: 0.5481

Epoch 00055: val_acc did not improve from 0.62500
Epoch 56/200
106/106 [==============================] - 0s 5ms/step - loss: 0.3171 - acc: 0.8491 - val_loss: 1.5134 - val_acc: 0.5288

Epoch 00056: val_acc did not improve from 0.62500
Epoch 57/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4384 - acc: 0.7925 - val_loss: 1.5340 - val_acc: 0.5288

Epoch 00057: val_acc did not improve from 0.62500
Epoch 58/200
106/106 [==============================] - 0s 5ms/step - loss: 0.4505 - acc: 0.7925 - val_loss: 1.5290 - val_acc: 0.5288

Epoch 00058: val_acc did not improve from 0.62500
Epoch 59/200
106/106 [==============================] - 0s 5ms/step - loss: 0.4692 - acc: 0.7925 - val_loss: 1.5449 - val_acc: 0.5288

Epoch 00059: val_acc did not improve from 0.62500
Epoch 60/200
106/106 [==============================] - 0s 5ms/step - loss: 0.4518 - acc: 0.7642 - val_loss: 1.5910 - val_acc: 0.5288

Epoch 00060: val_acc did not improve from 0.62500
Epoch 61/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4342 - acc: 0.7736 - val_loss: 1.5323 - val_acc: 0.5288

Epoch 00061: val_acc did not improve from 0.62500
Epoch 62/200
106/106 [==============================] - 0s 5ms/step - loss: 0.3774 - acc: 0.7925 - val_loss: 1.4241 - val_acc: 0.5288

Epoch 00062: val_acc did not improve from 0.62500
Epoch 63/200
106/106 [==============================] - 0s 5ms/step - loss: 0.3622 - acc: 0.8019 - val_loss: 1.3255 - val_acc: 0.5288

Epoch 00063: val_acc did not improve from 0.62500
Epoch 64/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3518 - acc: 0.8019 - val_loss: 1.2404 - val_acc: 0.5288

Epoch 00064: val_acc did not improve from 0.62500
Epoch 65/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3467 - acc: 0.8113 - val_loss: 1.2077 - val_acc: 0.5385

Epoch 00065: val_acc did not improve from 0.62500
Epoch 66/200
106/106 [==============================] - 1s 5ms/step - loss: 0.3424 - acc: 0.8302 - val_loss: 1.2288 - val_acc: 0.5385

Epoch 00066: val_acc did not improve from 0.62500
Epoch 67/200
106/106 [==============================] - 1s 5ms/step - loss: 0.3342 - acc: 0.8208 - val_loss: 1.2749 - val_acc: 0.5385

Epoch 00067: val_acc did not improve from 0.62500
Epoch 68/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3288 - acc: 0.8208 - val_loss: 1.3258 - val_acc: 0.5385

Epoch 00068: val_acc did not improve from 0.62500
Epoch 69/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3239 - acc: 0.8302 - val_loss: 1.3794 - val_acc: 0.5385

Epoch 00069: val_acc did not improve from 0.62500
Epoch 70/200
106/106 [==============================] - 0s 5ms/step - loss: 0.3195 - acc: 0.8302 - val_loss: 1.4283 - val_acc: 0.5385

Epoch 00070: val_acc did not improve from 0.62500
Epoch 71/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3159 - acc: 0.8302 - val_loss: 1.4333 - val_acc: 0.5481

Epoch 00071: val_acc did not improve from 0.62500
Epoch 72/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3093 - acc: 0.8302 - val_loss: 1.3991 - val_acc: 0.5577

Epoch 00072: val_acc did not improve from 0.62500
Epoch 73/200
106/106 [==============================] - 0s 5ms/step - loss: 0.3030 - acc: 0.8396 - val_loss: 1.3867 - val_acc: 0.5577

Epoch 00073: val_acc did not improve from 0.62500
Epoch 74/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3012 - acc: 0.8396 - val_loss: 1.5341 - val_acc: 0.5481

Epoch 00074: val_acc did not improve from 0.62500
Epoch 75/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2960 - acc: 0.8396 - val_loss: 1.7464 - val_acc: 0.5481

Epoch 00075: val_acc did not improve from 0.62500
Epoch 76/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2951 - acc: 0.8679 - val_loss: 1.8375 - val_acc: 0.5577

Epoch 00076: val_acc did not improve from 0.62500
Epoch 77/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2900 - acc: 0.8679 - val_loss: 1.8702 - val_acc: 0.5577

Epoch 00077: val_acc did not improve from 0.62500
Epoch 78/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2870 - acc: 0.8585 - val_loss: 1.8981 - val_acc: 0.5673

Epoch 00078: val_acc did not improve from 0.62500
Epoch 79/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2844 - acc: 0.8679 - val_loss: 1.9242 - val_acc: 0.5673

Epoch 00079: val_acc did not improve from 0.62500
Epoch 80/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2815 - acc: 0.8774 - val_loss: 1.9491 - val_acc: 0.5673

Epoch 00080: val_acc did not improve from 0.62500
Epoch 81/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2780 - acc: 0.8774 - val_loss: 1.9771 - val_acc: 0.5673

Epoch 00081: val_acc did not improve from 0.62500
Epoch 82/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2709 - acc: 0.8774 - val_loss: 2.0079 - val_acc: 0.5577

Epoch 00082: val_acc did not improve from 0.62500
Epoch 83/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2676 - acc: 0.8774 - val_loss: 2.0464 - val_acc: 0.5577

Epoch 00083: val_acc did not improve from 0.62500
Epoch 84/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2629 - acc: 0.8774 - val_loss: 2.1209 - val_acc: 0.5577

Epoch 00084: val_acc did not improve from 0.62500
Epoch 85/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2531 - acc: 0.8774 - val_loss: 2.2568 - val_acc: 0.5577

Epoch 00085: val_acc did not improve from 0.62500
Epoch 86/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2476 - acc: 0.8868 - val_loss: 2.3477 - val_acc: 0.5577

Epoch 00086: val_acc did not improve from 0.62500
Epoch 87/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2439 - acc: 0.8868 - val_loss: 2.4852 - val_acc: 0.5481

Epoch 00087: val_acc did not improve from 0.62500
Epoch 88/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2440 - acc: 0.8868 - val_loss: 2.1226 - val_acc: 0.5673

Epoch 00088: val_acc did not improve from 0.62500
Epoch 89/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2385 - acc: 0.8774 - val_loss: 2.0934 - val_acc: 0.5673

Epoch 00089: val_acc did not improve from 0.62500
Epoch 90/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2506 - acc: 0.8774 - val_loss: 2.0858 - val_acc: 0.5673

Epoch 00090: val_acc did not improve from 0.62500
Epoch 91/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2572 - acc: 0.8774 - val_loss: 1.8908 - val_acc: 0.5673

Epoch 00091: val_acc did not improve from 0.62500
Epoch 92/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2583 - acc: 0.8774 - val_loss: 1.7063 - val_acc: 0.5673

Epoch 00092: val_acc did not improve from 0.62500
Epoch 93/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2675 - acc: 0.8774 - val_loss: 1.6648 - val_acc: 0.5962

Epoch 00093: val_acc did not improve from 0.62500
Epoch 94/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2792 - acc: 0.8774 - val_loss: 1.6512 - val_acc: 0.5577

Epoch 00094: val_acc did not improve from 0.62500
Epoch 95/200
106/106 [==============================] - 1s 5ms/step - loss: 0.6193 - acc: 0.8019 - val_loss: 0.5249 - val_acc: 0.7212

Epoch 00095: val_acc improved from 0.62500 to 0.72115, saving model to best_model.pkl
Epoch 96/200
106/106 [==============================] - 0s 4ms/step - loss: 1.0801 - acc: 0.7170 - val_loss: 0.5519 - val_acc: 0.7115

Epoch 00096: val_acc did not improve from 0.72115
Epoch 97/200
106/106 [==============================] - 1s 6ms/step - loss: 1.1806 - acc: 0.6887 - val_loss: 0.5709 - val_acc: 0.7115

Epoch 00097: val_acc did not improve from 0.72115
Epoch 98/200
106/106 [==============================] - 1s 6ms/step - loss: 0.9933 - acc: 0.6509 - val_loss: 0.5818 - val_acc: 0.7115

Epoch 00098: val_acc did not improve from 0.72115
Epoch 99/200
106/106 [==============================] - 1s 6ms/step - loss: 0.7804 - acc: 0.6604 - val_loss: 0.6153 - val_acc: 0.7019

Epoch 00099: val_acc did not improve from 0.72115
Epoch 100/200
106/106 [==============================] - 1s 6ms/step - loss: 0.6427 - acc: 0.6509 - val_loss: 0.6927 - val_acc: 0.6923

Epoch 00100: val_acc did not improve from 0.72115
Epoch 101/200
106/106 [==============================] - 1s 5ms/step - loss: 0.5649 - acc: 0.6509 - val_loss: 0.7724 - val_acc: 0.5865

Epoch 00101: val_acc did not improve from 0.72115
Epoch 102/200
106/106 [==============================] - 1s 5ms/step - loss: 0.5767 - acc: 0.6415 - val_loss: 0.8086 - val_acc: 0.6058

Epoch 00102: val_acc did not improve from 0.72115
Epoch 103/200
106/106 [==============================] - 1s 6ms/step - loss: 0.6259 - acc: 0.6226 - val_loss: 0.8447 - val_acc: 0.6058

Epoch 00103: val_acc did not improve from 0.72115
Epoch 104/200
106/106 [==============================] - 1s 6ms/step - loss: 0.6551 - acc: 0.6038 - val_loss: 0.8733 - val_acc: 0.6058

Epoch 00104: val_acc did not improve from 0.72115
Epoch 105/200
106/106 [==============================] - 1s 5ms/step - loss: 0.6543 - acc: 0.5849 - val_loss: 0.8872 - val_acc: 0.5962

Epoch 00105: val_acc did not improve from 0.72115
Epoch 106/200
106/106 [==============================] - 0s 4ms/step - loss: 0.6374 - acc: 0.6132 - val_loss: 0.8904 - val_acc: 0.6154

Epoch 00106: val_acc did not improve from 0.72115
Epoch 107/200
106/106 [==============================] - 0s 4ms/step - loss: 0.6137 - acc: 0.6321 - val_loss: 0.8871 - val_acc: 0.6058

Epoch 00107: val_acc did not improve from 0.72115
Epoch 108/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5883 - acc: 0.6321 - val_loss: 0.8688 - val_acc: 0.6058

Epoch 00108: val_acc did not improve from 0.72115
Epoch 109/200
106/106 [==============================] - 1s 5ms/step - loss: 0.5677 - acc: 0.6604 - val_loss: 0.8478 - val_acc: 0.5962

Epoch 00109: val_acc did not improve from 0.72115
Epoch 110/200
106/106 [==============================] - 1s 5ms/step - loss: 0.5556 - acc: 0.6698 - val_loss: 0.8274 - val_acc: 0.5865

Epoch 00110: val_acc did not improve from 0.72115
Epoch 111/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5470 - acc: 0.6698 - val_loss: 0.8079 - val_acc: 0.5673

Epoch 00111: val_acc did not improve from 0.72115
Epoch 112/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5425 - acc: 0.7453 - val_loss: 0.7928 - val_acc: 0.6442

Epoch 00112: val_acc did not improve from 0.72115
Epoch 113/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5413 - acc: 0.7170 - val_loss: 0.7792 - val_acc: 0.6635

Epoch 00113: val_acc did not improve from 0.72115
Epoch 114/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5414 - acc: 0.6509 - val_loss: 0.7843 - val_acc: 0.6923

Epoch 00114: val_acc did not improve from 0.72115
Epoch 115/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5403 - acc: 0.6415 - val_loss: 0.7957 - val_acc: 0.7019

Epoch 00115: val_acc did not improve from 0.72115
Epoch 116/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5369 - acc: 0.6698 - val_loss: 0.8070 - val_acc: 0.7019

Epoch 00116: val_acc did not improve from 0.72115
Epoch 117/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5355 - acc: 0.6887 - val_loss: 0.8089 - val_acc: 0.7115

Epoch 00117: val_acc did not improve from 0.72115
Epoch 118/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5264 - acc: 0.7170 - val_loss: 0.8181 - val_acc: 0.7308

Epoch 00118: val_acc improved from 0.72115 to 0.73077, saving model to best_model.pkl
Epoch 119/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5130 - acc: 0.7358 - val_loss: 0.8434 - val_acc: 0.7596

Epoch 00119: val_acc improved from 0.73077 to 0.75962, saving model to best_model.pkl
Epoch 120/200
106/106 [==============================] - 0s 4ms/step - loss: 0.5016 - acc: 0.7453 - val_loss: 0.8795 - val_acc: 0.7788

Epoch 00120: val_acc improved from 0.75962 to 0.77885, saving model to best_model.pkl
Epoch 121/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4789 - acc: 0.7547 - val_loss: 0.9501 - val_acc: 0.7788

Epoch 00121: val_acc did not improve from 0.77885
Epoch 122/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4656 - acc: 0.7547 - val_loss: 1.1675 - val_acc: 0.7500

Epoch 00122: val_acc did not improve from 0.77885
Epoch 123/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4641 - acc: 0.7358 - val_loss: 1.1769 - val_acc: 0.7212

Epoch 00123: val_acc did not improve from 0.77885
Epoch 124/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4470 - acc: 0.7642 - val_loss: 1.1504 - val_acc: 0.7115

Epoch 00124: val_acc did not improve from 0.77885
Epoch 125/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4423 - acc: 0.7736 - val_loss: 1.2039 - val_acc: 0.6346

Epoch 00125: val_acc did not improve from 0.77885
Epoch 126/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4333 - acc: 0.7830 - val_loss: 1.2389 - val_acc: 0.6442

Epoch 00126: val_acc did not improve from 0.77885
Epoch 127/200
106/106 [==============================] - 0s 4ms/step - loss: 0.4027 - acc: 0.8113 - val_loss: 1.3279 - val_acc: 0.6154

Epoch 00127: val_acc did not improve from 0.77885
Epoch 128/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3631 - acc: 0.8585 - val_loss: 1.9127 - val_acc: 0.6250

Epoch 00128: val_acc did not improve from 0.77885
Epoch 129/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3271 - acc: 0.8679 - val_loss: 2.2089 - val_acc: 0.6346

Epoch 00129: val_acc did not improve from 0.77885
Epoch 130/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3030 - acc: 0.8774 - val_loss: 2.5116 - val_acc: 0.5865

Epoch 00130: val_acc did not improve from 0.77885
Epoch 131/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2836 - acc: 0.8962 - val_loss: 2.6410 - val_acc: 0.5673

Epoch 00131: val_acc did not improve from 0.77885
Epoch 132/200
106/106 [==============================] - 0s 4ms/step - loss: 0.3466 - acc: 0.8868 - val_loss: 2.6199 - val_acc: 0.5673

Epoch 00132: val_acc did not improve from 0.77885
Epoch 133/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2711 - acc: 0.8962 - val_loss: 2.3682 - val_acc: 0.6058

Epoch 00133: val_acc did not improve from 0.77885
Epoch 134/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2582 - acc: 0.9151 - val_loss: 2.3142 - val_acc: 0.6058

Epoch 00134: val_acc did not improve from 0.77885
Epoch 135/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2797 - acc: 0.9057 - val_loss: 2.5222 - val_acc: 0.5673

Epoch 00135: val_acc did not improve from 0.77885
Epoch 136/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2487 - acc: 0.9151 - val_loss: 2.6836 - val_acc: 0.5577

Epoch 00136: val_acc did not improve from 0.77885
Epoch 137/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2452 - acc: 0.8962 - val_loss: 2.7752 - val_acc: 0.5481

Epoch 00137: val_acc did not improve from 0.77885
Epoch 138/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2570 - acc: 0.8868 - val_loss: 2.5722 - val_acc: 0.5577

Epoch 00138: val_acc did not improve from 0.77885
Epoch 139/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2420 - acc: 0.8962 - val_loss: 2.4443 - val_acc: 0.5769

Epoch 00139: val_acc did not improve from 0.77885
Epoch 140/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2383 - acc: 0.9057 - val_loss: 2.3510 - val_acc: 0.5865

Epoch 00140: val_acc did not improve from 0.77885
Epoch 141/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2375 - acc: 0.9057 - val_loss: 2.3273 - val_acc: 0.5962

Epoch 00141: val_acc did not improve from 0.77885
Epoch 142/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2341 - acc: 0.8962 - val_loss: 2.3967 - val_acc: 0.5865

Epoch 00142: val_acc did not improve from 0.77885
Epoch 143/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2253 - acc: 0.9057 - val_loss: 2.5460 - val_acc: 0.5962

Epoch 00143: val_acc did not improve from 0.77885
Epoch 144/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2155 - acc: 0.9151 - val_loss: 2.6584 - val_acc: 0.5865

Epoch 00144: val_acc did not improve from 0.77885
Epoch 145/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2019 - acc: 0.9245 - val_loss: 2.8091 - val_acc: 0.5865

Epoch 00145: val_acc did not improve from 0.77885
Epoch 146/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2008 - acc: 0.9245 - val_loss: 2.7056 - val_acc: 0.6058

Epoch 00146: val_acc did not improve from 0.77885
Epoch 147/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2180 - acc: 0.8962 - val_loss: 2.7497 - val_acc: 0.6154

Epoch 00147: val_acc did not improve from 0.77885
Epoch 148/200
106/106 [==============================] - 1s 5ms/step - loss: 0.4370 - acc: 0.8491 - val_loss: 2.8884 - val_acc: 0.6154

Epoch 00148: val_acc did not improve from 0.77885
Epoch 149/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2186 - acc: 0.8868 - val_loss: 3.1986 - val_acc: 0.5577

Epoch 00149: val_acc did not improve from 0.77885
Epoch 150/200
106/106 [==============================] - 1s 6ms/step - loss: 0.3723 - acc: 0.8679 - val_loss: 2.6972 - val_acc: 0.6250

Epoch 00150: val_acc did not improve from 0.77885
Epoch 151/200
106/106 [==============================] - 0s 4ms/step - loss: 0.1894 - acc: 0.9434 - val_loss: 2.3208 - val_acc: 0.6538

Epoch 00151: val_acc did not improve from 0.77885
Epoch 152/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2035 - acc: 0.9245 - val_loss: 2.2636 - val_acc: 0.6442

Epoch 00152: val_acc did not improve from 0.77885
Epoch 153/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2298 - acc: 0.9057 - val_loss: 2.2520 - val_acc: 0.6250

Epoch 00153: val_acc did not improve from 0.77885
Epoch 154/200
106/106 [==============================] - 1s 8ms/step - loss: 0.2441 - acc: 0.9057 - val_loss: 2.2418 - val_acc: 0.6250

Epoch 00154: val_acc did not improve from 0.77885
Epoch 155/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2531 - acc: 0.8962 - val_loss: 2.2511 - val_acc: 0.6250

Epoch 00155: val_acc did not improve from 0.77885
Epoch 156/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2542 - acc: 0.9057 - val_loss: 2.2580 - val_acc: 0.6154

Epoch 00156: val_acc did not improve from 0.77885
Epoch 157/200
106/106 [==============================] - 0s 5ms/step - loss: 0.3532 - acc: 0.8774 - val_loss: 2.2592 - val_acc: 0.6250

Epoch 00157: val_acc did not improve from 0.77885
Epoch 158/200
106/106 [==============================] - 1s 7ms/step - loss: 0.3317 - acc: 0.9151 - val_loss: 2.2829 - val_acc: 0.6442

Epoch 00158: val_acc did not improve from 0.77885
Epoch 159/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2664 - acc: 0.9340 - val_loss: 2.3189 - val_acc: 0.6250

Epoch 00159: val_acc did not improve from 0.77885
Epoch 160/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2050 - acc: 0.9340 - val_loss: 2.1112 - val_acc: 0.6058

Epoch 00160: val_acc did not improve from 0.77885
Epoch 161/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2072 - acc: 0.9340 - val_loss: 1.5777 - val_acc: 0.6442

Epoch 00161: val_acc did not improve from 0.77885
Epoch 162/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2861 - acc: 0.8774 - val_loss: 1.6201 - val_acc: 0.6442

Epoch 00162: val_acc did not improve from 0.77885
Epoch 163/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2781 - acc: 0.8491 - val_loss: 2.1983 - val_acc: 0.5865

Epoch 00163: val_acc did not improve from 0.77885
Epoch 164/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2235 - acc: 0.8491 - val_loss: 2.3266 - val_acc: 0.5769

Epoch 00164: val_acc did not improve from 0.77885
Epoch 165/200
106/106 [==============================] - 1s 7ms/step - loss: 0.2125 - acc: 0.8585 - val_loss: 2.4469 - val_acc: 0.5577

Epoch 00165: val_acc did not improve from 0.77885
Epoch 166/200
106/106 [==============================] - 1s 5ms/step - loss: 0.2074 - acc: 0.8774 - val_loss: 2.3798 - val_acc: 0.5865

Epoch 00166: val_acc did not improve from 0.77885
Epoch 167/200
106/106 [==============================] - 0s 4ms/step - loss: 0.2037 - acc: 0.8868 - val_loss: 2.3146 - val_acc: 0.5962

Epoch 00167: val_acc did not improve from 0.77885
Epoch 168/200
106/106 [==============================] - 0s 5ms/step - loss: 0.2021 - acc: 0.9151 - val_loss: 2.3305 - val_acc: 0.5962

Epoch 00168: val_acc did not improve from 0.77885
Epoch 169/200
106/106 [==============================] - 1s 8ms/step - loss: 0.2031 - acc: 0.9340 - val_loss: 2.2813 - val_acc: 0.6154

Epoch 00169: val_acc did not improve from 0.77885
Epoch 170/200
106/106 [==============================] - 1s 7ms/step - loss: 0.2091 - acc: 0.9340 - val_loss: 2.2433 - val_acc: 0.6154

Epoch 00170: val_acc did not improve from 0.77885
Epoch 171/200
106/106 [==============================] - 1s 7ms/step - loss: 0.1984 - acc: 0.9434 - val_loss: 2.2188 - val_acc: 0.6346

Epoch 00171: val_acc did not improve from 0.77885
Epoch 172/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1904 - acc: 0.9434 - val_loss: 2.1421 - val_acc: 0.6442

Epoch 00172: val_acc did not improve from 0.77885
Epoch 173/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1818 - acc: 0.9434 - val_loss: 2.1616 - val_acc: 0.6442

Epoch 00173: val_acc did not improve from 0.77885
Epoch 174/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1707 - acc: 0.9434 - val_loss: 2.2102 - val_acc: 0.6346

Epoch 00174: val_acc did not improve from 0.77885
Epoch 175/200
106/106 [==============================] - 1s 7ms/step - loss: 0.1581 - acc: 0.9528 - val_loss: 2.2783 - val_acc: 0.6058

Epoch 00175: val_acc did not improve from 0.77885
Epoch 176/200
106/106 [==============================] - 1s 6ms/step - loss: 0.2158 - acc: 0.9434 - val_loss: 2.3174 - val_acc: 0.5865

Epoch 00176: val_acc did not improve from 0.77885
Epoch 177/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1433 - acc: 0.9528 - val_loss: 2.2738 - val_acc: 0.5673

Epoch 00177: val_acc did not improve from 0.77885
Epoch 178/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1396 - acc: 0.9528 - val_loss: 2.1070 - val_acc: 0.6058

Epoch 00178: val_acc did not improve from 0.77885
Epoch 179/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1282 - acc: 0.9528 - val_loss: 1.9549 - val_acc: 0.6346

Epoch 00179: val_acc did not improve from 0.77885
Epoch 180/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1212 - acc: 0.9623 - val_loss: 1.8653 - val_acc: 0.6442

Epoch 00180: val_acc did not improve from 0.77885
Epoch 181/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1171 - acc: 0.9623 - val_loss: 1.8829 - val_acc: 0.6250

Epoch 00181: val_acc did not improve from 0.77885
Epoch 182/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1041 - acc: 0.9623 - val_loss: 1.9553 - val_acc: 0.6154

Epoch 00182: val_acc did not improve from 0.77885
Epoch 183/200
106/106 [==============================] - 1s 7ms/step - loss: 0.0991 - acc: 0.9811 - val_loss: 1.8870 - val_acc: 0.6058

Epoch 00183: val_acc did not improve from 0.77885
Epoch 184/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1092 - acc: 0.9717 - val_loss: 2.1061 - val_acc: 0.5962

Epoch 00184: val_acc did not improve from 0.77885
Epoch 185/200
106/106 [==============================] - 1s 7ms/step - loss: 0.1725 - acc: 0.9245 - val_loss: 1.5441 - val_acc: 0.6731

Epoch 00185: val_acc did not improve from 0.77885
Epoch 186/200
106/106 [==============================] - 1s 6ms/step - loss: 0.0964 - acc: 0.9528 - val_loss: 1.3108 - val_acc: 0.6731

Epoch 00186: val_acc did not improve from 0.77885
Epoch 187/200
106/106 [==============================] - 1s 7ms/step - loss: 0.1561 - acc: 0.9245 - val_loss: 1.6250 - val_acc: 0.6635

Epoch 00187: val_acc did not improve from 0.77885
Epoch 188/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1302 - acc: 0.9340 - val_loss: 1.8509 - val_acc: 0.6442

Epoch 00188: val_acc did not improve from 0.77885
Epoch 189/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1163 - acc: 0.9340 - val_loss: 2.0796 - val_acc: 0.5865

Epoch 00189: val_acc did not improve from 0.77885
Epoch 190/200
106/106 [==============================] - 1s 7ms/step - loss: 0.1225 - acc: 0.9528 - val_loss: 2.1178 - val_acc: 0.5865

Epoch 00190: val_acc did not improve from 0.77885
Epoch 191/200
106/106 [==============================] - 1s 6ms/step - loss: 0.0987 - acc: 0.9811 - val_loss: 2.1749 - val_acc: 0.5673

Epoch 00191: val_acc did not improve from 0.77885
Epoch 192/200
106/106 [==============================] - 1s 7ms/step - loss: 0.0991 - acc: 0.9811 - val_loss: 2.1667 - val_acc: 0.5769

Epoch 00192: val_acc did not improve from 0.77885
Epoch 193/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1737 - acc: 0.9717 - val_loss: 2.0917 - val_acc: 0.5865

Epoch 00193: val_acc did not improve from 0.77885
Epoch 194/200
106/106 [==============================] - 1s 7ms/step - loss: 0.1551 - acc: 0.9717 - val_loss: 2.0561 - val_acc: 0.5769

Epoch 00194: val_acc did not improve from 0.77885
Epoch 195/200
106/106 [==============================] - 1s 6ms/step - loss: 0.1022 - acc: 0.9811 - val_loss: 2.0144 - val_acc: 0.5673

Epoch 00195: val_acc did not improve from 0.77885
Epoch 196/200
106/106 [==============================] - 1s 7ms/step - loss: 0.0960 - acc: 0.9811 - val_loss: 1.9612 - val_acc: 0.5673

Epoch 00196: val_acc did not improve from 0.77885
Epoch 197/200
106/106 [==============================] - 0s 5ms/step - loss: 0.0868 - acc: 0.9811 - val_loss: 1.9050 - val_acc: 0.5865

Epoch 00197: val_acc did not improve from 0.77885
Epoch 198/200
106/106 [==============================] - 1s 6ms/step - loss: 0.0789 - acc: 0.9811 - val_loss: 1.8515 - val_acc: 0.6250

Epoch 00198: val_acc did not improve from 0.77885
Epoch 199/200
106/106 [==============================] - 1s 5ms/step - loss: 0.0830 - acc: 0.9717 - val_loss: 1.7854 - val_acc: 0.6154

Epoch 00199: val_acc did not improve from 0.77885
Epoch 200/200
106/106 [==============================] - 0s 4ms/step - loss: 0.0831 - acc: 0.9623 - val_loss: 1.7519 - val_acc: 0.6154

Epoch 00200: val_acc did not improve from 0.77885
Out[44]:
<keras.callbacks.History at 0x1212d9c10>

In [45]:
# Evaluate the checkpointed model (the epoch with best val_acc, 0.77885),
# not the overfit final-epoch weights. Despite the .pkl extension, the file
# was written by Keras's ModelCheckpoint, so load_model reads it directly.
model = load_model('best_model.pkl')

from sklearn.metrics import accuracy_score

# `Sequential.predict_classes` was removed in TF/Keras 2.6+; reproduce its
# behavior from `predict` so this cell runs on modern Keras:
#   - multi-unit output  -> argmax over the class axis
#   - single sigmoid unit -> threshold at 0.5
# (assumes `test` / `test_target` were prepared in earlier cells — the
# group-3 sequences and their 0/1 labels)
probs = model.predict(test)
if probs.shape[-1] > 1:
    test_preds = probs.argmax(axis=-1)
else:
    test_preds = (probs > 0.5).astype('int32').ravel()

# Test-set accuracy; bare last expression so the notebook displays it
accuracy_score(test_target, test_preds)


Out[45]:
0.6057692307692307