In [1]:
# Numbers
import numpy as np
import pandas as pd

# Serialization
import pickle

# Neural networks
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from tensorflow.python.client import device_lib

# Machine learning
from sklearn.preprocessing import LabelEncoder, StandardScaler

# Plotting
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline


Using TensorFlow backend.

In [2]:
device_lib.list_local_devices()


Out[2]:
[name: "/cpu:0"
 device_type: "CPU"
 memory_limit: 268435456
 locality {
 }
 incarnation: 17434251368730919557, name: "/gpu:0"
 device_type: "GPU"
 memory_limit: 11332668621
 locality {
   bus_id: 1
 }
 incarnation: 11423549508794071041
 physical_device_desc: "device: 0, name: Tesla K80, pci bus id: 0000:00:1e.0"]

In [3]:
import os
    
def load_mouse_dynamics_dataset():
    
    training_sessions = []
    testing_sessions = []
    
    dataset_path = 'Mouse-Dynamics-Challenge'
    training_files_path = os.path.join(dataset_path, 'training_files')
    testing_files_path = os.path.join(dataset_path, 'test_files')
 
    # Load the public labels. These apply only to the testing set and indicate whether the
    # session was produced by the legitimate user or by someone else.
    labels_path = os.path.join(dataset_path, 'public_labels.csv')
    labels = pd.read_csv(labels_path)
    session_to_label = {labels['filename'][idx]: labels['is_illegal'][idx] for idx in range(len(labels))}
    
    user_names = os.listdir(training_files_path)
    
    # Load training and testing data for each user (restricted to the first user for now)
    for user_name in user_names[:1]:
        
        user_id = user_name[4:]
        
        # Load training sessions
        user_training_path = os.path.join(training_files_path, user_name)
        training_session_names = os.listdir(user_training_path)
        
        for session_name in training_session_names:
            
            session_id = session_name[8:]
            df_session = pd.read_csv(os.path.join(user_training_path, session_name))
            
            training_sessions.append({ 
                'user_id': int(user_id), 
                'session_id' : int(session_id), 
                'data': df_session
            })
        
        # Load testing sessions
        user_testing_path = os.path.join(testing_files_path, user_name)
        testing_session_names = os.listdir(user_testing_path)
        
        for session_name in testing_session_names:
            
            session_id = session_name[8:]
            df_session = pd.read_csv(os.path.join(user_testing_path, session_name))
            
            # Some testing sessions have no label; those belong to the private dataset, whose labels
            # only the competition organizers had. We skip unlabeled sessions for now.
            try:
                is_illegal = session_to_label[session_name]
                
                testing_sessions.append({
                    # Illegal sessions were not produced by the claimed user, so they get
                    # the reserved user_id 0 instead of the folder's user id.
                    'user_id': int(user_id) if is_illegal == 0 else 0, 
                    'session_id' : int(session_id), 
                    'data': df_session
                })
            except KeyError:
                pass
                
    return training_sessions, testing_sessions
        
training_sessions, testing_sessions = load_mouse_dynamics_dataset()

In [6]:
training_sessions.shape


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-6-5d3139a3d7c0> in <module>()
----> 1 training_sessions.shape

AttributeError: 'list' object has no attribute 'shape'
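
The loader returns plain Python lists of dicts (not arrays), hence the AttributeError above. A minimal way to inspect them, assuming the structure built by load_mouse_dynamics_dataset:

In [ ]:
# Each entry holds 'user_id', 'session_id' and a pandas DataFrame under 'data'
print(len(training_sessions), len(testing_sessions))
print(training_sessions[0]['user_id'], training_sessions[0]['data'].shape)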

In [3]:
# Deserialize data
with open('dataset/local.pkl','rb') as f:
    local_data = pickle.load(f)
with open('dataset/remote.pkl','rb') as f:
    remote_data = pickle.load(f)
with open('dataset/other_local.pkl','rb') as f:
    other_data = pickle.load(f)

# Fix datasets and convert to pandas dataframe
df_local = pd.DataFrame(local_data['x'][:,:])
df_local['origin'] = 'USER0_LOCAL'

df_remote = pd.DataFrame(remote_data['x'][:,:])
df_remote['origin'] = 'USER0_REMOTE'

df_remote_user1 = pd.DataFrame(other_data['x'][:,:])
df_remote_user1 = df_remote_user1[df_remote_user1[2] == 'MOUSE_MOVE']
df_remote_user1['origin'] = 'USER1_LOCAL'

df = pd.concat([df_local, df_remote, df_remote_user1])
df.columns = ['dt', 'device', 'event_type', 'x', 'y', 'origin']
df['dt'] = pd.to_numeric(df['dt'])
df['x'] = pd.to_numeric(df['x'])
df['y'] = pd.to_numeric(df['y'])

orig_df = df.copy()

Prepare the data for machine learning: normalize the numeric features and encode the categorical labels as integers.



In [4]:
# Numerical encoding for labels
label_encoder = LabelEncoder()
df['origin'] = label_encoder.fit_transform(df['origin'])

labels = label_encoder.inverse_transform([0, 1, 2])
print('Label encoding: 0 -> {} , 1 -> {}, 2 -> {}'.format(labels[0], labels[1], labels[2]))

# Remove timing outliers: drop rows with dt above the 95th percentile (the position data needs no such filtering)
df = df[df.dt < df.dt.quantile(.95)]

# Give 0 mean and unit variance to data
standard_scaler = StandardScaler()
df['x'] = standard_scaler.fit_transform(df['x'].values.reshape(-1, 1))
df['y'] = standard_scaler.fit_transform(df['y'].values.reshape(-1, 1))
df['dt'] = standard_scaler.fit_transform(df['dt'].values.reshape(-1, 1))


Label encoding: 0 -> USER0_LOCAL , 1 -> USER0_REMOTE, 2 -> USER1_LOCAL
/home/ubuntu/data/installs/miniconda3/envs/dl-python35/lib/python3.5/site-packages/sklearn/utils/validation.py:429: DataConversionWarning: Data with input dtype int64 was converted to float64 by StandardScaler.
  warnings.warn(msg, _DataConversionWarning)
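
The DataConversionWarning above comes from passing the integer dt column to StandardScaler. A minimal, optional fix (not part of the original run) is to cast dt to float before the scaling step in the cell above:

In [ ]:
# Optional: cast dt explicitly so StandardScaler does not have to convert the dtype itself
df['dt'] = df['dt'].astype('float64')
df['dt'] = standard_scaler.fit_transform(df['dt'].values.reshape(-1, 1))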

Define hyperparameters



In [79]:
seq_size = 130
# This could be wrong:
# each time we train on a batch, every BatchNormalization layer normalizes its input with an
# approximate mean and variance taken from the examples in the current batch. These statistics
# are also blended with the ones computed for previous batches through a running average.
# This parameter controls how smoothly the running statistics change across batches; in practice
# it makes the generalization error noisier.
batch_normalization_momentum = 0.2
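
For intuition (an illustration, not part of the original notebook): assuming the usual running-average rule moving = momentum * moving + (1 - momentum) * batch, a momentum of 0.2 lets the running statistics follow each new batch closely:

In [ ]:
# Illustration only: with a low momentum the running mean jumps towards each batch mean
moving_mean = 0.0
for batch_mean in [0.5, -0.3, 0.8]:  # made-up per-batch means
    moving_mean = (batch_normalization_momentum * moving_mean
                   + (1 - batch_normalization_momentum) * batch_mean)
    print(moving_mean)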

Divide the data into subsequences; each subsequence will be one training example.



In [80]:
min_len = np.min([
    len(df[df.origin == 0]), len(df[df.origin == 1]), len(df[df.origin == 2])])

num_examples = min_len // seq_size

# Separate dataset in different classes
class0 = df[df.origin == 0][:num_examples * seq_size][['x', 'y', 'dt']].values
class1 = df[df.origin == 1][:num_examples * seq_size][['x', 'y', 'dt']].values
class2 = df[df.origin == 2][:num_examples * seq_size][['x', 'y', 'dt']].values

# Split in subsequences
class0_x = np.vsplit(class0, num_examples)
class1_x = np.vsplit(class1, num_examples)
class2_x = np.vsplit(class2, num_examples)

# Concatenate all features sequentially into one vector of length seq_size * 3 per example,
# as scikit-learn estimators expect 2-D (n_samples, n_features) input (see the short sketch
# after this cell). It is unclear why this flattened representation still works.
# class0_x = [ arr.reshape(seq_size*3) for arr in class0_x ]
# class1_x = [ arr.reshape(seq_size*3) for arr in class1_x ]
# class2_x = [ arr.reshape(seq_size*3) for arr in class2_x ]

# Create labels
class0_y = np.full(num_examples, 0, dtype=np.float32)
class1_y = np.full(num_examples, 1, dtype=np.float32)
class2_y = np.full(num_examples, 2, dtype=np.float32)

# Join all examples for different labels
x = np.concatenate([class0_x, class2_x], axis=0)
y = np.append(class0_y, class2_y)

# Random permutation of all the examples
permutation = np.random.permutation(len(y))
y = y[permutation]
x = x[permutation]
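
As referenced in the comment above, a short sketch (not executed in the original run) of what the flattening produces:

In [ ]:
# One (seq_size, 3) subsequence flattened into a 1-D feature vector of length seq_size * 3
example = class0_x[0]
print(example.shape, '->', example.reshape(seq_size * 3).shape)  # (130, 3) -> (390,)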

Split into training and testing sets



In [82]:
from keras.utils import np_utils

# Collapse the labels to a binary problem (class 0 stays 0, everything else becomes 1),
# then one-hot encode for the two-unit softmax output
y = [0 if i == 0 else 1 for i in y]
y = np_utils.to_categorical(y, 2)

split = int(num_examples*2 * 0.8)
x_train = x[:split]
y_train = y[:split]
x_test = x[split:]
y_test = y[split:]

In [93]:
y_test.shape


Out[93]:
(22, 2)

In [83]:
# OLD CODE
# data_points = np.min([len(remote['x']), len(local['x'])])
# seq_size = 50
# num_examples = data_points // seq_size
# # make data_points divisible by seq_size
# data_points = num_examples * seq_size
# remote_data = np.array(remote['x'][:data_points,0], dtype=np.float32)
# local_data = np.array(local['x'][:data_points,0], dtype=np.float32)
# # normalization
# remote_data_std = np.std(remote_data)
# remote_data_mean = np.mean(remote_data)
# remote_data = (remote_data - remote_data_mean) / (remote_data_std)

# local_data_std = np.std(local_data)
# local_data_mean = np.mean(local_data)
# local_data = (local_data - local_data_mean) / (local_data_std)
# # feature standardization
# classes = 2
# remote_data = (remote_data - remote_data.min())/(remote_data.max()-remote_data.min())*(classes-1)
# local_data = (local_data - local_data.min())/(local_data.max()-local_data.min())*(classes-1)

# remote_data = np.hsplit(remote_data, num_examples)
# local_data = np.hsplit(local_data, num_examples)

# y_remote = np.full(num_examples, 0, dtype=np.float32)
# y_local = np.full(num_examples, 1, dtype=np.float32)

# y = np.append(y_remote, y_local)

# from keras.utils import np_utils
# y = np_utils.to_categorical(y, classes)

# x = np.vstack((remote_data, local_data))

# permutation = np.random.permutation(len(y))
# y = y[permutation]
# x = x[permutation]

# split = int(num_examples*2 * 0.8)
# x_train = x[:split]
# y_train = y[:split]
# x_test = x[split+1:]
# y_test = y[split+1:]

# x_train = np.expand_dims(x_train, axis=2)
# x_test = np.expand_dims(x_test, axis=2)

In [84]:
# Plot the dt channel of randomly chosen training examples (from the first 16) in a 4x4 grid
gs = gridspec.GridSpec(4, 4, top=1., bottom=0., right=1., left=0., hspace=0.5,
        wspace=0.5)
for g in gs:
    ax = plt.subplot(g)
    ax.plot(x_train[np.random.randint(16)][:, 2])



In [85]:
from keras.models import Sequential
from keras.layers.convolutional import Conv1D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.pooling import GlobalAveragePooling1D
from keras.layers.core import Dense, Activation, Flatten

num_classes = 2
data_point_dimensionality = 3

model = Sequential([
        Conv1D(128, 8, padding='same', input_shape=(seq_size, data_point_dimensionality)),
        BatchNormalization(momentum=batch_normalization_momentum),
        LeakyReLU(),

        Conv1D(256, 5, padding='same'),
        BatchNormalization(momentum=batch_normalization_momentum),
        LeakyReLU(),

        Conv1D(128, 3, padding='same'),
        BatchNormalization(momentum=batch_normalization_momentum),
        LeakyReLU(),

        GlobalAveragePooling1D(),
        Dense(num_classes, activation='softmax')
])

# from model import get_model
# model = get_model((x_train.shape[1], 1), batch_normalization_momentum, classes, learning_rate)

In [86]:
for layer in model.layers:
    print("{:30}{} -> {}".format(layer.name, layer.input_shape, layer.output_shape))


conv1d_16                     (None, 130, 3) -> (None, 130, 128)
batch_normalization_16        (None, 130, 128) -> (None, 130, 128)
leaky_re_lu_16                (None, 130, 128) -> (None, 130, 128)
conv1d_17                     (None, 130, 128) -> (None, 130, 256)
batch_normalization_17        (None, 130, 256) -> (None, 130, 256)
leaky_re_lu_17                (None, 130, 256) -> (None, 130, 256)
conv1d_18                     (None, 130, 256) -> (None, 130, 128)
batch_normalization_18        (None, 130, 128) -> (None, 130, 128)
leaky_re_lu_18                (None, 130, 128) -> (None, 130, 128)
global_average_pooling1d_6    (None, 130, 128) -> (None, 128)
dense_6                       (None, 128) -> (None, 2)

In [87]:
learning_rate = 0.0000005

optimizer = Adam(lr=learning_rate)

In [88]:
epochs = 100
batch_size = 10

model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

hist = model.fit(x_train, y_train,
                 batch_size=batch_size, epochs=epochs, verbose=1,
                 validation_data=(x_test, y_test))

# early_stopper = EarlyStopping(monitor='loss', patience=50)
# hist = model.fit(x_train, y_train,
#                  batch_size=batch_size, epochs=epochs, verbose=1,
#                  validation_data=(x_test, y_test),
#                  callbacks=[early_stopper])


Train on 86 samples, validate on 22 samples
Epoch 1/100
86/86 [==============================] - 1s - loss: 0.6395 - acc: 0.6047 - val_loss: 0.6209 - val_acc: 0.5455
Epoch 2/100
86/86 [==============================] - 0s - loss: 0.6046 - acc: 0.6860 - val_loss: 0.4503 - val_acc: 0.7727
Epoch 3/100
86/86 [==============================] - 0s - loss: 0.6085 - acc: 0.7093 - val_loss: 0.4320 - val_acc: 0.8636
Epoch 4/100
86/86 [==============================] - 0s - loss: 0.5342 - acc: 0.7907 - val_loss: 0.4412 - val_acc: 0.8636
Epoch 5/100
86/86 [==============================] - 0s - loss: 0.5521 - acc: 0.7558 - val_loss: 0.3943 - val_acc: 0.8636
Epoch 6/100
86/86 [==============================] - 0s - loss: 0.5100 - acc: 0.7791 - val_loss: 0.3739 - val_acc: 0.8636
Epoch 7/100
86/86 [==============================] - 0s - loss: 0.5110 - acc: 0.7791 - val_loss: 0.3006 - val_acc: 0.9091
Epoch 8/100
86/86 [==============================] - 0s - loss: 0.5281 - acc: 0.7558 - val_loss: 0.4746 - val_acc: 0.7727
Epoch 9/100
86/86 [==============================] - 0s - loss: 0.5111 - acc: 0.7907 - val_loss: 0.2894 - val_acc: 0.9091
Epoch 10/100
86/86 [==============================] - 0s - loss: 0.5143 - acc: 0.7907 - val_loss: 0.3275 - val_acc: 0.8636
Epoch 11/100
86/86 [==============================] - 0s - loss: 0.4717 - acc: 0.8372 - val_loss: 0.3117 - val_acc: 0.8636
Epoch 12/100
86/86 [==============================] - 0s - loss: 0.4913 - acc: 0.8256 - val_loss: 0.2523 - val_acc: 0.9091
Epoch 13/100
86/86 [==============================] - 0s - loss: 0.4756 - acc: 0.8140 - val_loss: 0.3194 - val_acc: 0.8636
Epoch 14/100
86/86 [==============================] - 0s - loss: 0.5164 - acc: 0.7674 - val_loss: 0.2737 - val_acc: 0.9091
Epoch 15/100
86/86 [==============================] - 0s - loss: 0.5020 - acc: 0.7674 - val_loss: 0.4340 - val_acc: 0.8182
Epoch 16/100
86/86 [==============================] - 0s - loss: 0.5045 - acc: 0.8140 - val_loss: 0.5216 - val_acc: 0.7727
Epoch 17/100
86/86 [==============================] - 0s - loss: 0.4449 - acc: 0.8256 - val_loss: 0.2813 - val_acc: 0.8636
Epoch 18/100
86/86 [==============================] - 0s - loss: 0.4534 - acc: 0.8372 - val_loss: 0.2601 - val_acc: 0.9091
Epoch 19/100
86/86 [==============================] - 0s - loss: 0.5123 - acc: 0.7791 - val_loss: 0.2543 - val_acc: 0.9545
Epoch 20/100
86/86 [==============================] - 0s - loss: 0.4527 - acc: 0.8256 - val_loss: 0.2471 - val_acc: 0.9545
Epoch 21/100
86/86 [==============================] - 0s - loss: 0.4385 - acc: 0.8256 - val_loss: 0.2271 - val_acc: 0.9091
Epoch 22/100
86/86 [==============================] - 0s - loss: 0.5175 - acc: 0.7791 - val_loss: 0.2099 - val_acc: 0.9545
Epoch 23/100
86/86 [==============================] - 0s - loss: 0.4405 - acc: 0.8372 - val_loss: 0.2490 - val_acc: 0.9091
Epoch 24/100
86/86 [==============================] - 0s - loss: 0.5098 - acc: 0.7558 - val_loss: 0.2370 - val_acc: 0.9091
Epoch 25/100
86/86 [==============================] - 0s - loss: 0.4075 - acc: 0.8488 - val_loss: 0.2673 - val_acc: 0.9091
Epoch 26/100
86/86 [==============================] - 0s - loss: 0.4624 - acc: 0.8023 - val_loss: 0.2651 - val_acc: 1.0000
Epoch 27/100
86/86 [==============================] - 0s - loss: 0.4833 - acc: 0.7791 - val_loss: 0.3002 - val_acc: 0.8636
Epoch 28/100
86/86 [==============================] - 0s - loss: 0.4425 - acc: 0.8256 - val_loss: 0.3222 - val_acc: 0.8182
Epoch 29/100
86/86 [==============================] - 0s - loss: 0.4244 - acc: 0.8721 - val_loss: 0.1981 - val_acc: 1.0000
Epoch 30/100
86/86 [==============================] - 0s - loss: 0.4648 - acc: 0.7791 - val_loss: 0.4140 - val_acc: 0.8182
Epoch 31/100
86/86 [==============================] - 0s - loss: 0.4052 - acc: 0.8488 - val_loss: 0.2986 - val_acc: 0.8636
Epoch 32/100
86/86 [==============================] - 0s - loss: 0.4274 - acc: 0.8605 - val_loss: 0.5328 - val_acc: 0.7727
Epoch 33/100
86/86 [==============================] - 0s - loss: 0.4671 - acc: 0.7907 - val_loss: 0.2291 - val_acc: 1.0000
Epoch 34/100
86/86 [==============================] - 0s - loss: 0.4235 - acc: 0.8372 - val_loss: 0.2396 - val_acc: 0.9091
Epoch 35/100
86/86 [==============================] - 0s - loss: 0.4154 - acc: 0.7907 - val_loss: 0.2380 - val_acc: 0.9091
Epoch 36/100
86/86 [==============================] - 0s - loss: 0.4528 - acc: 0.8023 - val_loss: 0.2461 - val_acc: 0.9545
Epoch 37/100
86/86 [==============================] - 0s - loss: 0.4200 - acc: 0.8256 - val_loss: 0.2441 - val_acc: 0.9545
Epoch 38/100
86/86 [==============================] - 0s - loss: 0.4392 - acc: 0.8372 - val_loss: 0.2933 - val_acc: 0.8636
Epoch 39/100
86/86 [==============================] - 0s - loss: 0.4110 - acc: 0.8140 - val_loss: 0.1961 - val_acc: 0.9545
Epoch 40/100
86/86 [==============================] - 0s - loss: 0.4443 - acc: 0.8372 - val_loss: 0.2065 - val_acc: 1.0000
Epoch 41/100
86/86 [==============================] - 0s - loss: 0.4316 - acc: 0.8140 - val_loss: 0.4697 - val_acc: 0.8182
Epoch 42/100
86/86 [==============================] - 0s - loss: 0.5458 - acc: 0.7674 - val_loss: 0.2252 - val_acc: 1.0000
Epoch 43/100
86/86 [==============================] - 0s - loss: 0.4766 - acc: 0.8140 - val_loss: 0.4039 - val_acc: 0.8182
Epoch 44/100
86/86 [==============================] - 0s - loss: 0.4367 - acc: 0.8256 - val_loss: 0.2718 - val_acc: 0.8636
Epoch 45/100
86/86 [==============================] - 0s - loss: 0.3656 - acc: 0.8605 - val_loss: 0.1806 - val_acc: 0.9545
Epoch 46/100
86/86 [==============================] - 0s - loss: 0.5086 - acc: 0.7558 - val_loss: 0.3003 - val_acc: 0.8636
Epoch 47/100
86/86 [==============================] - 0s - loss: 0.4098 - acc: 0.8372 - val_loss: 0.2717 - val_acc: 0.9091
Epoch 48/100
86/86 [==============================] - 0s - loss: 0.4334 - acc: 0.8023 - val_loss: 0.2506 - val_acc: 0.9091
Epoch 49/100
86/86 [==============================] - 0s - loss: 0.3989 - acc: 0.8488 - val_loss: 0.2676 - val_acc: 0.9091
Epoch 50/100
86/86 [==============================] - 0s - loss: 0.4685 - acc: 0.7907 - val_loss: 0.2367 - val_acc: 0.9545
Epoch 51/100
86/86 [==============================] - 0s - loss: 0.3905 - acc: 0.8721 - val_loss: 0.2561 - val_acc: 0.9545
Epoch 52/100
86/86 [==============================] - 0s - loss: 0.4142 - acc: 0.8140 - val_loss: 0.3332 - val_acc: 0.8636
Epoch 53/100
86/86 [==============================] - 0s - loss: 0.3810 - acc: 0.8721 - val_loss: 0.2176 - val_acc: 0.9545
Epoch 54/100
86/86 [==============================] - 0s - loss: 0.4390 - acc: 0.8372 - val_loss: 0.2333 - val_acc: 0.9545
Epoch 55/100
86/86 [==============================] - 0s - loss: 0.4138 - acc: 0.8023 - val_loss: 0.2802 - val_acc: 0.8636
Epoch 56/100
86/86 [==============================] - 0s - loss: 0.4151 - acc: 0.8488 - val_loss: 0.2729 - val_acc: 0.8636
Epoch 57/100
86/86 [==============================] - 0s - loss: 0.3979 - acc: 0.8605 - val_loss: 0.2556 - val_acc: 0.8636
Epoch 58/100
86/86 [==============================] - 0s - loss: 0.4096 - acc: 0.8256 - val_loss: 0.2654 - val_acc: 0.9091
Epoch 59/100
86/86 [==============================] - 0s - loss: 0.4550 - acc: 0.8372 - val_loss: 0.5004 - val_acc: 0.7727
Epoch 60/100
86/86 [==============================] - 0s - loss: 0.4531 - acc: 0.8372 - val_loss: 0.2401 - val_acc: 0.9545
Epoch 61/100
86/86 [==============================] - 0s - loss: 0.3864 - acc: 0.8488 - val_loss: 0.3022 - val_acc: 0.8182
Epoch 62/100
86/86 [==============================] - 0s - loss: 0.4317 - acc: 0.8372 - val_loss: 0.4341 - val_acc: 0.8182
Epoch 63/100
86/86 [==============================] - 0s - loss: 0.4822 - acc: 0.7442 - val_loss: 0.4875 - val_acc: 0.8182
Epoch 64/100
86/86 [==============================] - 0s - loss: 0.4249 - acc: 0.8023 - val_loss: 0.2510 - val_acc: 0.9091
Epoch 65/100
86/86 [==============================] - 0s - loss: 0.4498 - acc: 0.7907 - val_loss: 0.2521 - val_acc: 0.9545
Epoch 66/100
86/86 [==============================] - 0s - loss: 0.4252 - acc: 0.8488 - val_loss: 0.3963 - val_acc: 0.8182
Epoch 67/100
86/86 [==============================] - 0s - loss: 0.3811 - acc: 0.8488 - val_loss: 0.3176 - val_acc: 0.8182
Epoch 68/100
86/86 [==============================] - 0s - loss: 0.3743 - acc: 0.8488 - val_loss: 0.2121 - val_acc: 0.9545
Epoch 69/100
86/86 [==============================] - 0s - loss: 0.3888 - acc: 0.8837 - val_loss: 0.4172 - val_acc: 0.8182
Epoch 70/100
86/86 [==============================] - 0s - loss: 0.3865 - acc: 0.8605 - val_loss: 0.1857 - val_acc: 1.0000
Epoch 71/100
86/86 [==============================] - 0s - loss: 0.3928 - acc: 0.8605 - val_loss: 0.2564 - val_acc: 0.9091
Epoch 72/100
86/86 [==============================] - 0s - loss: 0.3951 - acc: 0.8488 - val_loss: 0.2959 - val_acc: 0.8636
Epoch 73/100
86/86 [==============================] - 0s - loss: 0.4836 - acc: 0.7907 - val_loss: 0.5246 - val_acc: 0.7727
Epoch 74/100
86/86 [==============================] - 0s - loss: 0.4025 - acc: 0.8488 - val_loss: 0.3118 - val_acc: 0.8182
Epoch 75/100
86/86 [==============================] - 0s - loss: 0.4246 - acc: 0.8488 - val_loss: 0.2280 - val_acc: 1.0000
Epoch 76/100
86/86 [==============================] - 0s - loss: 0.3837 - acc: 0.8721 - val_loss: 0.2833 - val_acc: 0.8182
Epoch 77/100
86/86 [==============================] - 0s - loss: 0.3852 - acc: 0.8721 - val_loss: 0.2859 - val_acc: 0.7727
Epoch 78/100
86/86 [==============================] - 0s - loss: 0.3971 - acc: 0.8605 - val_loss: 0.2806 - val_acc: 0.8636
Epoch 79/100
86/86 [==============================] - 0s - loss: 0.3550 - acc: 0.8605 - val_loss: 0.4068 - val_acc: 0.8182
Epoch 80/100
86/86 [==============================] - 0s - loss: 0.4415 - acc: 0.8256 - val_loss: 0.2207 - val_acc: 0.9545
Epoch 81/100
86/86 [==============================] - 0s - loss: 0.4211 - acc: 0.8023 - val_loss: 0.3148 - val_acc: 0.8182
Epoch 82/100
86/86 [==============================] - 0s - loss: 0.3610 - acc: 0.8721 - val_loss: 0.2716 - val_acc: 0.8636
Epoch 83/100
86/86 [==============================] - 0s - loss: 0.4014 - acc: 0.8256 - val_loss: 0.2961 - val_acc: 0.8182
Epoch 84/100
86/86 [==============================] - 0s - loss: 0.3929 - acc: 0.8372 - val_loss: 0.2693 - val_acc: 0.8636
Epoch 85/100
86/86 [==============================] - 0s - loss: 0.3632 - acc: 0.8605 - val_loss: 0.2653 - val_acc: 0.7727
Epoch 86/100
86/86 [==============================] - 0s - loss: 0.3466 - acc: 0.8837 - val_loss: 0.2545 - val_acc: 0.9091
Epoch 87/100
86/86 [==============================] - 0s - loss: 0.4445 - acc: 0.7791 - val_loss: 0.5079 - val_acc: 0.8182
Epoch 88/100
86/86 [==============================] - 0s - loss: 0.4338 - acc: 0.7907 - val_loss: 0.2071 - val_acc: 1.0000
Epoch 89/100
86/86 [==============================] - 0s - loss: 0.4011 - acc: 0.8140 - val_loss: 0.3706 - val_acc: 0.8182
Epoch 90/100
86/86 [==============================] - 0s - loss: 0.4279 - acc: 0.8023 - val_loss: 0.4648 - val_acc: 0.8182
Epoch 91/100
86/86 [==============================] - 0s - loss: 0.4440 - acc: 0.8488 - val_loss: 0.2246 - val_acc: 1.0000
Epoch 92/100
86/86 [==============================] - 0s - loss: 0.3578 - acc: 0.8721 - val_loss: 0.2093 - val_acc: 0.9545
Epoch 93/100
86/86 [==============================] - 0s - loss: 0.3851 - acc: 0.8372 - val_loss: 0.1956 - val_acc: 0.9545
Epoch 94/100
86/86 [==============================] - 0s - loss: 0.3956 - acc: 0.8488 - val_loss: 0.1954 - val_acc: 0.9545
Epoch 95/100
86/86 [==============================] - 0s - loss: 0.3997 - acc: 0.8372 - val_loss: 0.3282 - val_acc: 0.8636
Epoch 96/100
86/86 [==============================] - 0s - loss: 0.3745 - acc: 0.8256 - val_loss: 0.1854 - val_acc: 1.0000
Epoch 97/100
86/86 [==============================] - 0s - loss: 0.3872 - acc: 0.8256 - val_loss: 0.2231 - val_acc: 0.9545
Epoch 98/100
86/86 [==============================] - 0s - loss: 0.3841 - acc: 0.8605 - val_loss: 0.3262 - val_acc: 0.8182
Epoch 99/100
86/86 [==============================] - 0s - loss: 0.3763 - acc: 0.8488 - val_loss: 0.2194 - val_acc: 1.0000
Epoch 100/100
86/86 [==============================] - 0s - loss: 0.4160 - acc: 0.8256 - val_loss: 0.2230 - val_acc: 0.9545

In [89]:
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])


Out[89]:
[<matplotlib.lines.Line2D at 0x7fafede3fcf8>]

In [90]:
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])


Out[90]:
[<matplotlib.lines.Line2D at 0x7fafede3f780>]

To try: lower the number of samples; scaling of the retraining.