In [1]:
import os
import time
import numpy as np
import pandas as pd
from sklearn.metrics import (
    f1_score, roc_curve, auc, roc_auc_score,
    precision_recall_curve, recall_score, precision_score,
    confusion_matrix, average_precision_score,
)

In [2]:
import matplotlib.pyplot as plt
%matplotlib inline

In [3]:
# data_features consists of lumisections from JetHT, which were preprocessed as follows:

# 1) samples with low lumi were deleted: 
# nonempty = np.where(data["lumi"] >= 0.01)[0]
# data = data.iloc[nonempty]

# 2) columns with std=0 were removed
# cols = data.select_dtypes([np.number]).columns
# std = data[cols].std()
# cols_to_drop = std[std==0].index
# data = data.drop(cols_to_drop, axis=1)

# 3) a standard scaler was applied to all features:
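# (the exact call is not shown here; a plausible equivalent, assuming
#  sklearn's StandardScaler on the numeric columns from step 2:)
# from sklearn.preprocessing import StandardScaler
# data[cols] = StandardScaler().fit_transform(data[cols])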

data_features = pd.read_hdf('/home/olgako/data/data_features_JetHT.hdf5', "data")
labels = pd.read_hdf('/home/olgako/data/labels_JetHT.hdf5', 'labels')

In [4]:
one_fifth = int(data_features.shape[0]/5)   # length of each test window
step = int(data_features.shape[0]/10)       # shift between successive windows
whole = data_features.shape[0]              # total number of lumisections

In [5]:
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
data_features = scaler.fit_transform(data_features)
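# note: the scaler is fit on the full dataset before the train/test split
# below, so test-window statistics influence the scaling; a stricter variant
# would fit the scaler on each training fold and only transform the test fold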

In [6]:
def get_error_df(X_test, predictions, y_test, mode='allmean', n_highest=100):
    """Per-sample reconstruction error paired with the true label."""
    squared_errors = np.power(X_test - predictions, 2)
    if mode == 'allmean':
        # mean squared error over all features
        mse = np.mean(squared_errors, axis=1)
    elif mode == 'topn':
        # mean over only the n_highest largest squared errors per sample:
        # np.partition moves the n_highest smallest values of the negated
        # errors (i.e. the largest errors) into the first n_highest columns
        temp = np.partition(-squared_errors, n_highest)
        mse = np.mean(-temp[:, :n_highest], axis=1)
    else:
        raise ValueError("mode must be 'allmean' or 'topn'")
    return pd.DataFrame({'reconstruction_error': mse,
                         'true_class': y_test})
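
# quick sanity check of the 'topn' selection on toy data:
# np.partition(-e, n)[:, :n] yields the n largest squared errors of each row
# (in arbitrary order), so negating and averaging gives the top-n MSE, e.g.
#   toy = np.array([[1., 5., 3., 2.]])
#   -np.partition(-toy, 2)[:, :2]   # -> the two largest values, 5. and 3.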

In [7]:
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"   # pin to the first GPU

import tensorflow as tf
tf.set_random_seed(0)
from keras import backend as K
config = tf.ConfigProto()
config.gpu_options.allow_growth = True   # allocate GPU memory on demand
sess = tf.Session(config=config)
K.set_session(sess)


Using TensorFlow backend.

In [8]:
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.layers import Input, Dense, Activation
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras import regularizers
from keras.layers.advanced_activations import PReLU, LeakyReLU
from sklearn.utils import shuffle
import h5py

In [9]:
input_dim = data_features.shape[1]
encoding_dim = 50

In [10]:
def buildAE():
    # shallow autoencoder: input_dim -> encoding_dim (bottleneck) -> input_dim
    input_layer = Input(shape=(input_dim,))
    encoder = Dense(encoding_dim, activation='linear')(input_layer)
    encoder = LeakyReLU(alpha=.1)(encoder)
    decoder = Dense(input_dim, activation='sigmoid')(encoder)
    return Model(inputs=input_layer, outputs=decoder)
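
# sigmoid outputs lie in [0, 1], matching the MinMax-scaled inputs; that is
# what makes binary_crossentropy usable as a reconstruction loss below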

In [11]:
buildAE().summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 2470)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 50)                123550    
_________________________________________________________________
leaky_re_lu_1 (LeakyReLU)    (None, 50)                0         
_________________________________________________________________
dense_2 (Dense)              (None, 2470)              125970    
=================================================================
Total params: 249,520
Trainable params: 249,520
Non-trainable params: 0
_________________________________________________________________

In [12]:
nb_epoch = 20
batch_size = 512

In [13]:
plt.figure(figsize=(10, 10))

for i in range(9):
    # rolling test window of length whole/5, shifted by whole/10 per fold,
    # so successive test windows overlap by half; the remainder is training
    indx_test = np.arange(i*step, one_fifth + i*step)
    indx_train = list(set(range(whole)) - set(indx_test))
    
    
    y_train = np.array(labels.iloc[indx_train], 'float32')
    y_test = np.array(labels.iloc[indx_test], 'float32')

    X_train = np.array(data_features[indx_train], 'float32')
    X_test = np.array(data_features[indx_test], 'float32')

    
    start_time = time.time()
    
    autoencoder = buildAE()
    autoencoder.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy')
    # fit on "good" (label 0) lumisections only, so that reconstruction error
    # can serve as an anomaly score on the mixed test window
    autoencoder.fit(X_train[y_train == 0.],
                    X_train[y_train == 0.],
                    epochs=nb_epoch,
                    batch_size=batch_size,
                    shuffle=True,
                    validation_split=0.1,
                    verbose=1,
                    initial_epoch=0)
    
    print("--- %s seconds ---" % (time.time() - start_time))
    

    predictions = autoencoder.predict(X_test)
    error_df = get_error_df(pd.DataFrame(X_test), pd.DataFrame(predictions),
                            y_test, mode='topn', n_highest=300)
    
    

    fpr, tpr, _ = roc_curve(error_df.true_class, error_df.reconstruction_error)
    average_precision = average_precision_score(error_df.true_class, error_df.reconstruction_error)
    auc_score = roc_auc_score(error_df.true_class, error_df.reconstruction_error)
    
    percent = np.sum(y_test) / float(len(y_test))
    plt.plot(fpr, tpr,
             label="frame: %d, AUC = %.3f, average_precision = %.3f, anomalies percentage = %.3f"
                   % (i, auc_score, average_precision, percent))
    
    
plt.legend(loc='lower right', fontsize=10)

plt.xlabel('FPR', fontsize=10)
plt.ylabel('TPR', fontsize=10)
plt.show()


Train on 115783 samples, validate on 12865 samples
Epoch 1/20
115783/115783 [==============================] - 3s - loss: 0.4550 - val_loss: 0.4357
Epoch 2/20
115783/115783 [==============================] - 3s - loss: 0.4350 - val_loss: 0.4346
Epoch 3/20
115783/115783 [==============================] - 3s - loss: 0.4344 - val_loss: 0.4343
Epoch 4/20
115783/115783 [==============================] - 3s - loss: 0.4342 - val_loss: 0.4340
Epoch 5/20
115783/115783 [==============================] - 2s - loss: 0.4338 - val_loss: 0.4337
Epoch 6/20
115783/115783 [==============================] - 3s - loss: 0.4334 - val_loss: 0.4332
Epoch 7/20
115783/115783 [==============================] - 2s - loss: 0.4330 - val_loss: 0.4328
Epoch 8/20
115783/115783 [==============================] - 3s - loss: 0.4328 - val_loss: 0.4326
Epoch 9/20
115783/115783 [==============================] - 2s - loss: 0.4326 - val_loss: 0.4323
Epoch 10/20
115783/115783 [==============================] - 3s - loss: 0.4323 - val_loss: 0.4321
Epoch 11/20
115783/115783 [==============================] - 2s - loss: 0.4321 - val_loss: 0.4320
Epoch 12/20
115783/115783 [==============================] - 3s - loss: 0.4321 - val_loss: 0.4320
Epoch 13/20
115783/115783 [==============================] - 3s - loss: 0.4320 - val_loss: 0.4319
Epoch 14/20
115783/115783 [==============================] - 2s - loss: 0.4319 - val_loss: 0.4318
Epoch 15/20
115783/115783 [==============================] - 3s - loss: 0.4318 - val_loss: 0.4317
Epoch 16/20
115783/115783 [==============================] - 2s - loss: 0.4318 - val_loss: 0.4317
Epoch 17/20
115783/115783 [==============================] - 3s - loss: 0.4317 - val_loss: 0.4316
Epoch 18/20
115783/115783 [==============================] - 2s - loss: 0.4316 - val_loss: 0.4315
Epoch 19/20
115783/115783 [==============================] - 3s - loss: 0.4316 - val_loss: 0.4315
Epoch 20/20
115783/115783 [==============================] - 2s - loss: 0.4315 - val_loss: 0.4315
--- 63.10294723510742 seconds ---
Train on 115292 samples, validate on 12811 samples
Epoch 1/20
115292/115292 [==============================] - 3s - loss: 0.4536 - val_loss: 0.4363
Epoch 2/20
115292/115292 [==============================] - 3s - loss: 0.4355 - val_loss: 0.4349
Epoch 3/20
115292/115292 [==============================] - 3s - loss: 0.4346 - val_loss: 0.4343
Epoch 4/20
115292/115292 [==============================] - 3s - loss: 0.4342 - val_loss: 0.4340
Epoch 5/20
115292/115292 [==============================] - 2s - loss: 0.4338 - val_loss: 0.4336
Epoch 6/20
115292/115292 [==============================] - 3s - loss: 0.4335 - val_loss: 0.4333
Epoch 7/20
115292/115292 [==============================] - 2s - loss: 0.4332 - val_loss: 0.4330
Epoch 8/20
115292/115292 [==============================] - 3s - loss: 0.4329 - val_loss: 0.4327
Epoch 9/20
115292/115292 [==============================] - 2s - loss: 0.4326 - val_loss: 0.4324
Epoch 10/20
115292/115292 [==============================] - 3s - loss: 0.4324 - val_loss: 0.4322
Epoch 11/20
115292/115292 [==============================] - 2s - loss: 0.4322 - val_loss: 0.4321
Epoch 12/20
115292/115292 [==============================] - 3s - loss: 0.4321 - val_loss: 0.4319
Epoch 13/20
115292/115292 [==============================] - 3s - loss: 0.4319 - val_loss: 0.4318
Epoch 14/20
115292/115292 [==============================] - 2s - loss: 0.4318 - val_loss: 0.4317
Epoch 15/20
115292/115292 [==============================] - 3s - loss: 0.4317 - val_loss: 0.4316
Epoch 16/20
115292/115292 [==============================] - 2s - loss: 0.4316 - val_loss: 0.4315
Epoch 17/20
115292/115292 [==============================] - 3s - loss: 0.4315 - val_loss: 0.4314
Epoch 18/20
115292/115292 [==============================] - 2s - loss: 0.4314 - val_loss: 0.4313
Epoch 19/20
115292/115292 [==============================] - 3s - loss: 0.4314 - val_loss: 0.4313
Epoch 20/20
115292/115292 [==============================] - 2s - loss: 0.4313 - val_loss: 0.4312
--- 62.52077293395996 seconds ---
Train on 115776 samples, validate on 12864 samples
Epoch 1/20
115776/115776 [==============================] - 3s - loss: 0.4543 - val_loss: 0.4360
Epoch 2/20
115776/115776 [==============================] - 3s - loss: 0.4358 - val_loss: 0.4348
Epoch 3/20
115776/115776 [==============================] - 3s - loss: 0.4350 - val_loss: 0.4343
Epoch 4/20
115776/115776 [==============================] - 3s - loss: 0.4346 - val_loss: 0.4339
Epoch 5/20
115776/115776 [==============================] - 2s - loss: 0.4343 - val_loss: 0.4337
Epoch 6/20
115776/115776 [==============================] - 3s - loss: 0.4341 - val_loss: 0.4334
Epoch 7/20
115776/115776 [==============================] - 2s - loss: 0.4337 - val_loss: 0.4330
Epoch 8/20
115776/115776 [==============================] - 3s - loss: 0.4334 - val_loss: 0.4327
Epoch 9/20
115776/115776 [==============================] - 3s - loss: 0.4331 - val_loss: 0.4324
Epoch 10/20
115776/115776 [==============================] - 2s - loss: 0.4328 - val_loss: 0.4322
Epoch 11/20
115776/115776 [==============================] - 3s - loss: 0.4326 - val_loss: 0.4320
Epoch 12/20
115776/115776 [==============================] - 2s - loss: 0.4325 - val_loss: 0.4319
Epoch 13/20
115776/115776 [==============================] - 3s - loss: 0.4324 - val_loss: 0.4318
Epoch 14/20
115776/115776 [==============================] - 2s - loss: 0.4323 - val_loss: 0.4317
Epoch 15/20
115776/115776 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4316
Epoch 16/20
115776/115776 [==============================] - 2s - loss: 0.4321 - val_loss: 0.4315
Epoch 17/20
115776/115776 [==============================] - 3s - loss: 0.4320 - val_loss: 0.4315
Epoch 18/20
115776/115776 [==============================] - 3s - loss: 0.4319 - val_loss: 0.4314
Epoch 19/20
115776/115776 [==============================] - 3s - loss: 0.4319 - val_loss: 0.4314
Epoch 20/20
115776/115776 [==============================] - 2s - loss: 0.4318 - val_loss: 0.4313
--- 62.73229098320007 seconds ---
Train on 115874 samples, validate on 12875 samples
Epoch 1/20
115874/115874 [==============================] - 3s - loss: 0.4539 - val_loss: 0.4358
Epoch 2/20
115874/115874 [==============================] - 3s - loss: 0.4352 - val_loss: 0.4344
Epoch 3/20
115874/115874 [==============================] - 2s - loss: 0.4345 - val_loss: 0.4340
Epoch 4/20
115874/115874 [==============================] - 3s - loss: 0.4342 - val_loss: 0.4337
Epoch 5/20
115874/115874 [==============================] - 3s - loss: 0.4339 - val_loss: 0.4333
Epoch 6/20
115874/115874 [==============================] - 2s - loss: 0.4335 - val_loss: 0.4330
Epoch 7/20
115874/115874 [==============================] - 3s - loss: 0.4332 - val_loss: 0.4327
Epoch 8/20
115874/115874 [==============================] - 2s - loss: 0.4330 - val_loss: 0.4324
Epoch 9/20
115874/115874 [==============================] - 3s - loss: 0.4327 - val_loss: 0.4323
Epoch 10/20
115874/115874 [==============================] - 2s - loss: 0.4326 - val_loss: 0.4321
Epoch 11/20
115874/115874 [==============================] - 3s - loss: 0.4324 - val_loss: 0.4319
Epoch 12/20
115874/115874 [==============================] - 2s - loss: 0.4323 - val_loss: 0.4318
Epoch 13/20
115874/115874 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4317
Epoch 14/20
115874/115874 [==============================] - 2s - loss: 0.4321 - val_loss: 0.4316
Epoch 15/20
115874/115874 [==============================] - 3s - loss: 0.4320 - val_loss: 0.4316
Epoch 16/20
115874/115874 [==============================] - 2s - loss: 0.4319 - val_loss: 0.4315
Epoch 17/20
115874/115874 [==============================] - 3s - loss: 0.4318 - val_loss: 0.4314
Epoch 18/20
115874/115874 [==============================] - 2s - loss: 0.4318 - val_loss: 0.4314
Epoch 19/20
115874/115874 [==============================] - 3s - loss: 0.4317 - val_loss: 0.4313
Epoch 20/20
115874/115874 [==============================] - 3s - loss: 0.4317 - val_loss: 0.4313
--- 62.12800073623657 seconds ---
Train on 115318 samples, validate on 12814 samples
Epoch 1/20
115318/115318 [==============================] - 3s - loss: 0.4535 - val_loss: 0.4369
Epoch 2/20
115318/115318 [==============================] - 3s - loss: 0.4352 - val_loss: 0.4351
Epoch 3/20
115318/115318 [==============================] - 3s - loss: 0.4341 - val_loss: 0.4342
Epoch 4/20
115318/115318 [==============================] - 2s - loss: 0.4337 - val_loss: 0.4338
Epoch 5/20
115318/115318 [==============================] - 3s - loss: 0.4334 - val_loss: 0.4336
Epoch 6/20
115318/115318 [==============================] - 2s - loss: 0.4332 - val_loss: 0.4333
Epoch 7/20
115318/115318 [==============================] - 3s - loss: 0.4328 - val_loss: 0.4329
Epoch 8/20
115318/115318 [==============================] - 2s - loss: 0.4324 - val_loss: 0.4326
Epoch 9/20
115318/115318 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4324
Epoch 10/20
115318/115318 [==============================] - 2s - loss: 0.4320 - val_loss: 0.4322
Epoch 11/20
115318/115318 [==============================] - 3s - loss: 0.4318 - val_loss: 0.4321
Epoch 12/20
115318/115318 [==============================] - 2s - loss: 0.4317 - val_loss: 0.4319
Epoch 13/20
115318/115318 [==============================] - 3s - loss: 0.4315 - val_loss: 0.4318
Epoch 14/20
115318/115318 [==============================] - 2s - loss: 0.4314 - val_loss: 0.4317
Epoch 15/20
115318/115318 [==============================] - 3s - loss: 0.4313 - val_loss: 0.4316
Epoch 16/20
115318/115318 [==============================] - 2s - loss: 0.4313 - val_loss: 0.4316
Epoch 17/20
115318/115318 [==============================] - 3s - loss: 0.4312 - val_loss: 0.4315
Epoch 18/20
115318/115318 [==============================] - 3s - loss: 0.4311 - val_loss: 0.4314
Epoch 19/20
115318/115318 [==============================] - 2s - loss: 0.4311 - val_loss: 0.4314
Epoch 20/20
115318/115318 [==============================] - 3s - loss: 0.4310 - val_loss: 0.4313
--- 62.43962574005127 seconds ---
Train on 115324 samples, validate on 12814 samples
Epoch 1/20
115324/115324 [==============================] - 3s - loss: 0.4525 - val_loss: 0.4366
Epoch 2/20
115324/115324 [==============================] - 3s - loss: 0.4350 - val_loss: 0.4353
Epoch 3/20
115324/115324 [==============================] - 2s - loss: 0.4342 - val_loss: 0.4344
Epoch 4/20
115324/115324 [==============================] - 3s - loss: 0.4338 - val_loss: 0.4340
Epoch 5/20
115324/115324 [==============================] - 3s - loss: 0.4335 - val_loss: 0.4337
Epoch 6/20
115324/115324 [==============================] - 3s - loss: 0.4332 - val_loss: 0.4334
Epoch 7/20
115324/115324 [==============================] - 3s - loss: 0.4329 - val_loss: 0.4330
Epoch 8/20
115324/115324 [==============================] - 2s - loss: 0.4326 - val_loss: 0.4327
Epoch 9/20
115324/115324 [==============================] - 3s - loss: 0.4323 - val_loss: 0.4325
Epoch 10/20
115324/115324 [==============================] - 2s - loss: 0.4321 - val_loss: 0.4323
Epoch 11/20
115324/115324 [==============================] - 3s - loss: 0.4319 - val_loss: 0.4321
Epoch 12/20
115324/115324 [==============================] - 2s - loss: 0.4317 - val_loss: 0.4320
Epoch 13/20
115324/115324 [==============================] - 3s - loss: 0.4316 - val_loss: 0.4319
Epoch 14/20
115324/115324 [==============================] - 2s - loss: 0.4315 - val_loss: 0.4317
Epoch 15/20
115324/115324 [==============================] - 3s - loss: 0.4313 - val_loss: 0.4316
Epoch 16/20
115324/115324 [==============================] - 2s - loss: 0.4312 - val_loss: 0.4316
Epoch 17/20
115324/115324 [==============================] - 3s - loss: 0.4312 - val_loss: 0.4315
Epoch 18/20
115324/115324 [==============================] - 2s - loss: 0.4311 - val_loss: 0.4314
Epoch 19/20
115324/115324 [==============================] - 3s - loss: 0.4310 - val_loss: 0.4314
Epoch 20/20
115324/115324 [==============================] - 3s - loss: 0.4310 - val_loss: 0.4314
--- 62.2017035484314 seconds ---
Train on 115416 samples, validate on 12825 samples
Epoch 1/20
115416/115416 [==============================] - 3s - loss: 0.4534 - val_loss: 0.4363
Epoch 2/20
115416/115416 [==============================] - 3s - loss: 0.4355 - val_loss: 0.4351
Epoch 3/20
115416/115416 [==============================] - 3s - loss: 0.4348 - val_loss: 0.4343
Epoch 4/20
115416/115416 [==============================] - 2s - loss: 0.4345 - val_loss: 0.4341
Epoch 5/20
115416/115416 [==============================] - 3s - loss: 0.4343 - val_loss: 0.4338
Epoch 6/20
115416/115416 [==============================] - 2s - loss: 0.4339 - val_loss: 0.4334
Epoch 7/20
115416/115416 [==============================] - 3s - loss: 0.4336 - val_loss: 0.4331
Epoch 8/20
115416/115416 [==============================] - 2s - loss: 0.4332 - val_loss: 0.4327
Epoch 9/20
115416/115416 [==============================] - 3s - loss: 0.4330 - val_loss: 0.4325
Epoch 10/20
115416/115416 [==============================] - 3s - loss: 0.4328 - val_loss: 0.4323
Epoch 11/20
115416/115416 [==============================] - 3s - loss: 0.4326 - val_loss: 0.4322
Epoch 12/20
115416/115416 [==============================] - 3s - loss: 0.4325 - val_loss: 0.4320
Epoch 13/20
115416/115416 [==============================] - 3s - loss: 0.4323 - val_loss: 0.4318
Epoch 14/20
115416/115416 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4317
Epoch 15/20
115416/115416 [==============================] - 3s - loss: 0.4320 - val_loss: 0.4316
Epoch 16/20
115416/115416 [==============================] - 3s - loss: 0.4320 - val_loss: 0.4315
Epoch 17/20
115416/115416 [==============================] - 2s - loss: 0.4319 - val_loss: 0.4314
Epoch 18/20
115416/115416 [==============================] - 3s - loss: 0.4318 - val_loss: 0.4314
Epoch 19/20
115416/115416 [==============================] - 2s - loss: 0.4317 - val_loss: 0.4313
Epoch 20/20
115416/115416 [==============================] - 3s - loss: 0.4317 - val_loss: 0.4313
--- 62.60910415649414 seconds ---
Train on 115781 samples, validate on 12865 samples
Epoch 1/20
115781/115781 [==============================] - 3s - loss: 0.4521 - val_loss: 0.4366
Epoch 2/20
115781/115781 [==============================] - 3s - loss: 0.4359 - val_loss: 0.4355
Epoch 3/20
115781/115781 [==============================] - 3s - loss: 0.4351 - val_loss: 0.4347
Epoch 4/20
115781/115781 [==============================] - 2s - loss: 0.4347 - val_loss: 0.4344
Epoch 5/20
115781/115781 [==============================] - 3s - loss: 0.4345 - val_loss: 0.4341
Epoch 6/20
115781/115781 [==============================] - 2s - loss: 0.4341 - val_loss: 0.4336
Epoch 7/20
115781/115781 [==============================] - 3s - loss: 0.4337 - val_loss: 0.4333
Epoch 8/20
115781/115781 [==============================] - 2s - loss: 0.4333 - val_loss: 0.4330
Epoch 9/20
115781/115781 [==============================] - 3s - loss: 0.4331 - val_loss: 0.4327
Epoch 10/20
115781/115781 [==============================] - 3s - loss: 0.4329 - val_loss: 0.4325
Epoch 11/20
115781/115781 [==============================] - 3s - loss: 0.4327 - val_loss: 0.4324
Epoch 12/20
115781/115781 [==============================] - 3s - loss: 0.4326 - val_loss: 0.4322
Epoch 13/20
115781/115781 [==============================] - 2s - loss: 0.4324 - val_loss: 0.4321
Epoch 14/20
115781/115781 [==============================] - 3s - loss: 0.4323 - val_loss: 0.4320
Epoch 15/20
115781/115781 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4319
Epoch 16/20
115781/115781 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4318
Epoch 17/20
115781/115781 [==============================] - 2s - loss: 0.4321 - val_loss: 0.4317
Epoch 18/20
115781/115781 [==============================] - 3s - loss: 0.4320 - val_loss: 0.4316
Epoch 19/20
115781/115781 [==============================] - 2s - loss: 0.4320 - val_loss: 0.4315
Epoch 20/20
115781/115781 [==============================] - 3s - loss: 0.4320 - val_loss: 0.4315
--- 63.159154176712036 seconds ---
Train on 115670 samples, validate on 12853 samples
Epoch 1/20
115670/115670 [==============================] - 3s - loss: 0.4546 - val_loss: 0.4373
Epoch 2/20
115670/115670 [==============================] - 3s - loss: 0.4359 - val_loss: 0.4348
Epoch 3/20
115670/115670 [==============================] - 3s - loss: 0.4351 - val_loss: 0.4328
Epoch 4/20
115670/115670 [==============================] - 3s - loss: 0.4348 - val_loss: 0.4323
Epoch 5/20
115670/115670 [==============================] - 2s - loss: 0.4345 - val_loss: 0.4317
Epoch 6/20
115670/115670 [==============================] - 3s - loss: 0.4342 - val_loss: 0.4313
Epoch 7/20
115670/115670 [==============================] - 2s - loss: 0.4339 - val_loss: 0.4309
Epoch 8/20
115670/115670 [==============================] - 3s - loss: 0.4335 - val_loss: 0.4306
Epoch 9/20
115670/115670 [==============================] - 3s - loss: 0.4333 - val_loss: 0.4304
Epoch 10/20
115670/115670 [==============================] - 3s - loss: 0.4331 - val_loss: 0.4301
Epoch 11/20
115670/115670 [==============================] - 2s - loss: 0.4329 - val_loss: 0.4300
Epoch 12/20
115670/115670 [==============================] - 3s - loss: 0.4328 - val_loss: 0.4298
Epoch 13/20
115670/115670 [==============================] - 2s - loss: 0.4327 - val_loss: 0.4297
Epoch 14/20
115670/115670 [==============================] - 3s - loss: 0.4326 - val_loss: 0.4296
Epoch 15/20
115670/115670 [==============================] - 3s - loss: 0.4325 - val_loss: 0.4295
Epoch 16/20
115670/115670 [==============================] - 3s - loss: 0.4324 - val_loss: 0.4294
Epoch 17/20
115670/115670 [==============================] - 3s - loss: 0.4323 - val_loss: 0.4294
Epoch 18/20
115670/115670 [==============================] - 3s - loss: 0.4323 - val_loss: 0.4293
Epoch 19/20
115670/115670 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4292
Epoch 20/20
115670/115670 [==============================] - 3s - loss: 0.4322 - val_loss: 0.4292
--- 63.109647274017334 seconds ---

In [14]:
from evaluation import *   # local helper module with summary metrics (not shown here)

In [15]:
ps, rs = perfomance(error_df.true_class, error_df.reconstruction_error)


P@10 1.0
recalls_values [0.8, 0.9, 0.95, 0.99]
precision_values [0.60498687664041995, 0.47702205882352944, 0.39409221902017288, 0.02148997134670487]
average_precision_score 0.682127627699
roc_auc_score 0.974657426931
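
# The local `evaluation` module is not shown. A plausible sketch of what
# `perfomance` might compute, matching the printed metrics above (the recall
# grid and the thresholding rule are assumptions):
#
# def perfomance(y_true, scores, recalls=(0.8, 0.9, 0.95, 0.99), k=10):
#     order = np.argsort(scores)[::-1]
#     print("P@%d" % k, np.mean(np.asarray(y_true)[order[:k]]))   # precision among top-k scores
#     precision, recall, _ = precision_recall_curve(y_true, scores)
#     ps = [precision[recall >= r].max() for r in recalls]        # best precision at each recall
#     print("recalls_values", list(recalls))
#     print("precision_values", ps)
#     print("average_precision_score", average_precision_score(y_true, scores))
#     print("roc_auc_score", roc_auc_score(y_true, scores))
#     return ps, list(recalls)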

In [ ]: