Deep Learning


In [1]:
from keras.layers import Input, Dense, Lambda, Layer
from keras.models import Model
from keras.layers.core import Dropout
from keras import regularizers
import keras
import pandas as pd
import numpy as np
from keras import backend as K
from keras import metrics
from collections import namedtuple
pd.set_option("display.max_rows",35)
%matplotlib inline


Using TensorFlow backend.

In [2]:
kdd_train_2labels = pd.read_pickle("dataset/kdd_train_2labels.pkl")
kdd_test_2labels = pd.read_pickle("dataset/kdd_test_2labels.pkl")

#y_train_labels = pd.read_pickle("dataset/kdd_train_2labels_y.pkl")
#y_train_labels = pd.read_pickle("dataset/kdd_train_2labels.pkl")
#y_test_labels = pd.read_pickle("dataset/kdd_test_2labels_y.pkl")

output_columns_2labels = ['is_Attack','is_Normal']

from sklearn import model_selection as ms
from sklearn import preprocessing as pp

x_input = kdd_train_2labels.drop(output_columns_2labels, axis = 1)
y_output = kdd_train_2labels.loc[:,output_columns_2labels]

ss = pp.StandardScaler()
x_input = ss.fit_transform(x_input)

#le = pp.LabelEncoder()
#y_train = le.fit_transform(y_train_labels).reshape(-1, 1)
#y_test = le.transform(y_test_labels).reshape(-1, 1)

y_train = kdd_train_2labels.loc[:,output_columns_2labels].values

x_train, x_valid, y_train, y_valid = ms.train_test_split(x_input, 
                              y_train, 
                              test_size=0.1)
#x_valid, x_test, y_valid, y_test = ms.train_test_split(x_valid, y_valid, test_size = 0.4)

x_test = kdd_test_2labels.drop(output_columns_2labels, axis = 1)
y_test = kdd_test_2labels.loc[:,output_columns_2labels].values

x_test = ss.transform(x_test)

#x_train = np.hstack((x_train, y_train))
#x_valid = np.hstack((x_valid, y_valid))

#x_test = np.hstack((x_test, np.random.normal(loc = 0, scale = 0.01, size = y_test.shape)))

In [ ]:
input_dim = 122
intermediate_dim = 10
latent_dim = 32
batch_size = 1409
hidden_layers = 8
classes = 2
drop_prob = 0.1

class Train:
    def build_vae_model():
        Train.x = Input(shape=(input_dim,))
        
        hidden_encoder = Train.x
        for i in range(hidden_layers):
            hidden_encoder = Dense(intermediate_dim, activation='relu', 
                      kernel_regularizer=keras.regularizers.l2(0.0001),
                      activity_regularizer=keras.regularizers.l1(0.0001))(hidden_encoder)
            
            hidden_encoder = Dropout(rate=drop_prob)(hidden_encoder)

        Train.mean_encoder = Dense(latent_dim, activation=None)(hidden_encoder)
        Train.logvar_encoder = Dense(latent_dim, activation=None)(hidden_encoder)

        def get_distrib(args):

            m_e, l_e = args

            # Sample epsilon
            epsilon = np.random.normal(loc=0.0, scale=0.05, size = (batch_size, latent_dim))

            # Sample latent variable
            z = m_e + K.exp(l_e / 2) * epsilon
            return z

        z = Lambda(get_distrib,name='z_dist')([Train.mean_encoder, Train.logvar_encoder])

        hidden_decoder = z
        for i in range(hidden_layers):
            hidden_decoder = Dense(intermediate_dim, activation="relu", 
                      kernel_regularizer=keras.regularizers.l2(0.0001),
                      activity_regularizer=keras.regularizers.l1(0.0001))(hidden_decoder)
            hidden_decoder = Dropout(rate=drop_prob)(hidden_decoder)

        Train.x_ = Dense(input_dim, activation=None, name='vae_output')(hidden_decoder)
        
    def build_softmax_model():
        Train.z_ = Input(shape=(latent_dim,))
        hidden_y = Dense(latent_dim, activation='relu', name='softmax_hidden')(Train.z_)
        Train.y = Dense(classes, activation='softmax', name='softmax_output')(hidden_y)
        
def vae_loss(x, x_decoded_mean):
    xent_loss = input_dim * keras.losses.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + Train.logvar_encoder - K.square(Train.mean_encoder) - K.exp(Train.logvar_encoder), axis=-1)
    return K.abs(K.mean(xent_loss + kl_loss))


Train.build_vae_model()
Train.build_softmax_model()

In [ ]:
import itertools
#features_arr = [4, 16, 32, 256, 1024]
#hidden_layers_arr = [2, 6, 10, 100]

#features_arr = [4, 16, 32]
#hidden_layers_arr = [2, 6, 10]

features_arr = [4, 16, 32]
hidden_layers_arr = [2, 4, 6]

epoch_arr = [50]

score = namedtuple("score", ['epoch', 'no_of_features','hidden_layers','train_score', 'test_score'])
scores = []
predictions = {}

for e, h, f in itertools.product(epoch_arr, hidden_layers_arr, features_arr):
    
    print(" \n Current Layer Attributes - epochs:{} hidden layers:{} features count:{}".format(e,h,f))
    latent_dim = f
    epochs = e
    hidden_layers = h
    
    train_size = x_train.shape[0] - x_train.shape[0]%batch_size
    valid_size = x_valid.shape[0] - x_valid.shape[0]%batch_size

    
    optimizer = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-04, decay=0.1)
    
    vae_model = Model(inputs = Train.x, outputs = Train.x_)
    vae_model.compile(optimizer = optimizer, 
                      loss = vae_loss)    
    vae_model.fit(x = x_train[:train_size,:], y = x_train[:train_size,:], 
                  shuffle=True, epochs=epochs, 
                  batch_size = batch_size, 
                  validation_data = (x_test, x_test),
                  verbose = 1)
    
    z_model = Model(inputs = Train.x, outputs = vae_model.get_layer("z_dist").output)
    z_train = z_model.predict(x_train[:train_size,:], batch_size=batch_size)
    z_valid = z_model.predict(x_valid[:valid_size,:], batch_size=batch_size)
    z_test = z_model.predict(x_test, batch_size=batch_size)
    
    sm_model = Model(inputs = Train.z_, outputs = Train.y)
    sm_model.compile(optimizer = optimizer, 
                      loss = keras.losses.categorical_crossentropy, 
                      metrics = ['accuracy'])
    
    sm_model.fit(x = z_train, y = y_train[:train_size,:],
                 shuffle=True, epochs=5, 
                  batch_size = batch_size, 
                  validation_data = (z_test, y_test),
                  verbose = 1)

    
    score_train = sm_model.evaluate(z_valid, y = y_valid[:valid_size,:],
                               batch_size = batch_size,
                               verbose = 1)
    
    score_test = sm_model.evaluate(z_test, y = y_test,
                           batch_size = batch_size,
                           verbose = 1)
    
    y_test_pred = sm_model.predict(z_test, batch_size=batch_size)
    

    y_pred = y_test_pred #np.argmax(y_test_pred[:,-2:], axis = 1)
    
    curr_pred = pd.DataFrame({"Attack_prob":y_pred[:,0], "Normal_prob":y_pred[:,1]})
    predictions.update({"{}_{}_{}".format(e,f,h):curr_pred})
    
    scores.append(score(e,f,h,score_train[-1], score_test[-1])) #score_test[-1]))
    
    print("\n Train Acc: {}, Test Acc: {}".format(score_train[-1], 
                                                  score_test[-1])  )
    
scores = pd.DataFrame(scores)


 
 Current Layer Attributes - epochs:50 hidden layers:2 features count:4
Train on 112720 samples, validate on 22544 samples
Epoch 1/50
112720/112720 [==============================] - 6s - loss: 4.6226 - val_loss: 9.6583
Epoch 2/50
112720/112720 [==============================] - 2s - loss: 4.2963 - val_loss: 17.3227
Epoch 3/50
112720/112720 [==============================] - 2s - loss: 4.1337 - val_loss: 14.8529
Epoch 4/50
112720/112720 [==============================] - 2s - loss: 3.8748 - val_loss: 17.1547
Epoch 5/50
112720/112720 [==============================] - 2s - loss: 3.9973 - val_loss: 15.5178
Epoch 6/50
112720/112720 [==============================] - 2s - loss: 4.1346 - val_loss: 14.2069s: 4.2
Epoch 7/50
112720/112720 [==============================] - 2s - loss: 4.0482 - val_loss: 13.7357
Epoch 8/50
112720/112720 [==============================] - 2s - loss: 3.9395 - val_loss: 14.0685
Epoch 9/50
112720/112720 [==============================] - 2s - loss: 4.1044 - val_loss: 13.1505
Epoch 10/50
112720/112720 [==============================] - 2s - loss: 3.9713 - val_loss: 13.8277
Epoch 11/50
112720/112720 [==============================] - 2s - loss: 4.2111 - val_loss: 13.9066
Epoch 12/50
112720/112720 [==============================] - 2s - loss: 4.5084 - val_loss: 13.9243
Epoch 13/50
112720/112720 [==============================] - 2s - loss: 4.1508 - val_loss: 14.1523
Epoch 14/50
112720/112720 [==============================] - 2s - loss: 4.1922 - val_loss: 14.2097
Epoch 15/50
112720/112720 [==============================] - 2s - loss: 4.2093 - val_loss: 14.4072
Epoch 16/50
112720/112720 [==============================] - 2s - loss: 3.6314 - val_loss: 15.0328
Epoch 17/50
112720/112720 [==============================] - 2s - loss: 4.1264 - val_loss: 15.5602
Epoch 18/50
112720/112720 [==============================] - 2s - loss: 3.8379 - val_loss: 14.6384
Epoch 19/50
112720/112720 [==============================] - 2s - loss: 4.2659 - val_loss: 14.5063
Epoch 20/50
112720/112720 [==============================] - 2s - loss: 4.1164 - val_loss: 14.4707
Epoch 21/50
112720/112720 [==============================] - 2s - loss: 3.5644 - val_loss: 14.4117
Epoch 22/50
112720/112720 [==============================] - 2s - loss: 3.7793 - val_loss: 14.4359
Epoch 23/50
112720/112720 [==============================] - 2s - loss: 3.6917 - val_loss: 14.5727
Epoch 24/50
112720/112720 [==============================] - 2s - loss: 3.9447 - val_loss: 14.5018
Epoch 25/50
112720/112720 [==============================] - 2s - loss: 3.8455 - val_loss: 14.4073
Epoch 26/50
112720/112720 [==============================] - 2s - loss: 3.8824 - val_loss: 13.7346
Epoch 27/50
112720/112720 [==============================] - 2s - loss: 4.3100 - val_loss: 13.8440
Epoch 28/50
112720/112720 [==============================] - 2s - loss: 3.7885 - val_loss: 13.6830
Epoch 29/50
112720/112720 [==============================] - 2s - loss: 4.1136 - val_loss: 13.9428
Epoch 30/50
112720/112720 [==============================] - 2s - loss: 3.9885 - val_loss: 14.0731
Epoch 31/50
112720/112720 [==============================] - 2s - loss: 3.9292 - val_loss: 13.9118
Epoch 32/50
112720/112720 [==============================] - 2s - loss: 3.5951 - val_loss: 14.7792
Epoch 33/50
112720/112720 [==============================] - 2s - loss: 3.6852 - val_loss: 14.5258
Epoch 34/50
112720/112720 [==============================] - 2s - loss: 3.9464 - val_loss: 15.2211
Epoch 35/50
112720/112720 [==============================] - 2s - loss: 3.4028 - val_loss: 17.8194
Epoch 36/50
112720/112720 [==============================] - 2s - loss: 3.9603 - val_loss: 18.6963
Epoch 37/50
112720/112720 [==============================] - 2s - loss: 3.8230 - val_loss: 18.7384
Epoch 38/50
112720/112720 [==============================] - 2s - loss: 4.3460 - val_loss: 18.6917
Epoch 39/50
112720/112720 [==============================] - 2s - loss: 4.1801 - val_loss: 18.5150
Epoch 40/50
112720/112720 [==============================] - 2s - loss: 4.0841 - val_loss: 18.0313
Epoch 41/50
112720/112720 [==============================] - 2s - loss: 3.9201 - val_loss: 17.8257
Epoch 42/50
112720/112720 [==============================] - 2s - loss: 4.0949 - val_loss: 17.8907
Epoch 43/50
112720/112720 [==============================] - 2s - loss: 3.9769 - val_loss: 17.9631
Epoch 44/50
112720/112720 [==============================] - 3s - loss: 4.0877 - val_loss: 18.1321
Epoch 45/50
112720/112720 [==============================] - 2s - loss: 4.4087 - val_loss: 18.1931
Epoch 46/50
112720/112720 [==============================] - 2s - loss: 4.1270 - val_loss: 18.1334
Epoch 47/50
112720/112720 [==============================] - 2s - loss: 4.4365 - val_loss: 18.0913
Epoch 48/50
112720/112720 [==============================] - 3s - loss: 3.9260 - val_loss: 18.0975
Epoch 49/50
112720/112720 [==============================] - 3s - loss: 4.0452 - val_loss: 18.1243
Epoch 50/50
112720/112720 [==============================] - 3s - loss: 3.8065 - val_loss: 18.2694
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 1s - loss: 0.6913 - acc: 0.5338 - val_loss: 0.7057 - val_acc: 0.4303
Epoch 2/5
112720/112720 [==============================] - 0s - loss: 0.6913 - acc: 0.5348 - val_loss: 0.7061 - val_acc: 0.4301
Epoch 3/5
112720/112720 [==============================] - 0s - loss: 0.6912 - acc: 0.5351 - val_loss: 0.7062 - val_acc: 0.4301
Epoch 4/5
112720/112720 [==============================] - 0s - loss: 0.6912 - acc: 0.5351 - val_loss: 0.7061 - val_acc: 0.4301
Epoch 5/5
112720/112720 [==============================] - 0s - loss: 0.6912 - acc: 0.5352 - val_loss: 0.7062 - val_acc: 0.4303
 1409/22544 [>.............................] - ETA: 0s
 Train Acc: 0.5308729633688927, Test Acc: 0.4302696958184242
 
 Current Layer Attributes - epochs:50 hidden layers:2 features count:16
Train on 112720 samples, validate on 22544 samples
Epoch 1/50
112720/112720 [==============================] - 7s - loss: 3.6495 - val_loss: 6.2388
Epoch 2/50
112720/112720 [==============================] - 3s - loss: 3.0892 - val_loss: 9.2299
Epoch 3/50
112720/112720 [==============================] - 3s - loss: 3.4179 - val_loss: 9.6655
Epoch 4/50
112720/112720 [==============================] - 3s - loss: 2.8032 - val_loss: 10.0836
Epoch 5/50
112720/112720 [==============================] - 3s - loss: 3.1915 - val_loss: 14.0896
Epoch 6/50
112720/112720 [==============================] - 3s - loss: 2.7326 - val_loss: 13.4261
Epoch 7/50
112720/112720 [==============================] - 3s - loss: 2.7104 - val_loss: 13.0943
Epoch 8/50
112720/112720 [==============================] - 3s - loss: 3.3168 - val_loss: 13.1281
Epoch 9/50
112720/112720 [==============================] - 3s - loss: 2.9920 - val_loss: 12.2821
Epoch 10/50
112720/112720 [==============================] - 3s - loss: 2.9959 - val_loss: 12.2116
Epoch 11/50
112720/112720 [==============================] - 3s - loss: 3.2765 - val_loss: 12.3960
Epoch 12/50
112720/112720 [==============================] - 3s - loss: 2.9445 - val_loss: 12.4079
Epoch 13/50
112720/112720 [==============================] - 3s - loss: 3.1459 - val_loss: 12.5645
Epoch 14/50
112720/112720 [==============================] - 3s - loss: 2.9850 - val_loss: 12.6982
Epoch 15/50
112720/112720 [==============================] - 3s - loss: 3.6318 - val_loss: 12.6987
Epoch 16/50
112720/112720 [==============================] - 3s - loss: 3.0783 - val_loss: 12.5225
Epoch 17/50
112720/112720 [==============================] - 3s - loss: 2.8146 - val_loss: 12.3031
Epoch 18/50
112720/112720 [==============================] - 2s - loss: 2.7078 - val_loss: 12.1945
Epoch 19/50
112720/112720 [==============================] - 2s - loss: 3.1515 - val_loss: 11.6738
Epoch 20/50
112720/112720 [==============================] - 2s - loss: 3.2896 - val_loss: 11.6794
Epoch 21/50
112720/112720 [==============================] - 2s - loss: 3.0679 - val_loss: 11.5044
Epoch 22/50
112720/112720 [==============================] - 2s - loss: 3.3226 - val_loss: 12.1108
Epoch 23/50
112720/112720 [==============================] - 2s - loss: 3.2274 - val_loss: 11.8361
Epoch 24/50
112720/112720 [==============================] - 3s - loss: 3.0953 - val_loss: 11.8267
Epoch 25/50
112720/112720 [==============================] - 3s - loss: 3.1074 - val_loss: 11.8097
Epoch 26/50
112720/112720 [==============================] - 3s - loss: 3.2979 - val_loss: 12.2779
Epoch 27/50
112720/112720 [==============================] - 3s - loss: 3.2541 - val_loss: 11.9326
Epoch 28/50
112720/112720 [==============================] - 3s - loss: 3.0018 - val_loss: 11.9822
Epoch 29/50
112720/112720 [==============================] - 3s - loss: 3.0226 - val_loss: 11.9825
Epoch 30/50
112720/112720 [==============================] - 3s - loss: 2.9763 - val_loss: 11.9292
Epoch 31/50
112720/112720 [==============================] - 3s - loss: 3.0620 - val_loss: 11.9390
Epoch 32/50
112720/112720 [==============================] - 3s - loss: 2.8541 - val_loss: 11.8941
Epoch 33/50
112720/112720 [==============================] - 3s - loss: 3.0832 - val_loss: 11.9300
Epoch 34/50
112720/112720 [==============================] - 3s - loss: 3.0755 - val_loss: 11.9480
Epoch 35/50
112720/112720 [==============================] - 3s - loss: 2.8983 - val_loss: 11.8686
Epoch 36/50
112720/112720 [==============================] - 3s - loss: 3.5553 - val_loss: 11.8753
Epoch 37/50
112720/112720 [==============================] - 3s - loss: 2.7190 - val_loss: 12.1316
Epoch 38/50
112720/112720 [==============================] - 3s - loss: 2.9486 - val_loss: 12.0800
Epoch 39/50
112720/112720 [==============================] - 3s - loss: 2.9564 - val_loss: 11.7788
Epoch 40/50
112720/112720 [==============================] - 3s - loss: 3.3303 - val_loss: 11.8428
Epoch 41/50
112720/112720 [==============================] - 3s - loss: 2.7543 - val_loss: 11.8531
Epoch 42/50
112720/112720 [==============================] - 3s - loss: 2.9762 - val_loss: 11.8256
Epoch 43/50
112720/112720 [==============================] - 3s - loss: 2.7665 - val_loss: 11.8301
Epoch 44/50
112720/112720 [==============================] - 3s - loss: 3.1074 - val_loss: 11.8319
Epoch 45/50
112720/112720 [==============================] - 3s - loss: 3.3360 - val_loss: 11.8000
Epoch 46/50
112720/112720 [==============================] - 3s - loss: 2.7823 - val_loss: 11.8053
Epoch 47/50
112720/112720 [==============================] - 3s - loss: 3.1303 - val_loss: 11.7966
Epoch 48/50
112720/112720 [==============================] - 3s - loss: 2.9273 - val_loss: 11.8028
Epoch 49/50
112720/112720 [==============================] - 3s - loss: 2.6084 - val_loss: 11.8159
Epoch 50/50
112720/112720 [==============================] - 3s - loss: 3.5789 - val_loss: 11.8159
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 1s - loss: 0.6912 - acc: 0.5348 - val_loss: 0.7045 - val_acc: 0.4316
Epoch 2/5
112720/112720 [==============================] - 0s - loss: 0.6911 - acc: 0.5353 - val_loss: 0.7051 - val_acc: 0.4310
Epoch 3/5
112720/112720 [==============================] - 0s - loss: 0.6910 - acc: 0.5353 - val_loss: 0.7053 - val_acc: 0.4309
Epoch 4/5
112720/112720 [==============================] - 0s - loss: 0.6910 - acc: 0.5353 - val_loss: 0.7055 - val_acc: 0.4309
Epoch 5/5
112720/112720 [==============================] - 0s - loss: 0.6910 - acc: 0.5353 - val_loss: 0.7055 - val_acc: 0.4309
22544/22544 [==============================] - 0s     

 Train Acc: 0.5305180996656418, Test Acc: 0.43093506060540676
 
 Current Layer Attributes - epochs:50 hidden layers:2 features count:32
Train on 112720 samples, validate on 22544 samples
Epoch 1/50
112720/112720 [==============================] - 7s - loss: 3.0327 - val_loss: 31.9546
Epoch 2/50
112720/112720 [==============================] - 3s - loss: 3.3371 - val_loss: 31.6824
Epoch 3/50
112720/112720 [==============================] - 3s - loss: 2.7587 - val_loss: 30.9835
Epoch 4/50
112720/112720 [==============================] - 3s - loss: 2.7138 - val_loss: 31.2903
Epoch 5/50
112720/112720 [==============================] - 3s - loss: 3.0735 - val_loss: 31.2605
Epoch 6/50
112720/112720 [==============================] - 3s - loss: 2.6826 - val_loss: 31.1338
Epoch 7/50
112720/112720 [==============================] - 3s - loss: 3.0646 - val_loss: 31.1164
Epoch 8/50
112720/112720 [==============================] - 3s - loss: 3.1550 - val_loss: 31.1472
Epoch 9/50
112720/112720 [==============================] - 3s - loss: 3.1541 - val_loss: 30.9361
Epoch 10/50
112720/112720 [==============================] - 3s - loss: 2.6742 - val_loss: 30.8595
Epoch 11/50
112720/112720 [==============================] - 3s - loss: 2.6590 - val_loss: 30.7422
Epoch 12/50
112720/112720 [==============================] - 3s - loss: 2.5802 - val_loss: 30.7457
Epoch 13/50
112720/112720 [==============================] - 3s - loss: 2.7282 - val_loss: 30.7225
Epoch 14/50
112720/112720 [==============================] - 4s - loss: 2.4876 - val_loss: 29.5197
Epoch 15/50
112720/112720 [==============================] - 3s - loss: 2.6989 - val_loss: 29.0333
Epoch 16/50
112720/112720 [==============================] - 3s - loss: 2.9888 - val_loss: 28.9541
Epoch 17/50
112720/112720 [==============================] - 3s - loss: 2.2129 - val_loss: 28.9514
Epoch 18/50
112720/112720 [==============================] - 3s - loss: 2.7947 - val_loss: 28.8805
Epoch 19/50
112720/112720 [==============================] - 3s - loss: 3.2235 - val_loss: 28.9014
Epoch 20/50
112720/112720 [==============================] - 3s - loss: 2.7726 - val_loss: 28.8350
Epoch 21/50
112720/112720 [==============================] - 3s - loss: 2.5837 - val_loss: 28.8403
Epoch 22/50
112720/112720 [==============================] - 3s - loss: 2.6415 - val_loss: 28.8355
Epoch 23/50
112720/112720 [==============================] - 3s - loss: 2.7274 - val_loss: 28.8814
Epoch 24/50
112720/112720 [==============================] - 3s - loss: 2.5344 - val_loss: 28.8808s: - ETA
Epoch 25/50
112720/112720 [==============================] - 3s - loss: 2.9916 - val_loss: 28.8275
Epoch 26/50
112720/112720 [==============================] - 3s - loss: 2.5410 - val_loss: 28.8770
Epoch 27/50
112720/112720 [==============================] - 3s - loss: 2.5570 - val_loss: 28.8381
Epoch 28/50
112720/112720 [==============================] - 3s - loss: 2.5853 - val_loss: 28.8260
Epoch 29/50
112720/112720 [==============================] - 3s - loss: 3.0930 - val_loss: 28.8242
Epoch 30/50
112720/112720 [==============================] - 3s - loss: 3.2902 - val_loss: 28.8064
Epoch 31/50
112720/112720 [==============================] - 3s - loss: 2.9229 - val_loss: 28.8018
Epoch 32/50
112720/112720 [==============================] - 3s - loss: 2.8565 - val_loss: 28.7804
Epoch 33/50
111311/112720 [============================>.] - ETA: 0s - loss: 2.8026

In [ ]:
scores.sort_values("test_score", ascending=False)

In [ ]:
pd.Panel(predictions).to_pickle("dataset/keras_vae_dense_trained_seperately_nsl_kdd_predictions.pkl")
scores.to_pickle("dataset/keras_vae_dense_trained_seperately_nsl_kdd_scores.pkl")

In [ ]:
pd.Panel(predictions)

In [ ]: