Read Data Sample


In [1]:
import pandas as pd
import numpy as np
import os
from collections import namedtuple
pd.set_option("display.max_rows",1000)
%matplotlib inline

In [2]:
%%bash
rm dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all.pkl


rm: cannot remove 'dataset/tf_vae_dense_trained_together_nsl_kdd_all.pkl': No such file or directory

In [3]:
class dataset:
    # binary-labelled (Normal vs. Attack) train/test splits
    kdd_train_2labels = pd.read_pickle("dataset/kdd_train_2labels.pkl")
    kdd_test_2labels = pd.read_pickle("dataset/kdd_test_2labels.pkl")
    # second test split, evaluated separately (reported as test_score_20 / f1_score_20 below)
    kdd_test__2labels = pd.read_pickle("dataset/kdd_test__2labels.pkl")
    
    # five-class variants (loaded here but not used below)
    kdd_train_5labels = pd.read_pickle("dataset/kdd_train_5labels.pkl")
    kdd_test_5labels = pd.read_pickle("dataset/kdd_test_5labels.pkl")

In [4]:
dataset.kdd_train_2labels.shape


Out[4]:
(125973, 124)

In [5]:
dataset.kdd_test_2labels.shape


Out[5]:
(22544, 124)
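The 124 columns are the 122 preprocessed NSL-KDD features plus the two one-hot label columns (is_Normal, is_Attack). A quick check of that split (a sketch, run after the cell above):

    label_cols = ['is_Normal', 'is_Attack']
    feature_cols = dataset.kdd_train_2labels.columns.drop(label_cols)
    print(len(feature_cols))   # 122, matching network.input_dim defined below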

In [6]:
from sklearn import model_selection as ms
from sklearn import preprocessing as pp

class preprocess:
    
    output_columns_2labels = ['is_Normal','is_Attack']
    
    x_input = dataset.kdd_train_2labels.drop(output_columns_2labels, axis = 1)
    y_output = dataset.kdd_train_2labels.loc[:,output_columns_2labels]

    x_test_input = dataset.kdd_test_2labels.drop(output_columns_2labels, axis = 1)
    y_test = dataset.kdd_test_2labels.loc[:,output_columns_2labels]

    x_test__input = dataset.kdd_test__2labels.drop(output_columns_2labels, axis = 1)
    y_test_ = dataset.kdd_test__2labels.loc[:,output_columns_2labels]
    
    ss = pp.StandardScaler()

    x_train = ss.fit_transform(x_input)
    x_test = ss.transform(x_test_input)
    x_test_ = ss.transform(x_test__input)

    y_train = y_output.values
    y_test = y_test.values
    y_test_ = y_test_.values

preprocess.x_train.std()


Out[6]:
0.99589320646770185
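The overall standard deviation sits slightly below 1 because StandardScaler standardizes each column individually, and any zero-variance (constant) column remains all zeros after centering. A per-column sanity check, plus a reminder that both test sets reuse the scaler fitted on the training data only (a sketch, assuming the preprocess cell above has run):

    col_std = preprocess.x_train.std(axis=0)
    print(col_std.min(), col_std.max())   # ~0 for any constant column, ~1 for the rest
    # test sets are transformed with training statistics only (no refitting), avoiding leakage
    print(preprocess.x_test.shape, preprocess.x_test_.shape)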

In [7]:
import tensorflow as tf

In [8]:
class network(object):
    
    input_dim = 122
    classes = 2
    hidden_encoder_dim = 122
    hidden_layers = 1
    latent_dim = 10

    hidden_decoder_dim = 122
    lam = 0.001
    
    def __init__(self, classes, hidden_layers, num_of_features):
        self.classes = classes
        self.hidden_layers = hidden_layers
        self.latent_dim = num_of_features
            
    def build_layers(self):
        tf.reset_default_graph()
        #learning_rate = tf.Variable(initial_value=0.001)

        input_dim = self.input_dim
        classes = self.classes
        hidden_encoder_dim = self.hidden_encoder_dim
        hidden_layers = self.hidden_layers
        latent_dim = self.latent_dim
        hidden_decoder_dim = self.hidden_decoder_dim
        lam = self.lam
        
        with tf.variable_scope("Input"):
            self.x = tf.placeholder("float", shape=[None, input_dim])
            self.y_ = tf.placeholder("float", shape=[None, classes])
            self.keep_prob = tf.placeholder("float")
            self.lr = tf.placeholder("float")
        
        with tf.variable_scope("Layer_Encoder"):

            hidden_encoder = tf.layers.dense(self.x, hidden_encoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
            hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
            for h in range(hidden_layers - 1):
                hidden_encoder = tf.layers.dense(hidden_encoder, hidden_encoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
                hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
                
        with tf.variable_scope("Layer_Mean"):
            mu_encoder = tf.layers.dense(hidden_encoder, latent_dim, activation = None, kernel_regularizer=tf.nn.l2_loss)

        with tf.variable_scope("Layer_Variance"):
            logvar_encoder = tf.layers.dense(hidden_encoder, latent_dim, activation = None, kernel_regularizer=tf.nn.l2_loss)

        with tf.variable_scope("Sampling_Distribution"):
            # Sample epsilon
            epsilon = tf.random_normal(tf.shape(logvar_encoder), mean=0, stddev=1, name='epsilon')

            # Sample latent variable
            std_encoder = tf.exp(0.5 * logvar_encoder)
            z = mu_encoder + tf.multiply(std_encoder, epsilon)
            
            #tf.summary.histogram("Sample_Distribution", z)

        with tf.variable_scope("Layer_Decoder"):
            hidden_decoder = tf.layers.dense(z, hidden_decoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
            hidden_decoder = tf.nn.dropout(hidden_decoder, self.keep_prob)
            for h in range(hidden_layers - 1):
                hidden_decoder = tf.layers.dense(hidden_decoder, hidden_decoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
                hidden_decoder = tf.nn.dropout(hidden_decoder, self.keep_prob)
                
        with tf.variable_scope("Layer_Reconstruction"):
            x_hat = tf.layers.dense(hidden_decoder, input_dim, activation = None)
            
        with tf.variable_scope("Layer_Dense_Hidden"):
            hidden_output = tf.layers.dense(z,latent_dim, activation=tf.nn.relu)

        with tf.variable_scope("Layer_Dense_Softmax"):
            self.y = tf.layers.dense(z, classes, activation=tf.nn.softmax)

        with tf.variable_scope("Loss"):
            
            BCE = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=x_hat, labels=self.x), reduction_indices=1)
            KLD = -0.5 * tf.reduce_mean(1 + logvar_encoder - tf.pow(mu_encoder, 2) - tf.exp(logvar_encoder), reduction_indices=1)
            softmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y_, logits = self.y))

            loss = tf.reduce_mean((BCE + KLD + softmax_loss) * lam)

            loss = tf.clip_by_value(loss, -1e-2, 1e-2)
            loss = tf.where(tf.is_nan(loss), 1e-2, loss)
            loss = tf.where(tf.equal(loss, -1e-2), tf.random_normal(loss.shape), loss)
            loss = tf.where(tf.equal(loss, 1e-2), tf.random_normal(loss.shape), loss)
            
            self.regularized_loss = tf.abs(loss, name = "Regularized_loss")
            correct_prediction = tf.equal(tf.argmax(self.y_, 1), tf.argmax(self.y, 1))
            self.tf_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name = "Accuracy")

        with tf.variable_scope("Optimizer"):
            learning_rate=self.lr #1e-2
            optimizer = tf.train.AdamOptimizer(learning_rate)
            gradients, variables = zip(*optimizer.compute_gradients(self.regularized_loss))
            gradients = [
                None if gradient is None else tf.clip_by_value(gradient, -1, 1)
                for gradient in gradients]
            self.train_op = optimizer.apply_gradients(zip(gradients, variables))
            #self.train_op = optimizer.minimize(self.regularized_loss)
            
        # add op for merging summary
        #self.summary_op = tf.summary.merge_all()
        self.pred = tf.argmax(self.y, axis = 1)
        self.actual = tf.argmax(self.y_, axis = 1)

        # add Saver ops
        self.saver = tf.train.Saver()
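The Sampling_Distribution scope implements the standard VAE reparameterization trick, z = mu + exp(0.5 * logvar) * eps with eps ~ N(0, 1), and the KLD term in the loss is the closed-form KL divergence between N(mu, sigma^2) and N(0, 1). A standalone NumPy sketch of both (illustrative only, not part of the TensorFlow graph):

    import numpy as np

    mu = np.array([0.5, -1.0])
    logvar = np.array([0.0, 0.2])
    eps = np.random.standard_normal(mu.shape)      # epsilon ~ N(0, 1)
    z = mu + np.exp(0.5 * logvar) * eps            # z ~ N(mu, exp(logvar))

    # closed-form KL(N(mu, sigma^2) || N(0, 1)), matching the KLD term above
    kld = -0.5 * np.sum(1 + logvar - mu**2 - np.exp(logvar))
    print(z, kld)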

In [21]:
import collections
import time
import sklearn.metrics as me 

class Train:    
    
    result = namedtuple("score", ['epoch', 'no_of_features','hidden_layers','train_score', 'test_score', 'f1_score', 'test_score_20', 'f1_score_20', 'time_taken'])

    predictions = {}
    predictions_ = {}

    results = []
    best_acc = 0
    best_acc_global = 0

    def train(epochs, net, h,f, lrs):
        batch_iterations = 200
        train_loss = None
        Train.best_acc = 0
        os.makedirs("dataset/tf_vae_dense_trained_together_nsl_kdd/hidden layers_{}_features count_{}".format(h,f),
                    exist_ok = True)

        with tf.Session() as sess:
            #summary_writer_train = tf.summary.FileWriter('./logs/kdd/VAE/training', graph=sess.graph)
            #summary_writer_valid = tf.summary.FileWriter('./logs/kdd/VAE/validation')

            sess.run(tf.global_variables_initializer())
            start_time = time.perf_counter()

            Train.best_acc = 0
            for c, lr in enumerate(lrs):
                for epoch in range(1, (epochs+1)):
                    x_train, x_valid, y_train, y_valid = ms.train_test_split(preprocess.x_train, 
                                                                              preprocess.y_train, 
                                                                              test_size=0.2)
                    batch_indices = np.array_split(np.arange(x_train.shape[0]), 
                                               batch_iterations)

                    for i in batch_indices:

                        def train_batch():
                            nonlocal train_loss
                            _, train_loss = sess.run([net.train_op, 
                                                                   net.regularized_loss, 
                                                                   ], #net.summary_op
                                                                  feed_dict={net.x: x_train[i,:], 
                                                                             net.y_: y_train[i,:], 
                                                                             net.keep_prob:1, net.lr:lr})

                        train_batch()
                        count = 10
                        
                        while((train_loss > 1e4 or np.isnan(train_loss)) and epoch > 1 and count > 1):
                            print("Step {} | High Training Loss: {:.6f} ... Restoring Net".format(epoch, train_loss))
                            net.saver.restore(sess, 
                                              tf.train.latest_checkpoint('dataset/tf_vae_dense_trained_together_nsl_kdd/hidden layers_{}_features count_{}'
                                                                         .format(h,f)))
                            train_batch()
                            count -= 1

                    valid_loss, valid_accuracy = sess.run([net.regularized_loss, net.tf_accuracy], #net.summary_op
                                                              feed_dict={net.x: x_valid, 
                                                                         net.y_: y_valid, 
                                                                         net.keep_prob:1, net.lr:lr})
                    
                    test_accuracy, test_loss, pred_value, actual_value, y_pred = sess.run([net.tf_accuracy, net.regularized_loss, net.pred, 
                                                                                      net.actual, net.y], #net.summary_op 
                                                                                      feed_dict={net.x: preprocess.x_test, 
                                                                                     net.y_: preprocess.y_test, 
                                                                                     net.keep_prob:1, net.lr:lr})
                    
                    f1_score = me.f1_score(actual_value, pred_value)
                    test_accuracy_, test_loss_, pred_value_, actual_value_, y_pred_ = sess.run([net.tf_accuracy, net.regularized_loss, net.pred, 
                                                                                      net.actual, net.y], #net.summary_op 
                                                                                      feed_dict={net.x: preprocess.x_test_, 
                                                                                     net.y_: preprocess.y_test_, 
                                                                                     net.keep_prob:1, net.lr:lr})
                    f1_score_ = me.f1_score(actual_value_, pred_value_)
                    #summary_writer_valid.add_summary(summary_str, epoch)

                    print("Step {} | Training Loss: {:.6f} | Validation Accuracy: {:.6f}".format(epoch, train_loss, valid_accuracy))
                    print("Accuracy on Test data: {}, {}".format(test_accuracy, test_accuracy_))

                    if test_accuracy > Train.best_acc_global:
                        Train.best_acc_global = test_accuracy
                        Train.pred_value = pred_value
                        Train.actual_value = actual_value
                        
                        Train.pred_value_ = pred_value_
                        Train.actual_value_ = actual_value_
                        
                        Train.best_parameters = "Hidden Layers:{}, Features Count:{}".format(h, f)

                    if test_accuracy > Train.best_acc:
                        Train.best_acc = test_accuracy

                        if not (np.isnan(train_loss)):
                            net.saver.save(sess, 
                                       "dataset/tf_vae_dense_trained_together_nsl_kdd/hidden layers_{}_features count_{}/model"
                                       .format(h,f), 
                                       global_step = epoch, 
                                       write_meta_graph=False)

                        curr_pred = pd.DataFrame({"Attack_prob":y_pred[:,-2], "Normal_prob":y_pred[:, -1], "Prediction":pred_value, "Actual":actual_value})
                        curr_pred_ = pd.DataFrame({"Attack_prob":y_pred_[:,-2], "Normal_prob":y_pred_[:, -1], "Prediction":pred_value_, "Actual": actual_value_})
                        
                        Train.predictions.update({"{}_{}_{}".format((epoch+1)*(c+1),f,h):(curr_pred, 
                                                   Train.result((epoch+1)*(c+1), f, h, valid_accuracy, test_accuracy, f1_score, test_accuracy_, f1_score_, time.perf_counter() - start_time))})
                        Train.predictions_.update({"{}_{}_{}".format((epoch+1)*(c+1),f,h):(curr_pred_, 
                                                   Train.result((epoch+1)*(c+1), f, h, valid_accuracy, test_accuracy, f1_score, test_accuracy_, f1_score_, time.perf_counter() - start_time))})
                        #Train.results.append(Train.result(epochs, f, h,valid_accuracy, test_accuracy))
            print("Best Accuracy on Test data: {}".format(Train.best_acc))

In [22]:
import itertools

df_results = []
past_scores = []

class Hyperparameters:
#    features_arr = [2, 4, 8, 16, 32, 64, 128, 256]
#    hidden_layers_arr = [2, 4, 6, 10]

    def start_training():
        global df_results
        global past_scores
        
        features_arr = [1, 12, 24, 48, 122]
        hidden_layers_arr = [1, 3]

        Train.predictions = {}
        Train.predictions_ = {}
        Train.results = []

        epochs = [15]
        lrs = [1e-2, 1e-2]

        for e, h, f in itertools.product(epochs, hidden_layers_arr, features_arr):
            print("Current Layer Attributes - epochs:{} hidden layers:{} features count:{}".format(e,h,f))
            n = network(2,h,f)
            n.build_layers()
            Train.train(e, n, h,f, lrs)
            
        dict1 = {}
        dict1_ = {}
        dict2 = []

        for k, (v1, v2) in Train.predictions.items():
            dict1.update({k: v1})
            dict2.append(v2)

        for k, (v1_, v2) in Train.predictions_.items():
            dict1_.update({k: v1_})

        Train.predictions = dict1
        Train.predictions_ = dict1_
        
        Train.results = dict2
        df_results = pd.DataFrame(Train.results)

        #temp = df_results.set_index(['no_of_features', 'hidden_layers'])

        
        if not os.path.isfile('dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all.pkl'):
            past_scores = df_results
        else:
            past_scores = pd.read_pickle("dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all.pkl")
            past_scores = past_scores.append(df_results, ignore_index=True)
                
        past_scores.to_pickle("dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all.pkl")
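itertools.product expands the search grid into every (epochs, hidden_layers, features) combination, so the loop above trains 1 x 2 x 5 = 10 configurations. A small sketch of the expansion:

    import itertools
    grid = list(itertools.product([15], [1, 3], [1, 12, 24, 48, 122]))
    print(len(grid))    # 10 configurations
    print(grid[:3])     # [(15, 1, 1), (15, 1, 12), (15, 1, 24)]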

In [23]:
#%%timeit -r 10

Hyperparameters.start_training()


Current Layer Attributes - epochs:15 hidden layers:1 features count:1
Step 1 | Training Loss: 0.000105 | Validation Accuracy: 0.779917
Accuracy on Test data: 0.6815117001533508, 0.5146835446357727
Step 2 | Training Loss: 0.000215 | Validation Accuracy: 0.881524
Accuracy on Test data: 0.761799156665802, 0.5572152137756348
Step 3 | Training Loss: 0.000090 | Validation Accuracy: 0.808533
Accuracy on Test data: 0.7109208703041077, 0.5145147442817688
Step 4 | Training Loss: 0.000043 | Validation Accuracy: 0.788490
Accuracy on Test data: 0.6199432015419006, 0.40708860754966736
Step 5 | Training Loss: 0.000306 | Validation Accuracy: 0.778647
Accuracy on Test data: 0.7403743863105774, 0.6514768004417419
Step 6 | Training Loss: 0.000425 | Validation Accuracy: 0.824846
Accuracy on Test data: 0.7391323447227478, 0.5559493899345398
Step 7 | Training Loss: 0.000102 | Validation Accuracy: 0.796706
Accuracy on Test data: 0.6808019876480103, 0.4925738275051117
Step 8 | Training Loss: 0.000069 | Validation Accuracy: 0.824132
Accuracy on Test data: 0.7008073329925537, 0.5202531814575195
Step 9 | Training Loss: 0.000006 | Validation Accuracy: 0.848502
Accuracy on Test data: 0.7631298899650574, 0.60472571849823
Step 10 | Training Loss: 0.000210 | Validation Accuracy: 0.819806
Accuracy on Test data: 0.719171404838562, 0.5349367260932922
/home/ritesh_malaiya/anaconda3/envs/p3/lib/python3.6/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 due to no predicted samples.
  'precision', 'predicted', average, warn_for)
Step 11 | Training Loss: 0.598608 | Validation Accuracy: 0.539829
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.407515 | Validation Accuracy: 0.533915
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.512440 | Validation Accuracy: 0.533876
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.383999 | Validation Accuracy: 0.529867
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.487952 | Validation Accuracy: 0.530185
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.855318 | Validation Accuracy: 0.534392
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.833317 | Validation Accuracy: 0.530105
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.762139 | Validation Accuracy: 0.533320
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.486080 | Validation Accuracy: 0.536059
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.462135 | Validation Accuracy: 0.538718
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.412023 | Validation Accuracy: 0.534193
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.456503 | Validation Accuracy: 0.536773
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.229875 | Validation Accuracy: 0.537130
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.206023 | Validation Accuracy: 0.531931
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.372092 | Validation Accuracy: 0.538242
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.524235 | Validation Accuracy: 0.536257
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.507455 | Validation Accuracy: 0.530105
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.717263 | Validation Accuracy: 0.537051
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 1.450042 | Validation Accuracy: 0.530978
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.159706 | Validation Accuracy: 0.536297
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.7631298899650574
Current Layer Attributes - epochs:15 hidden layers:1 features count:12
Step 1 | Training Loss: 0.000283 | Validation Accuracy: 0.907561
Accuracy on Test data: 0.8423970937728882, 0.7108016610145569
Step 2 | Training Loss: 0.000308 | Validation Accuracy: 0.912244
Accuracy on Test data: 0.7743967175483704, 0.5726582407951355
Step 3 | Training Loss: 0.512295 | Validation Accuracy: 0.899464
Accuracy on Test data: 0.7870386838912964, 0.6084387898445129
Step 4 | Training Loss: 0.000042 | Validation Accuracy: 0.908117
Accuracy on Test data: 0.7806955575942993, 0.5907173156738281
Step 5 | Training Loss: 0.000247 | Validation Accuracy: 0.914705
Accuracy on Test data: 0.783401370048523, 0.596540093421936
Step 6 | Training Loss: 0.490228 | Validation Accuracy: 0.911451
Accuracy on Test data: 0.8189762234687805, 0.6613501906394958
Step 7 | Training Loss: 1.068962 | Validation Accuracy: 0.534789
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.006739 | Validation Accuracy: 0.537091
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.074578 | Validation Accuracy: 0.531693
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.751071 | Validation Accuracy: 0.532606
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 1.019314 | Validation Accuracy: 0.533558
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.578699 | Validation Accuracy: 0.533757
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 1.384635 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.271055 | Validation Accuracy: 0.533995
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.088443 | Validation Accuracy: 0.530383
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.808341 | Validation Accuracy: 0.533479
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.233517 | Validation Accuracy: 0.535424
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.325851 | Validation Accuracy: 0.528835
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.184011 | Validation Accuracy: 0.533677
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.879355 | Validation Accuracy: 0.530343
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.286230 | Validation Accuracy: 0.533519
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.695318 | Validation Accuracy: 0.532487
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 1.898309 | Validation Accuracy: 0.533280
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.264839 | Validation Accuracy: 0.534749
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.602111 | Validation Accuracy: 0.537249
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.615690 | Validation Accuracy: 0.531217
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.076663 | Validation Accuracy: 0.535860
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.814122 | Validation Accuracy: 0.537329
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.133343 | Validation Accuracy: 0.533876
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.706984 | Validation Accuracy: 0.536178
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.8423970937728882
Current Layer Attributes - epochs:15 hidden layers:1 features count:24
Step 1 | Training Loss: 0.000079 | Validation Accuracy: 0.889303
Accuracy on Test data: 0.7514194250106812, 0.5342615842819214
Step 2 | Training Loss: 1.391636 | Validation Accuracy: 0.532804
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.963884 | Validation Accuracy: 0.526335
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 1.991958 | Validation Accuracy: 0.532804
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.283592 | Validation Accuracy: 0.536297
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.824139 | Validation Accuracy: 0.535344
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.166301 | Validation Accuracy: 0.536456
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 1.036381 | Validation Accuracy: 0.530462
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.666918 | Validation Accuracy: 0.533955
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.059618 | Validation Accuracy: 0.535066
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.708252 | Validation Accuracy: 0.533320
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.991342 | Validation Accuracy: 0.526295
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.649616 | Validation Accuracy: 0.534908
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 1.372472 | Validation Accuracy: 0.527605
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.521095 | Validation Accuracy: 0.539909
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.282591 | Validation Accuracy: 0.534114
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.567270 | Validation Accuracy: 0.534312
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.391793 | Validation Accuracy: 0.532288
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 2.305556 | Validation Accuracy: 0.529788
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.192790 | Validation Accuracy: 0.539909
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.178322 | Validation Accuracy: 0.536932
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.263125 | Validation Accuracy: 0.536098
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.519965 | Validation Accuracy: 0.534590
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.065175 | Validation Accuracy: 0.531256
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.283050 | Validation Accuracy: 0.534590
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.362700 | Validation Accuracy: 0.532884
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.403375 | Validation Accuracy: 0.536138
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.361964 | Validation Accuracy: 0.537805
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 1.435013 | Validation Accuracy: 0.533082
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.328835 | Validation Accuracy: 0.529708
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.7514194250106812
Current Layer Attributes - epochs:15 hidden layers:1 features count:48
Step 1 | Training Loss: 2.599756 | Validation Accuracy: 0.532884
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 1.657728 | Validation Accuracy: 0.537289
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.467931 | Validation Accuracy: 0.530502
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.734667 | Validation Accuracy: 0.530145
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 1.043036 | Validation Accuracy: 0.535027
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.021370 | Validation Accuracy: 0.537210
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 1.043934 | Validation Accuracy: 0.528240
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.737309 | Validation Accuracy: 0.532963
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 1.598725 | Validation Accuracy: 0.537249
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.189645 | Validation Accuracy: 0.533638
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 1.675554 | Validation Accuracy: 0.533439
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.609000 | Validation Accuracy: 0.531534
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.704782 | Validation Accuracy: 0.537210
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.193881 | Validation Accuracy: 0.532328
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.475317 | Validation Accuracy: 0.538797
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.510727 | Validation Accuracy: 0.541655
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.823664 | Validation Accuracy: 0.527684
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.593812 | Validation Accuracy: 0.537448
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 1.228738 | Validation Accuracy: 0.534550
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.098134 | Validation Accuracy: 0.532368
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.616636 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 2.150686 | Validation Accuracy: 0.534590
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.166524 | Validation Accuracy: 0.533836
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.651595 | Validation Accuracy: 0.533519
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.061141 | Validation Accuracy: 0.532685
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.292590 | Validation Accuracy: 0.538202
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.214442 | Validation Accuracy: 0.537527
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.199889 | Validation Accuracy: 0.532923
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.964735 | Validation Accuracy: 0.530502
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.996989 | Validation Accuracy: 0.536614
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.43075764179229736
Current Layer Attributes - epochs:15 hidden layers:1 features count:122
Step 1 | Training Loss: 0.000490 | Validation Accuracy: 0.753403
Accuracy on Test data: 0.5652945637702942, 0.2329113930463791
Step 2 | Training Loss: 0.240252 | Validation Accuracy: 0.529550
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.945947 | Validation Accuracy: 0.536337
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.019427 | Validation Accuracy: 0.534154
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 1.391309 | Validation Accuracy: 0.535741
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 1.478997 | Validation Accuracy: 0.538678
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 1.188855 | Validation Accuracy: 0.538956
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.839642 | Validation Accuracy: 0.536376
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.541445 | Validation Accuracy: 0.535781
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.286773 | Validation Accuracy: 0.530542
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.325222 | Validation Accuracy: 0.538559
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.274262 | Validation Accuracy: 0.539472
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 1.271192 | Validation Accuracy: 0.535027
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.453955 | Validation Accuracy: 0.538361
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 1.404501 | Validation Accuracy: 0.535344
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.574517 | Validation Accuracy: 0.539353
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.343881 | Validation Accuracy: 0.535463
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.909692 | Validation Accuracy: 0.534947
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.471563 | Validation Accuracy: 0.532129
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.439323 | Validation Accuracy: 0.535305
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.997143 | Validation Accuracy: 0.534312
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 1.349683 | Validation Accuracy: 0.534273
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.586275 | Validation Accuracy: 0.536654
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 1.610457 | Validation Accuracy: 0.534908
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.007753 | Validation Accuracy: 0.535821
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 2.477878 | Validation Accuracy: 0.530304
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.420019 | Validation Accuracy: 0.533757
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 2.410817 | Validation Accuracy: 0.536892
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.405838 | Validation Accuracy: 0.532248
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.336811 | Validation Accuracy: 0.531177
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.5652945637702942
Current Layer Attributes - epochs:15 hidden layers:3 features count:1
Step 1 | Training Loss: 0.000110 | Validation Accuracy: 0.699821
Accuracy on Test data: 0.6759669780731201, 0.6384810209274292
Step 2 | Training Loss: 0.000136 | Validation Accuracy: 0.701171
Accuracy on Test data: 0.7090134620666504, 0.6297890543937683
Step 3 | Training Loss: 0.000347 | Validation Accuracy: 0.711490
Accuracy on Test data: 0.7019606232643127, 0.6233755350112915
Step 4 | Training Loss: 0.000198 | Validation Accuracy: 0.730502
Accuracy on Test data: 0.6898065805435181, 0.5832067728042603
Step 5 | Training Loss: 0.000040 | Validation Accuracy: 0.772217
Accuracy on Test data: 0.7408179640769958, 0.6413502097129822
Step 6 | Training Loss: 0.000031 | Validation Accuracy: 0.780353
Accuracy on Test data: 0.7749733924865723, 0.7088607549667358
Step 7 | Training Loss: 1.086330 | Validation Accuracy: 0.538678
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.463382 | Validation Accuracy: 0.533042
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.691722 | Validation Accuracy: 0.536098
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.293956 | Validation Accuracy: 0.536337
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 2.297280 | Validation Accuracy: 0.530026
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.260770 | Validation Accuracy: 0.541457
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.219086 | Validation Accuracy: 0.536654
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.508036 | Validation Accuracy: 0.535463
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.848101 | Validation Accuracy: 0.532844
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.276672 | Validation Accuracy: 0.535027
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.588382 | Validation Accuracy: 0.533995
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.999855 | Validation Accuracy: 0.534471
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.462957 | Validation Accuracy: 0.536257
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.483059 | Validation Accuracy: 0.535503
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.131366 | Validation Accuracy: 0.535702
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.427725 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.029087 | Validation Accuracy: 0.537607
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.568524 | Validation Accuracy: 0.535305
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.773519 | Validation Accuracy: 0.532090
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 2.523225 | Validation Accuracy: 0.532010
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.735860 | Validation Accuracy: 0.534471
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.865220 | Validation Accuracy: 0.533161
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.138182 | Validation Accuracy: 0.534431
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.185640 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.7749733924865723
Current Layer Attributes - epochs:15 hidden layers:3 features count:12
Step 1 | Training Loss: 0.000227 | Validation Accuracy: 0.860528
Accuracy on Test data: 0.7907647490501404, 0.6569620370864868
Step 2 | Training Loss: 0.000039 | Validation Accuracy: 0.937210
Accuracy on Test data: 0.8527324199676514, 0.7318143248558044
Step 3 | Training Loss: 0.000080 | Validation Accuracy: 0.949712
Accuracy on Test data: 0.8428850173950195, 0.7129113674163818
Step 4 | Training Loss: 0.000047 | Validation Accuracy: 0.940385
Accuracy on Test data: 0.8350780606269836, 0.6978058815002441
Step 5 | Training Loss: 0.000057 | Validation Accuracy: 0.926890
Accuracy on Test data: 0.8594304323196411, 0.7392405271530151
Step 6 | Training Loss: 0.000022 | Validation Accuracy: 0.931217
Accuracy on Test data: 0.7964868545532227, 0.6178902983665466
Step 7 | Training Loss: 0.000013 | Validation Accuracy: 0.935305
Accuracy on Test data: 0.8556600213050842, 0.7412658333778381
Step 8 | Training Loss: 0.000010 | Validation Accuracy: 0.914507
Accuracy on Test data: 0.8818311095237732, 0.7859071493148804
Step 9 | Training Loss: 0.000015 | Validation Accuracy: 0.901766
Accuracy on Test data: 0.8468772172927856, 0.7282700538635254
Step 10 | Training Loss: 0.000013 | Validation Accuracy: 0.916928
Accuracy on Test data: 0.8149396777153015, 0.654261589050293
Step 11 | Training Loss: 0.000048 | Validation Accuracy: 0.915023
Accuracy on Test data: 0.8591199517250061, 0.74388188123703
Step 12 | Training Loss: 0.000078 | Validation Accuracy: 0.906013
Accuracy on Test data: 0.8856902122497559, 0.796708881855011
Step 13 | Training Loss: 0.000028 | Validation Accuracy: 0.926573
Accuracy on Test data: 0.8008782863616943, 0.6591561436653137
Step 14 | Training Loss: 0.000007 | Validation Accuracy: 0.906767
Accuracy on Test data: 0.8501597046852112, 0.750717282295227
Step 15 | Training Loss: 0.000037 | Validation Accuracy: 0.912880
Accuracy on Test data: 0.8659066557884216, 0.7583122253417969
Step 1 | Training Loss: 0.000024 | Validation Accuracy: 0.918992
Accuracy on Test data: 0.8545067310333252, 0.7286919951438904
Step 2 | Training Loss: 0.000003 | Validation Accuracy: 0.919548
Accuracy on Test data: 0.8691004514694214, 0.7659915685653687
Step 3 | Training Loss: 0.000047 | Validation Accuracy: 0.873784
Accuracy on Test data: 0.8163591027259827, 0.6986497640609741
Step 4 | Training Loss: 0.000076 | Validation Accuracy: 0.894344
Accuracy on Test data: 0.8423527479171753, 0.7253164649009705
Step 5 | Training Loss: 0.431742 | Validation Accuracy: 0.535582
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.744948 | Validation Accuracy: 0.535622
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.141115 | Validation Accuracy: 0.532288
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 1.027838 | Validation Accuracy: 0.535305
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.055295 | Validation Accuracy: 0.536892
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 2.534327 | Validation Accuracy: 0.535741
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.274675 | Validation Accuracy: 0.533796
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.007651 | Validation Accuracy: 0.535265
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.587539 | Validation Accuracy: 0.531018
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.390128 | Validation Accuracy: 0.534630
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.066462 | Validation Accuracy: 0.528756
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.8856902122497559
Current Layer Attributes - epochs:15 hidden layers:3 features count:24
Step 1 | Training Loss: 0.000070 | Validation Accuracy: 0.864060
Accuracy on Test data: 0.8336586356163025, 0.7323206663131714
Step 2 | Training Loss: 0.000006 | Validation Accuracy: 0.903473
Accuracy on Test data: 0.7686302065849304, 0.5948523283004761
Step 3 | Training Loss: 0.000167 | Validation Accuracy: 0.895336
Accuracy on Test data: 0.7985273003578186, 0.6618565320968628
Step 4 | Training Loss: 0.000017 | Validation Accuracy: 0.862274
Accuracy on Test data: 0.7118080258369446, 0.49282699823379517
Step 5 | Training Loss: 0.000008 | Validation Accuracy: 0.894106
Accuracy on Test data: 0.7937366962432861, 0.6267510652542114
Step 6 | Training Loss: 0.000027 | Validation Accuracy: 0.866243
Accuracy on Test data: 0.8131210207939148, 0.6902953386306763
Step 7 | Training Loss: 0.000010 | Validation Accuracy: 0.903116
Accuracy on Test data: 0.7984386086463928, 0.6377215385437012
Step 8 | Training Loss: 0.000013 | Validation Accuracy: 0.912364
Accuracy on Test data: 0.8799237012863159, 0.7912236452102661
Step 9 | Training Loss: 0.000030 | Validation Accuracy: 0.914626
Accuracy on Test data: 0.8757097125053406, 0.7803375720977783
Step 10 | Training Loss: 0.000010 | Validation Accuracy: 0.914864
Accuracy on Test data: 0.8627572655677795, 0.7563713192939758
Step 11 | Training Loss: 0.000002 | Validation Accuracy: 0.917881
Accuracy on Test data: 0.8733587861061096, 0.7728270292282104
Step 12 | Training Loss: 0.000030 | Validation Accuracy: 0.919270
Accuracy on Test data: 0.8570795059204102, 0.7411814332008362
Step 13 | Training Loss: 0.000047 | Validation Accuracy: 0.923556
Accuracy on Test data: 0.8618701100349426, 0.7477636933326721
Step 14 | Training Loss: 0.000057 | Validation Accuracy: 0.916134
Accuracy on Test data: 0.87211674451828, 0.7726582288742065
Step 15 | Training Loss: 0.000038 | Validation Accuracy: 0.918238
Accuracy on Test data: 0.861160397529602, 0.7481856346130371
Step 1 | Training Loss: 0.000007 | Validation Accuracy: 0.909704
Accuracy on Test data: 0.8639549612998962, 0.7567088603973389
Step 2 | Training Loss: 0.000013 | Validation Accuracy: 0.915340
Accuracy on Test data: 0.8660397529602051, 0.7621096968650818
Step 3 | Training Loss: 0.000038 | Validation Accuracy: 0.909823
Accuracy on Test data: 0.8595635294914246, 0.7497046589851379
Step 4 | Training Loss: 0.000012 | Validation Accuracy: 0.920738
Accuracy on Test data: 0.8688786625862122, 0.7633755207061768
Step 5 | Training Loss: 0.000036 | Validation Accuracy: 0.893392
Accuracy on Test data: 0.8675922751426697, 0.7720674872398376
Step 6 | Training Loss: 0.000001 | Validation Accuracy: 0.894225
Accuracy on Test data: 0.8588981628417969, 0.7493671178817749
Step 7 | Training Loss: 0.000029 | Validation Accuracy: 0.912364
Accuracy on Test data: 0.8685237765312195, 0.7661603093147278
Step 8 | Training Loss: 0.000007 | Validation Accuracy: 0.918635
Accuracy on Test data: 0.8604506850242615, 0.749113917350769
Step 9 | Training Loss: 0.000037 | Validation Accuracy: 0.913078
Accuracy on Test data: 0.8537526726722717, 0.7414345741271973
Step 10 | Training Loss: 0.000039 | Validation Accuracy: 0.915975
Accuracy on Test data: 0.8671486973762512, 0.7614346146583557
Step 11 | Training Loss: 0.000001 | Validation Accuracy: 0.880056
Accuracy on Test data: 0.8529098629951477, 0.7587341666221619
Step 12 | Training Loss: 0.000025 | Validation Accuracy: 0.910419
Accuracy on Test data: 0.868080198764801, 0.7656540274620056
Step 13 | Training Loss: 0.000033 | Validation Accuracy: 0.915301
Accuracy on Test data: 0.8611160516738892, 0.752489447593689
Step 14 | Training Loss: 0.000023 | Validation Accuracy: 0.907164
Accuracy on Test data: 0.8494055867195129, 0.7344303727149963
Step 15 | Training Loss: 0.000023 | Validation Accuracy: 0.905100
Accuracy on Test data: 0.8606281280517578, 0.7523206472396851
Best Accuracy on Test data: 0.8799237012863159
Current Layer Attributes - epochs:15 hidden layers:3 features count:48
Step 1 | Training Loss: 0.000033 | Validation Accuracy: 0.854574
Accuracy on Test data: 0.70737224817276, 0.5328270196914673
Step 2 | Training Loss: 0.000131 | Validation Accuracy: 0.830522
Accuracy on Test data: 0.6902058124542236, 0.5268354415893555
Step 3 | Training Loss: 0.000059 | Validation Accuracy: 0.801548
Accuracy on Test data: 0.6207416653633118, 0.5194936990737915
Step 4 | Training Loss: 0.000129 | Validation Accuracy: 0.802897
Accuracy on Test data: 0.5893363952636719, 0.4386498034000397
Step 5 | Training Loss: 0.000020 | Validation Accuracy: 0.765588
Accuracy on Test data: 0.5790454149246216, 0.41316455602645874
Step 6 | Training Loss: 0.000010 | Validation Accuracy: 0.838897
Accuracy on Test data: 0.7046664357185364, 0.4897046387195587
Step 7 | Training Loss: 0.000008 | Validation Accuracy: 0.830085
Accuracy on Test data: 0.7195262312889099, 0.5097890496253967
Step 8 | Training Loss: 0.000032 | Validation Accuracy: 0.845009
Accuracy on Test data: 0.7484918236732483, 0.5669198036193848
Step 9 | Training Loss: 0.000033 | Validation Accuracy: 0.855209
Accuracy on Test data: 0.7460521459579468, 0.5572995543479919
Step 10 | Training Loss: 0.867616 | Validation Accuracy: 0.534431
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.678946 | Validation Accuracy: 0.535344
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.269429 | Validation Accuracy: 0.538321
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 1.313553 | Validation Accuracy: 0.531018
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.295088 | Validation Accuracy: 0.530383
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.506885 | Validation Accuracy: 0.532606
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.353613 | Validation Accuracy: 0.527367
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.180588 | Validation Accuracy: 0.530740
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.412178 | Validation Accuracy: 0.531336
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 2.251077 | Validation Accuracy: 0.534630
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.228582 | Validation Accuracy: 0.537646
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.063389 | Validation Accuracy: 0.532248
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.499909 | Validation Accuracy: 0.536495
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 2.939292 | Validation Accuracy: 0.535384
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.613026 | Validation Accuracy: 0.535622
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.745708 | Validation Accuracy: 0.534511
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 1.526883 | Validation Accuracy: 0.534749
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.543381 | Validation Accuracy: 0.532844
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.672684 | Validation Accuracy: 0.534908
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 2.188988 | Validation Accuracy: 0.539790
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.816474 | Validation Accuracy: 0.531971
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.7484918236732483
Current Layer Attributes - epochs:15 hidden layers:3 features count:122
Step 1 | Training Loss: 0.000112 | Validation Accuracy: 0.916888
Accuracy on Test data: 0.7238289713859558, 0.5325738191604614
Step 2 | Training Loss: 0.000172 | Validation Accuracy: 0.903274
Accuracy on Test data: 0.8101046681404114, 0.6719831228256226
Step 3 | Training Loss: 0.000208 | Validation Accuracy: 0.889978
Accuracy on Test data: 0.8575674295425415, 0.7473417520523071
Step 4 | Training Loss: 0.000098 | Validation Accuracy: 0.907164
Accuracy on Test data: 0.8722941875457764, 0.7708860635757446
Step 5 | Training Loss: 0.000096 | Validation Accuracy: 0.886922
Accuracy on Test data: 0.8825408220291138, 0.803206741809845
Step 6 | Training Loss: 0.000021 | Validation Accuracy: 0.895138
Accuracy on Test data: 0.8794357776641846, 0.7873417735099792
Step 7 | Training Loss: 0.000010 | Validation Accuracy: 0.898432
Accuracy on Test data: 0.8761976361274719, 0.7797468304634094
Step 8 | Training Loss: 0.000019 | Validation Accuracy: 0.899583
Accuracy on Test data: 0.8282470107078552, 0.6880168914794922
Step 9 | Training Loss: 0.000032 | Validation Accuracy: 0.901806
Accuracy on Test data: 0.87788325548172, 0.7838818430900574
Step 10 | Training Loss: 0.000030 | Validation Accuracy: 0.894265
Accuracy on Test data: 0.8773509860038757, 0.7844725847244263
Step 11 | Training Loss: 0.000008 | Validation Accuracy: 0.898154
Accuracy on Test data: 0.891678512096405, 0.8102953433990479
Step 12 | Training Loss: 0.000022 | Validation Accuracy: 0.896448
Accuracy on Test data: 0.8845369219779968, 0.7934176921844482
Step 13 | Training Loss: 0.000012 | Validation Accuracy: 0.893114
Accuracy on Test data: 0.8878193497657776, 0.798396646976471
Step 14 | Training Loss: 0.000014 | Validation Accuracy: 0.892717
Accuracy on Test data: 0.8825408220291138, 0.7909704446792603
Step 15 | Training Loss: 0.000045 | Validation Accuracy: 0.894820
Accuracy on Test data: 0.8801011443138123, 0.7875949144363403
Step 1 | Training Loss: 0.000011 | Validation Accuracy: 0.899504
Accuracy on Test data: 0.8856014609336853, 0.7947679162025452
Step 2 | Training Loss: 0.000031 | Validation Accuracy: 0.896051
Accuracy on Test data: 0.8849804997444153, 0.7942615747451782
Step 3 | Training Loss: 0.000007 | Validation Accuracy: 0.901290
Accuracy on Test data: 0.8878637552261353, 0.7965400815010071
Step 4 | Training Loss: 0.000062 | Validation Accuracy: 0.900853
Accuracy on Test data: 0.875842809677124, 0.7763713002204895
Step 5 | Training Loss: 0.000009 | Validation Accuracy: 0.889819
Accuracy on Test data: 0.8692334890365601, 0.7675105333328247
Step 6 | Training Loss: 0.000010 | Validation Accuracy: 0.891883
Accuracy on Test data: 0.8947835564613342, 0.8172996044158936
Step 7 | Training Loss: 0.000013 | Validation Accuracy: 0.900258
Accuracy on Test data: 0.8918559551239014, 0.806075930595398
Step 8 | Training Loss: 0.000023 | Validation Accuracy: 0.897638
Accuracy on Test data: 0.8730039000511169, 0.7740084528923035
Step 9 | Training Loss: 0.000010 | Validation Accuracy: 0.908633
Accuracy on Test data: 0.8795244693756104, 0.7821096777915955
Step 10 | Training Loss: 0.000031 | Validation Accuracy: 0.908712
Accuracy on Test data: 0.8642654418945312, 0.75274258852005
Step 11 | Training Loss: 0.000050 | Validation Accuracy: 0.849613
Accuracy on Test data: 0.8345901370048523, 0.7156962156295776
Step 12 | Training Loss: 0.000063 | Validation Accuracy: 0.844969
Accuracy on Test data: 0.8437278270721436, 0.7367932200431824
Step 13 | Training Loss: 0.000041 | Validation Accuracy: 0.852590
Accuracy on Test data: 0.7818931937217712, 0.6157805919647217
Step 14 | Training Loss: 0.000075 | Validation Accuracy: 0.851994
Accuracy on Test data: 0.8068665862083435, 0.6631223559379578
Step 15 | Training Loss: 0.000066 | Validation Accuracy: 0.863743
Accuracy on Test data: 0.8223917484283447, 0.6722362637519836
Best Accuracy on Test data: 0.8947835564613342

In [24]:
pd.Panel(Train.predictions).to_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions.pkl")
pd.Panel(Train.predictions_).to_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions__.pkl")
df_results.to_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_scores.pkl")
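pd.Panel has since been deprecated and removed in newer pandas (1.0+); on recent versions the same information can be saved by pickling the dicts of DataFrames directly (a sketch, assuming the structures built by start_training above):

    import pickle

    with open("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions.pkl", "wb") as f:
        pickle.dump(Train.predictions, f)
    with open("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions__.pkl", "wb") as f:
        pickle.dump(Train.predictions_, f)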

In [25]:
import numpy as np
import matplotlib.pyplot as plt
import itertools

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    np.set_printoptions(precision=4)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        #print("Normalized confusion matrix")
    else:
        #print('Confusion matrix, without normalization')
        pass

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    
    #print(cm)

    label = [["\n True Negative", "\n False Positive \n Type I Error"],
             ["\n False Negative \n Type II Error", "\n True Positive"]
            ]
    
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        
        plt.text(j, i, "{} {}".format(cm[i, j].round(4), label[i][j]),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

def plot(actual_value, pred_value):
    from sklearn.metrics import confusion_matrix

    cm_2labels = confusion_matrix(y_pred = pred_value, y_true = actual_value)
    plt.figure(figsize=[6,6])
    plot_confusion_matrix(cm_2labels, ['Normal', 'Attack'], normalize = False)
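As a quick sanity check on the cell annotations above (not part of the original run): sklearn's confusion_matrix puts true labels on rows and predicted labels on columns, so with the binary encoding used here (0 = Normal, 1 = Attack) the layout is [[TN, FP], [FN, TP]], which is exactly what the label matrix marks. A tiny illustration:

from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 1, 0]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tn, fp, fn, tp)   # 1 1 1 2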

In [26]:
past_scores = pd.read_pickle("dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all.pkl")

In [27]:
past_scores.sort_values(by='f1_score',ascending=False)


Out[27]:
epoch no_of_features hidden_layers train_score test_score f1_score test_score_20 f1_score_20 time_taken
27 14 122 3 0.891883 0.894784 0.903624 0.817300 0.881402 88.621862
26 12 122 3 0.898154 0.891679 0.903600 0.810295 0.881146 46.063003
15 13 12 3 0.906013 0.885690 0.899103 0.796709 0.874276 40.399515
25 6 122 3 0.886922 0.882541 0.896108 0.803207 0.876783 21.043563
14 9 12 3 0.914507 0.881831 0.892892 0.785907 0.862783 26.981111
17 9 24 3 0.912364 0.879924 0.890303 0.791224 0.864275 27.275330
24 5 122 3 0.907164 0.872294 0.882254 0.770886 0.850306 16.874060
13 6 12 3 0.926890 0.859430 0.868512 0.739241 0.825857 16.922411
23 4 122 3 0.889978 0.857567 0.866069 0.747342 0.831019 12.697839
12 3 12 3 0.937210 0.852732 0.862332 0.731814 0.821921 6.821192
3 2 12 1 0.907561 0.842397 0.851158 0.710802 0.804874 1.861382
16 2 24 3 0.864060 0.833659 0.842807 0.732321 0.820059 3.465354
22 3 122 3 0.903274 0.810105 0.813326 0.671983 0.764923 8.460931
10 7 1 3 0.780353 0.774973 0.791757 0.708861 0.805371 19.076372
11 2 12 3 0.860528 0.790765 0.789203 0.656962 0.755988 3.423419
2 10 1 1 0.848502 0.763130 0.754911 0.604726 0.699320 14.574448
1 3 1 1 0.881524 0.761799 0.740830 0.557215 0.638910 3.351328
9 6 1 3 0.772217 0.740818 0.736458 0.641350 0.743822 15.911143
20 9 48 3 0.845009 0.748492 0.731991 0.566920 0.651453 28.795720
4 2 24 1 0.889303 0.751419 0.730136 0.534262 0.619615 1.909114
8 3 1 3 0.701171 0.709013 0.719945 0.629789 0.737917 6.423292
21 2 122 3 0.916888 0.723829 0.698206 0.532574 0.620122 4.292972
19 8 48 3 0.830085 0.719526 0.694172 0.509789 0.585397 25.208755
7 2 1 3 0.699821 0.675967 0.689215 0.638481 0.746629 3.246162
0 2 1 1 0.779917 0.681512 0.680719 0.514684 0.621719 1.706293
18 2 48 3 0.854574 0.707372 0.679586 0.532827 0.614753 3.672193
6 2 122 1 0.753403 0.565295 0.383027 0.232911 0.120209 2.686282
5 2 48 1 0.532884 0.430758 0.000000 0.181603 0.000000 2.086767

In [28]:
psg = past_scores.sort_values(by='test_score', ascending=False).groupby(by=['no_of_features', 'hidden_layers'])
psg.first().sort_values(by='test_score', ascending=False)


Out[28]:
epoch train_score test_score f1_score test_score_20 f1_score_20 time_taken
no_of_features hidden_layers
122 3 14 0.891883 0.894784 0.903624 0.817300 0.881402 88.621862
12 3 13 0.906013 0.885690 0.899103 0.796709 0.874276 40.399515
24 3 9 0.912364 0.879924 0.890303 0.791224 0.864275 27.275330
12 1 2 0.907561 0.842397 0.851158 0.710802 0.804874 1.861382
1 3 7 0.780353 0.774973 0.791757 0.708861 0.805371 19.076372
1 1 10 0.848502 0.763130 0.754911 0.604726 0.699320 14.574448
24 1 2 0.889303 0.751419 0.730136 0.534262 0.619615 1.909114
48 3 9 0.845009 0.748492 0.731991 0.566920 0.651453 28.795720
122 1 2 0.753403 0.565295 0.383027 0.232911 0.120209 2.686282
48 1 2 0.532884 0.430758 0.000000 0.181603 0.000000 2.086767
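Because past_scores is sorted by test_score before grouping, .first() returns, for each (no_of_features, hidden_layers) pair, the row from its best-performing epoch. An equivalent, more explicit selection of the same rows (a sketch using the same columns):

best_per_config = past_scores.loc[
    past_scores.groupby(['no_of_features', 'hidden_layers'])['test_score'].idxmax()
].sort_values(by='test_score', ascending=False)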

In [29]:
psg.mean().sort_values(by='test_score', ascending=False)


Out[29]:
epoch train_score test_score f1_score test_score_20 f1_score_20 time_taken
no_of_features hidden_layers
24 3 5.500000 0.888212 0.856791 0.866555 0.761772 0.842167 15.370342
12 3 6.600000 0.909030 0.854090 0.862408 0.742127 0.828165 18.909530
122 3 6.571429 0.899181 0.847543 0.851884 0.736227 0.815100 28.293461
12 1 2.000000 0.907561 0.842397 0.851158 0.710802 0.804874 1.861382
24 1 2.000000 0.889303 0.751419 0.730136 0.534262 0.619615 1.909114
1 1 5.000000 0.836647 0.735480 0.725487 0.558875 0.653316 6.544023
1 3 4.500000 0.738391 0.725193 0.734344 0.654620 0.758435 11.164242
48 3 6.333333 0.843223 0.725130 0.701916 0.536512 0.617201 19.225556
122 1 2.000000 0.753403 0.565295 0.383027 0.232911 0.120209 2.686282
48 1 2.000000 0.532884 0.430758 0.000000 0.181603 0.000000 2.086767

In [30]:
Train.predictions = pd.read_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions.pkl")
Train.predictions_ = pd.read_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions__.pkl")

In [36]:
# prediction key format: "{epoch}_{no_of_features}_{hidden_layers}", e.g. "14_122_3"
Train.predictions["14_122_3"].sample()


Out[36]:
Actual Attack_prob Normal_prob Prediction
16476 0.0 1.0 3.718943e-13 0.0

In [37]:
Train.predictions_["14_122_3"].sample()


Out[37]:
Actual Attack_prob Normal_prob Prediction
11510 1.0 1.0 1.180671e-10 0.0

In [38]:
df = Train.predictions["14_122_3"].dropna()
df_ = Train.predictions_["14_122_3"].dropna()

In [39]:
from sklearn import metrics as me
def get_score(y_true, y_pred):
    f1 = me.f1_score(y_true, y_pred)
    pre = me.precision_score(y_true, y_pred)
    rec = me.recall_score(y_true, y_pred)
    acc = me.accuracy_score(y_true, y_pred)
    return {"F1 Score":f1, "Precision":pre, "Recall":rec, "Accuracy":acc}

In [40]:
from sklearn import metrics as me

scores = get_score(df.loc[:,'Actual'].values.astype(int),
                df.loc[:,'Prediction'].values.astype(int))
scores.update({"Scenario":"Train+/Test+"})
score_df = pd.DataFrame(scores, index=[0])

scores = get_score(df_.loc[:,'Actual'].values.astype(int),
                df_.loc[:,'Prediction'].values.astype(int))
scores.update({"Scenario":"Train+/Test-"})

score_df = score_df.append(pd.DataFrame(scores, index=[1]))

score_df


Out[40]:
Accuracy F1 Score Precision Recall Scenario
0 0.894784 0.903624 0.944053 0.866516 Train+/Test+
1 0.817300 0.881402 0.940166 0.829552 Train+/Test-
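Note that DataFrame.append, used in the cell above, was deprecated in pandas 1.4 and removed in 2.0; on newer environments pd.concat builds the same frame:

score_df = pd.concat([score_df, pd.DataFrame(scores, index=[1])])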

In [41]:
df.groupby(by="Actual").Actual.count()


Out[41]:
Actual
0.0     9711
1.0    12833
Name: Actual, dtype: int64

In [42]:
plot(actual_value = df.loc[:,'Actual'].values.astype(int),
     pred_value = df.loc[:,'Prediction'].values.astype(int))



In [43]:
df_.groupby(by="Actual").Actual.count()


Out[43]:
Actual
0.0    2152
1.0    9698
Name: Actual, dtype: int64

In [44]:
plot(actual_value = df_.loc[:,'Actual'].values.astype(int),
     pred_value = df_.loc[:,'Prediction'].values.astype(int))



In [45]:
from scipy import stats

def fn(x):
    # 95% normal interval for the per-run f1_score of each
    # (no_of_features, hidden_layers) configuration; groups with a
    # single run have an undefined std, hence the (nan, nan) entries.
    return stats.norm.interval(0.95, loc=x.f1_score.mean(), scale=x.f1_score.std())
psg.apply(fn)


Out[45]:
no_of_features  hidden_layers
1               1                  (0.64825565032, 0.80271747066)
                3                 (0.65008285401, 0.818604779719)
12              1                                      (nan, nan)
                3                (0.776571777742, 0.948245001786)
24              1                                      (nan, nan)
                3                 (0.80073006819, 0.932379483883)
48              1                                      (nan, nan)
                3                (0.648904848451, 0.754927695761)
122             1                                      (nan, nan)
                3                 (0.70535682815, 0.998411116676)
dtype: object
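These intervals describe the spread of per-run f1_score values, since they are scaled by the raw standard deviation. An interval for the mean f1_score of a configuration would scale by the standard error instead; a small-sample t-interval sketch (mean_ci is a hypothetical helper, not part of the original notebook):

def mean_ci(x):
    # 95% t-interval for the group's mean f1_score; single-run groups
    # still yield (nan, nan) because no spread can be estimated.
    n = len(x)
    if n < 2:
        return (float('nan'), float('nan'))
    return stats.t.interval(0.95, n - 1, loc=x.f1_score.mean(),
                            scale=stats.sem(x.f1_score))

psg.apply(mean_ci)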

In [ ]: