Read Data Sample


In [1]:
import pandas as pd
import numpy as np
import os
from collections import namedtuple
pd.set_option("display.max_rows",1000)
%matplotlib inline

In [2]:
%%bash
rm dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all-.pkl


rm: cannot remove 'dataset/tf_vae_dense_trained_together_nsl_kdd_all-.pkl': No such file or directory

In [3]:
class dataset:
    kdd_train_2labels = pd.read_pickle("dataset/kdd_train__2labels.pkl")
    kdd_test_2labels = pd.read_pickle("dataset/kdd_test_2labels.pkl")
    kdd_test__2labels = pd.read_pickle("dataset/kdd_test__2labels.pkl")
    
    kdd_train_5labels = pd.read_pickle("dataset/kdd_train_5labels.pkl")
    kdd_test_5labels = pd.read_pickle("dataset/kdd_test_5labels.pkl")

In [4]:
dataset.kdd_train_2labels.shape


Out[4]:
(25192, 124)

In [5]:
dataset.kdd_test_2labels.shape


Out[5]:
(22544, 124)
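
Both frames have 124 columns: the 122 feature columns that feed the network (input_dim = 122 below) plus the two one-hot label columns is_Normal and is_Attack that the next cell splits off. A quick sanity check (a sketch, not part of the original run):

label_cols = ['is_Normal', 'is_Attack']
feature_cols = [c for c in dataset.kdd_train_2labels.columns if c not in label_cols]
print(len(feature_cols))  # expected: 122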

In [6]:
from sklearn import model_selection as ms
from sklearn import preprocessing as pp

class preprocess:
    
    output_columns_2labels = ['is_Normal','is_Attack']
    
    x_input = dataset.kdd_train_2labels.drop(output_columns_2labels, axis = 1)
    y_output = dataset.kdd_train_2labels.loc[:,output_columns_2labels]

    x_test_input = dataset.kdd_test_2labels.drop(output_columns_2labels, axis = 1)
    y_test = dataset.kdd_test_2labels.loc[:,output_columns_2labels]

    x_test__input = dataset.kdd_test__2labels.drop(output_columns_2labels, axis = 1)
    y_test_ = dataset.kdd_test__2labels.loc[:,output_columns_2labels]
    
    ss = pp.StandardScaler()

    x_train = ss.fit_transform(x_input)
    x_test = ss.transform(x_test_input)
    x_test_ = ss.transform(x_test__input)

    y_train = y_output.values
    y_test = y_test.values
    y_test_ = y_test_.values

preprocess.x_train.std()


Out[6]:
0.97509982675167528
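
The scaler is fitted on the training features only and reused for both test sets, so the test data is standardized with the training mean and standard deviation. The pooled standard deviation of roughly 0.975 (rather than exactly 1) is most likely due to a few constant one-hot columns: StandardScaler keeps their scale at 1, so they still contribute zero variance after transformation. A quick check (a sketch, not part of the original run):

n_flat = int(np.sum(preprocess.x_train.std(axis=0) < 1e-6))
print(n_flat, "near-constant columns out of", preprocess.x_train.shape[1])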

In [7]:
import tensorflow as tf

In [8]:
class network(object):
    
    input_dim = 122
    classes = 2
    hidden_encoder_dim = 122
    hidden_layers = 1
    latent_dim = 10

    hidden_decoder_dim = 122
    lam = 0.001
    
    def __init__(self, classes, hidden_layers, num_of_features):
        self.classes = classes
        self.hidden_layers = hidden_layers
        self.latent_dim = num_of_features
            
    def build_layers(self):
        tf.reset_default_graph()
        #learning_rate = tf.Variable(initial_value=0.001)

        input_dim = self.input_dim
        classes = self.classes
        hidden_encoder_dim = self.hidden_encoder_dim
        hidden_layers = self.hidden_layers
        latent_dim = self.latent_dim
        hidden_decoder_dim = self.hidden_decoder_dim
        lam = self.lam
        
        with tf.variable_scope("Input"):
            self.x = tf.placeholder("float", shape=[None, input_dim])
            self.y_ = tf.placeholder("float", shape=[None, classes])
            self.keep_prob = tf.placeholder("float")
            self.lr = tf.placeholder("float")
        
        with tf.variable_scope("Layer_Encoder"):

            hidden_encoder = tf.layers.dense(self.x, hidden_encoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
            hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
            for h in range(hidden_layers - 1):
                hidden_encoder = tf.layers.dense(hidden_encoder, hidden_encoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
                hidden_encoder = tf.nn.dropout(hidden_encoder, self.keep_prob)
                
        with tf.variable_scope("Layer_Mean"):
            mu_encoder = tf.layers.dense(hidden_encoder, latent_dim, activation = None, kernel_regularizer=tf.nn.l2_loss)

        with tf.variable_scope("Layer_Variance"):
            logvar_encoder = tf.layers.dense(hidden_encoder, latent_dim, activation = None, kernel_regularizer=tf.nn.l2_loss)

        with tf.variable_scope("Sampling_Distribution"):
            # Sample epsilon
            epsilon = tf.random_normal(tf.shape(logvar_encoder), mean=0, stddev=1, name='epsilon')

            # Sample latent variable
            std_encoder = tf.exp(0.5 * logvar_encoder)
            z = mu_encoder + tf.multiply(std_encoder, epsilon)
            
            #tf.summary.histogram("Sample_Distribution", z)

        with tf.variable_scope("Layer_Decoder"):
            hidden_decoder = tf.layers.dense(z, hidden_decoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
            hidden_decoder = tf.nn.dropout(hidden_decoder, self.keep_prob)
            for h in range(hidden_layers - 1):
                hidden_decoder = tf.layers.dense(hidden_decoder, hidden_decoder_dim, activation = tf.nn.relu, kernel_regularizer=tf.nn.l2_loss)
                hidden_decoder = tf.nn.dropout(hidden_decoder, self.keep_prob)
                
        with tf.variable_scope("Layer_Reconstruction"):
            x_hat = tf.layers.dense(hidden_decoder, input_dim, activation = None)
            
        with tf.variable_scope("Layer_Dense_Hidden"):
            hidden_output = tf.layers.dense(z,latent_dim, activation=tf.nn.relu)

        with tf.variable_scope("Layer_Dense_Softmax"):
            self.y = tf.layers.dense(z, classes, activation=tf.nn.softmax)

        with tf.variable_scope("Loss"):
            
            BCE = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=x_hat, labels=self.x), reduction_indices=1)
            KLD = -0.5 * tf.reduce_mean(1 + logvar_encoder - tf.pow(mu_encoder, 2) - tf.exp(logvar_encoder), reduction_indices=1)
            # note: self.y is already softmax-activated, while softmax_cross_entropy_with_logits expects raw logits
            softmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y_, logits = self.y))

            loss = tf.reduce_mean((BCE + KLD + softmax_loss) * lam)

            # numerical safeguards: clip the combined loss, replace NaNs, and re-randomize saturated values
            loss = tf.clip_by_value(loss, -1e-4, 1e-4)
            loss = tf.where(tf.is_nan(loss), 1e-4, loss)
            loss = tf.where(tf.equal(loss, -1e-4), tf.random_normal(loss.shape), loss)
            loss = tf.where(tf.equal(loss, 1e-4), tf.random_normal(loss.shape), loss)
            
            self.regularized_loss = tf.abs(loss, name = "Regularized_loss")
            correct_prediction = tf.equal(tf.argmax(self.y_, 1), tf.argmax(self.y, 1))
            self.tf_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name = "Accuracy")

        with tf.variable_scope("Optimizer"):
            learning_rate=self.lr #1e-2
            optimizer = tf.train.AdamOptimizer(learning_rate)
            gradients, variables = zip(*optimizer.compute_gradients(self.regularized_loss))
            gradients = [
                None if gradient is None else tf.clip_by_value(gradient, -1, 1)
                for gradient in gradients]
            self.train_op = optimizer.apply_gradients(zip(gradients, variables))
            #self.train_op = optimizer.minimize(self.regularized_loss)
            
        # add op for merging summary
        #self.summary_op = tf.summary.merge_all()
        self.pred = tf.argmax(self.y, axis = 1)
        self.actual = tf.argmax(self.y_, axis = 1)

        # add Saver ops
        self.saver = tf.train.Saver()

In [9]:
import collections
import time
import sklearn.metrics as me 

class Train:    
    
    result = namedtuple("score", ['epoch', 'no_of_features','hidden_layers','train_score', 'test_score', 'f1_score', 'test_score_20', 'f1_score_20', 'time_taken'])

    predictions = {}
    predictions_ = {}

    results = []
    best_acc = 0
    best_acc_global = 0

    def train(epochs, net, h,f, lrs):
        batch_iterations = 200
        train_loss = None
        Train.best_acc = 0
        os.makedirs("dataset/tf_vae_dense_trained_together_nsl_kdd-/hidden layers_{}_features count_{}".format(h,f),
                    exist_ok = True)

        with tf.Session() as sess:
            #summary_writer_train = tf.summary.FileWriter('./logs/kdd/VAE/training', graph=sess.graph)
            #summary_writer_valid = tf.summary.FileWriter('./logs/kdd/VAE/validation')

            sess.run(tf.global_variables_initializer())
            start_time = time.perf_counter()

            Train.best_acc = 0
            for c, lr in enumerate(lrs):
                for epoch in range(1, (epochs+1)):
                    x_train, x_valid, y_train, y_valid, = ms.train_test_split(preprocess.x_train, 
                                                                              preprocess.y_train, 
                                                                              test_size=0.2)
                    batch_indices = np.array_split(np.arange(x_train.shape[0]), 
                                               batch_iterations)

                    for i in batch_indices:

                        def train_batch():
                            nonlocal train_loss
                            _, train_loss = sess.run([net.train_op, 
                                                                   net.regularized_loss, 
                                                                   ], #net.summary_op
                                                                  feed_dict={net.x: x_train[i,:], 
                                                                             net.y_: y_train[i,:], 
                                                                             net.keep_prob:1, net.lr:lr})

                        train_batch()
                        count = 10
                        
                        while((train_loss > 1e4 or np.isnan(train_loss)) and epoch > 1 and count > 1):
                            print("Step {} | High Training Loss: {:.6f} ... Restoring Net".format(epoch, train_loss))
                            net.saver.restore(sess, 
                                              tf.train.latest_checkpoint('dataset/tf_vae_dense_trained_together_nsl_kdd-/hidden layers_{}_features count_{}'
                                                                         .format(h,f)))
                            train_batch()
                            count -= 1

                    valid_loss, valid_accuracy = sess.run([net.regularized_loss, net.tf_accuracy], #net.summary_op
                                                              feed_dict={net.x: x_valid, 
                                                                         net.y_: y_valid, 
                                                                         net.keep_prob:1, net.lr:lr})
                    
                    test_accuracy, test_loss, pred_value, actual_value, y_pred = sess.run([net.tf_accuracy, net.regularized_loss, net.pred, 
                                                                                      net.actual, net.y], #net.summary_op 
                                                                                      feed_dict={net.x: preprocess.x_test, 
                                                                                     net.y_: preprocess.y_test, 
                                                                                     net.keep_prob:1, net.lr:lr})
                    
                    f1_score = me.f1_score(actual_value, pred_value)
                    test_accuracy_, test_loss_, pred_value_, actual_value_, y_pred_ = sess.run([net.tf_accuracy, net.regularized_loss, net.pred, 
                                                                                      net.actual, net.y], #net.summary_op 
                                                                                      feed_dict={net.x: preprocess.x_test_, 
                                                                                     net.y_: preprocess.y_test_, 
                                                                                     net.keep_prob:1, net.lr:lr})
                    f1_score_ = me.f1_score(actual_value_, pred_value_)
                    #summary_writer_valid.add_summary(summary_str, epoch)

                    print("Step {} | Training Loss: {:.6f} | Validation Accuracy: {:.6f}".format(epoch, train_loss, valid_accuracy))
                    print("Accuracy on Test data: {}, {}".format(test_accuracy, test_accuracy_))

                    if test_accuracy > Train.best_acc_global:
                        Train.best_acc_global = test_accuracy
                        Train.pred_value = pred_value
                        Train.actual_value = actual_value
                        
                        Train.pred_value_ = pred_value_
                        Train.actual_value_ = actual_value_
                        
                        Train.best_parameters = "Hidden Layers:{}, Features Count:{}".format(h, f)

                    if test_accuracy > Train.best_acc:
                        Train.best_acc = test_accuracy

                        if not (np.isnan(train_loss)):
                            net.saver.save(sess, 
                                       "dataset/tf_vae_dense_trained_together_nsl_kdd-/hidden layers_{}_features count_{}/model"
                                       .format(h,f), 
                                       global_step = epoch, 
                                       write_meta_graph=False)

                        curr_pred = pd.DataFrame({"Attack_prob":y_pred[:,-2], "Normal_prob":y_pred[:, -1], "Prediction":pred_value, "Actual":actual_value})
                        curr_pred_ = pd.DataFrame({"Attack_prob":y_pred_[:,-2], "Normal_prob":y_pred_[:, -1], "Prediction":pred_value_, "Actual": actual_value_})
                        
                        Train.predictions.update({"{}_{}_{}".format((epoch+1)*(c+1),f,h):(curr_pred, 
                                                   Train.result((epoch+1)*(c+1), f, h, valid_accuracy, test_accuracy, f1_score, test_accuracy_, f1_score_, time.perf_counter() - start_time))})
                        Train.predictions_.update({"{}_{}_{}".format((epoch+1)*(c+1),f,h):(curr_pred_, 
                                                   Train.result((epoch+1)*(c+1), f, h, valid_accuracy, test_accuracy, f1_score, test_accuracy_, f1_score_, time.perf_counter() - start_time))})
                        #Train.results.append(Train.result(epochs, f, h,valid_accuracy, test_accuracy))
            print("Best Accuracy on Test data: {}".format(Train.best_acc))

In [10]:
import itertools

df_results = []
past_scores = []

class Hyperparameters:
#    features_arr = [2, 4, 8, 16, 32, 64, 128, 256]
#    hidden_layers_arr = [2, 4, 6, 10]

    def start_training():
        global df_results
        global past_scores
        
        features_arr = [1, 12, 24, 48, 122]
        hidden_layers_arr = [1, 3]

        Train.predictions = {}
        Train.predictions_ = {}
        Train.results = []

        epochs = [15]
        lrs = [1e-2, 1e-2]

        for e, h, f in itertools.product(epochs, hidden_layers_arr, features_arr):
            print("Current Layer Attributes - epochs:{} hidden layers:{} features count:{}".format(e,h,f))
            n = network(2,h,f)
            n.build_layers()
            Train.train(e, n, h,f, lrs)
            
        dict1 = {}
        dict1_ = {}
        dict2 = []

        for k, (v1, v2) in Train.predictions.items():
            dict1.update({k: v1})
            dict2.append(v2)

        for k, (v1_, v2) in Train.predictions_.items():
            dict1_.update({k: v1_})

        Train.predictions = dict1
        Train.predictions_ = dict1_
        
        Train.results = dict2
        df_results = pd.DataFrame(Train.results)

        #temp = df_results.set_index(['no_of_features', 'hidden_layers'])

        
        if not os.path.isfile('dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all-.pkl'):
            past_scores = df_results
        else:
            past_scores = pd.read_pickle("dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all-.pkl")
            past_scores = past_scores.append(df_results, ignore_index=True)
                
        past_scores.to_pickle("dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all-.pkl")

In [11]:
#%%timeit -r 10

Hyperparameters.start_training()


Current Layer Attributes - epochs:15 hidden layers:1 features count:1
Step 1 | Training Loss: 0.000665 | Validation Accuracy: 0.652113
Accuracy on Test data: 0.6522799730300903, 0.6925738453865051
Step 2 | Training Loss: 0.000305 | Validation Accuracy: 0.815836
Accuracy on Test data: 0.716864824295044, 0.48877638578414917
Step 3 | Training Loss: 0.000190 | Validation Accuracy: 0.802540
Accuracy on Test data: 0.7153566479682922, 0.5171307921409607
Step 4 | Training Loss: 0.000305 | Validation Accuracy: 0.773963
Accuracy on Test data: 0.7897001504898071, 0.7081856727600098
Step 5 | Training Loss: 0.000015 | Validation Accuracy: 0.752133
Accuracy on Test data: 0.7259137630462646, 0.6094514727592468
Step 6 | Training Loss: 0.000042 | Validation Accuracy: 0.818218
Accuracy on Test data: 0.7488910555839539, 0.598143458366394
Step 7 | Training Loss: 0.000106 | Validation Accuracy: 0.742012
Accuracy on Test data: 0.6262863874435425, 0.45603376626968384
Step 8 | Training Loss: 0.000142 | Validation Accuracy: 0.820004
Accuracy on Test data: 0.6794712543487549, 0.4881856441497803
Step 9 | Training Loss: 0.000031 | Validation Accuracy: 0.876563
Accuracy on Test data: 0.7201915979385376, 0.4871729910373688
Step 10 | Training Loss: 0.000224 | Validation Accuracy: 0.867236
Accuracy on Test data: 0.7204577922821045, 0.5154430270195007
Step 11 | Training Loss: 0.000171 | Validation Accuracy: 0.852947
Accuracy on Test data: 0.7219215631484985, 0.5103797316551208
Step 12 | Training Loss: 0.000192 | Validation Accuracy: 0.803136
Accuracy on Test data: 0.7052430510520935, 0.5766244530677795
Step 13 | Training Loss: 0.000263 | Validation Accuracy: 0.811074
Accuracy on Test data: 0.6953513026237488, 0.5509704351425171
Step 14 | Training Loss: 0.000170 | Validation Accuracy: 0.793411
Accuracy on Test data: 0.6611958742141724, 0.46725738048553467
Step 15 | Training Loss: 0.000350 | Validation Accuracy: 0.838857
Accuracy on Test data: 0.7302608489990234, 0.5295358896255493
Step 1 | Training Loss: 0.000066 | Validation Accuracy: 0.834888
Accuracy on Test data: 0.7156227827072144, 0.5097046494483948
Step 2 | Training Loss: 0.000019 | Validation Accuracy: 0.855725
Accuracy on Test data: 0.7503548860549927, 0.5929113626480103
Step 3 | Training Loss: 0.000022 | Validation Accuracy: 0.856718
Accuracy on Test data: 0.778788149356842, 0.6087763905525208
Step 4 | Training Loss: 0.000024 | Validation Accuracy: 0.845207
Accuracy on Test data: 0.7721788287162781, 0.6077637076377869
Step 5 | Training Loss: 0.000170 | Validation Accuracy: 0.857908
Accuracy on Test data: 0.7426809668540955, 0.5760337710380554
Step 6 | Training Loss: 0.000027 | Validation Accuracy: 0.800357
Accuracy on Test data: 0.7191270589828491, 0.5897046327590942
Step 7 | Training Loss: 0.000359 | Validation Accuracy: 0.854336
Accuracy on Test data: 0.7472054362297058, 0.5720674991607666
Step 8 | Training Loss: 0.000118 | Validation Accuracy: 0.871800
Accuracy on Test data: 0.7754169702529907, 0.6183122396469116
Step 9 | Training Loss: 0.000018 | Validation Accuracy: 0.873983
Accuracy on Test data: 0.7435681223869324, 0.5558649897575378
Step 10 | Training Loss: 0.000125 | Validation Accuracy: 0.857908
Accuracy on Test data: 0.7341199517250061, 0.5496202707290649
Step 11 | Training Loss: 0.000137 | Validation Accuracy: 0.859893
Accuracy on Test data: 0.7411284446716309, 0.5871729850769043
Step 12 | Training Loss: 0.000012 | Validation Accuracy: 0.868823
Accuracy on Test data: 0.76987224817276, 0.6269198060035706
Step 13 | Training Loss: 0.000013 | Validation Accuracy: 0.865846
Accuracy on Test data: 0.7399308085441589, 0.5775527358055115
Step 14 | Training Loss: 0.000078 | Validation Accuracy: 0.885295
Accuracy on Test data: 0.7483587861061096, 0.5668354630470276
Step 15 | Training Loss: 0.000042 | Validation Accuracy: 0.874975
Accuracy on Test data: 0.7628637552261353, 0.6121519207954407
Best Accuracy on Test data: 0.7897001504898071
Current Layer Attributes - epochs:15 hidden layers:1 features count:12
Step 1 | Training Loss: 1.987724 | Validation Accuracy: 0.815241
Accuracy on Test data: 0.7488023638725281, 0.5691139101982117
/home/ritesh_malaiya/anaconda3/envs/p3/lib/python3.6/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 due to no predicted samples.
  'precision', 'predicted', average, warn_for)
Step 2 | Training Loss: 0.012218 | Validation Accuracy: 0.542568
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 1.805758 | Validation Accuracy: 0.538599
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 1.564975 | Validation Accuracy: 0.541774
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.934638 | Validation Accuracy: 0.525700
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.153852 | Validation Accuracy: 0.528279
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.682114 | Validation Accuracy: 0.544751
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.747098 | Validation Accuracy: 0.530859
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.797108 | Validation Accuracy: 0.534431
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.495139 | Validation Accuracy: 0.524707
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.579927 | Validation Accuracy: 0.527287
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.396832 | Validation Accuracy: 0.539591
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.592717 | Validation Accuracy: 0.534431
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.511182 | Validation Accuracy: 0.528875
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.804181 | Validation Accuracy: 0.529073
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.318750 | Validation Accuracy: 0.530462
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.295224 | Validation Accuracy: 0.536416
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 1.602901 | Validation Accuracy: 0.543362
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.663169 | Validation Accuracy: 0.534630
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.338673 | Validation Accuracy: 0.532248
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 1.041948 | Validation Accuracy: 0.540980
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.936222 | Validation Accuracy: 0.543759
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.630083 | Validation Accuracy: 0.528676
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.986195 | Validation Accuracy: 0.531852
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.356152 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.058322 | Validation Accuracy: 0.533836
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.735073 | Validation Accuracy: 0.531653
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.321524 | Validation Accuracy: 0.533836
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.654641 | Validation Accuracy: 0.527287
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 2.348252 | Validation Accuracy: 0.531653
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.7488023638725281
Current Layer Attributes - epochs:15 hidden layers:1 features count:24
Step 1 | Training Loss: 0.001060 | Validation Accuracy: 0.871998
Accuracy on Test data: 0.6661639213562012, 0.45257383584976196
Step 2 | Training Loss: 0.000838 | Validation Accuracy: 0.864854
Accuracy on Test data: 0.7920954823493958, 0.6361181139945984
Step 3 | Training Loss: 0.001454 | Validation Accuracy: 0.791030
Accuracy on Test data: 0.6505500078201294, 0.3876793384552002
Step 4 | Training Loss: 0.955112 | Validation Accuracy: 0.541774
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.983788 | Validation Accuracy: 0.529272
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 1.135413 | Validation Accuracy: 0.537408
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.508043 | Validation Accuracy: 0.535027
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 1.153921 | Validation Accuracy: 0.536019
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.184121 | Validation Accuracy: 0.528478
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.921163 | Validation Accuracy: 0.546537
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 2.288743 | Validation Accuracy: 0.533638
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.920667 | Validation Accuracy: 0.530264
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.536525 | Validation Accuracy: 0.538996
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.539061 | Validation Accuracy: 0.526692
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.954227 | Validation Accuracy: 0.535225
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.161555 | Validation Accuracy: 0.531455
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.406868 | Validation Accuracy: 0.530859
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.634016 | Validation Accuracy: 0.527684
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 1.316543 | Validation Accuracy: 0.533241
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 1.074220 | Validation Accuracy: 0.538400
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.439035 | Validation Accuracy: 0.524707
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 1.131871 | Validation Accuracy: 0.529272
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.295083 | Validation Accuracy: 0.539988
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.265419 | Validation Accuracy: 0.525303
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.232637 | Validation Accuracy: 0.546140
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.895969 | Validation Accuracy: 0.533836
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.249705 | Validation Accuracy: 0.526295
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 2.190820 | Validation Accuracy: 0.534035
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.695601 | Validation Accuracy: 0.529867
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 1.205935 | Validation Accuracy: 0.541377
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.7920954823493958
Current Layer Attributes - epochs:15 hidden layers:1 features count:48
Step 1 | Training Loss: 0.000368 | Validation Accuracy: 0.914864
Accuracy on Test data: 0.8122338652610779, 0.647426187992096
Step 2 | Training Loss: 1.315042 | Validation Accuracy: 0.871602
Accuracy on Test data: 0.7071061134338379, 0.4583122432231903
Step 3 | Training Loss: 0.944233 | Validation Accuracy: 0.538400
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 1.076023 | Validation Accuracy: 0.542766
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.046245 | Validation Accuracy: 0.518357
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.521358 | Validation Accuracy: 0.529272
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.107319 | Validation Accuracy: 0.526295
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 1.585423 | Validation Accuracy: 0.540187
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 1.665708 | Validation Accuracy: 0.530661
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.072209 | Validation Accuracy: 0.521334
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.860059 | Validation Accuracy: 0.529867
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 1.319000 | Validation Accuracy: 0.541576
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.366902 | Validation Accuracy: 0.537607
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 1.190232 | Validation Accuracy: 0.525898
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.362021 | Validation Accuracy: 0.536019
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.718000 | Validation Accuracy: 0.528875
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 1.052636 | Validation Accuracy: 0.531653
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.863017 | Validation Accuracy: 0.537210
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 2.786286 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.742634 | Validation Accuracy: 0.540583
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 1.083127 | Validation Accuracy: 0.531852
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.426010 | Validation Accuracy: 0.530859
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.333158 | Validation Accuracy: 0.537210
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.448228 | Validation Accuracy: 0.531455
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.253593 | Validation Accuracy: 0.528279
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.744492 | Validation Accuracy: 0.546537
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.139003 | Validation Accuracy: 0.531852
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 1.035890 | Validation Accuracy: 0.536614
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 3.021080 | Validation Accuracy: 0.522723
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.213813 | Validation Accuracy: 0.527684
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.8122338652610779
Current Layer Attributes - epochs:15 hidden layers:1 features count:122
Step 1 | Training Loss: 0.763570 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.756096 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.130683 | Validation Accuracy: 0.533638
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 1.112396 | Validation Accuracy: 0.533241
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 2.970290 | Validation Accuracy: 0.537011
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.214373 | Validation Accuracy: 0.529470
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 1.212557 | Validation Accuracy: 0.531256
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 1.729573 | Validation Accuracy: 0.540583
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.746703 | Validation Accuracy: 0.527883
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.527806 | Validation Accuracy: 0.533042
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.094671 | Validation Accuracy: 0.536218
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.807922 | Validation Accuracy: 0.544949
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.894072 | Validation Accuracy: 0.540980
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.135036 | Validation Accuracy: 0.523318
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.040706 | Validation Accuracy: 0.540583
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.391631 | Validation Accuracy: 0.547132
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 1.237380 | Validation Accuracy: 0.542370
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.263388 | Validation Accuracy: 0.527287
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.399686 | Validation Accuracy: 0.541179
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.961852 | Validation Accuracy: 0.535225
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.705434 | Validation Accuracy: 0.537210
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.979439 | Validation Accuracy: 0.543163
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.680752 | Validation Accuracy: 0.538004
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.936349 | Validation Accuracy: 0.544949
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.199504 | Validation Accuracy: 0.539988
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.096401 | Validation Accuracy: 0.543759
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.173291 | Validation Accuracy: 0.532645
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.421284 | Validation Accuracy: 0.543362
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 1.940808 | Validation Accuracy: 0.537805
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.315776 | Validation Accuracy: 0.530661
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.43075764179229736
Current Layer Attributes - epochs:15 hidden layers:3 features count:1
Step 1 | Training Loss: 0.000093 | Validation Accuracy: 0.643580
Accuracy on Test data: 0.6314318776130676, 0.5137552618980408
Step 2 | Training Loss: 0.000737 | Validation Accuracy: 0.597539
Accuracy on Test data: 0.595901370048523, 0.5198312401771545
Step 3 | Training Loss: 0.000062 | Validation Accuracy: 0.626910
Accuracy on Test data: 0.6588449478149414, 0.645316481590271
Step 4 | Training Loss: 0.000216 | Validation Accuracy: 0.696765
Accuracy on Test data: 0.7372693419456482, 0.6072573661804199
Step 5 | Training Loss: 0.000007 | Validation Accuracy: 0.725144
Accuracy on Test data: 0.7258250713348389, 0.6274261474609375
Step 6 | Training Loss: 0.000046 | Validation Accuracy: 0.668783
Accuracy on Test data: 0.6443843245506287, 0.46928268671035767
Step 7 | Training Loss: 0.000046 | Validation Accuracy: 0.681484
Accuracy on Test data: 0.6622604727745056, 0.5103797316551208
Step 8 | Training Loss: 0.000096 | Validation Accuracy: 0.667196
Accuracy on Test data: 0.6324077248573303, 0.47578057646751404
Step 9 | Training Loss: 0.000156 | Validation Accuracy: 0.694979
Accuracy on Test data: 0.7618878483772278, 0.647763729095459
Step 10 | Training Loss: 0.000062 | Validation Accuracy: 0.709069
Accuracy on Test data: 0.7138928174972534, 0.5889451503753662
Step 11 | Training Loss: 0.000121 | Validation Accuracy: 0.737249
Accuracy on Test data: 0.7503548860549927, 0.6370463967323303
Step 12 | Training Loss: 0.000149 | Validation Accuracy: 0.706489
Accuracy on Test data: 0.697613537311554, 0.5612658262252808
Step 13 | Training Loss: 0.000105 | Validation Accuracy: 0.673348
Accuracy on Test data: 0.6672285199165344, 0.5334177017211914
Step 14 | Training Loss: 0.000102 | Validation Accuracy: 0.674737
Accuracy on Test data: 0.674237072467804, 0.5168776512145996
Step 15 | Training Loss: 0.000025 | Validation Accuracy: 0.688232
Accuracy on Test data: 0.6826649904251099, 0.5340084433555603
Step 1 | Training Loss: 0.000021 | Validation Accuracy: 0.725541
Accuracy on Test data: 0.7473385334014893, 0.6248100996017456
Step 2 | Training Loss: 0.000006 | Validation Accuracy: 0.721969
Accuracy on Test data: 0.7300834059715271, 0.6056540012359619
Step 3 | Training Loss: 0.000058 | Validation Accuracy: 0.715420
Accuracy on Test data: 0.7372693419456482, 0.6158649921417236
Step 4 | Training Loss: 0.000099 | Validation Accuracy: 0.697956
Accuracy on Test data: 0.7182399034500122, 0.5803375244140625
Step 5 | Training Loss: 0.000006 | Validation Accuracy: 0.724549
Accuracy on Test data: 0.7513307332992554, 0.6348523497581482
Step 6 | Training Loss: 0.000114 | Validation Accuracy: 0.714626
Accuracy on Test data: 0.7411284446716309, 0.6107172966003418
Step 7 | Training Loss: 0.000177 | Validation Accuracy: 0.723755
Accuracy on Test data: 0.7515525221824646, 0.6207594871520996
Step 8 | Training Loss: 0.000006 | Validation Accuracy: 0.671562
Accuracy on Test data: 0.6394606232643127, 0.47130802273750305
Step 9 | Training Loss: 0.000023 | Validation Accuracy: 0.692201
Accuracy on Test data: 0.6698899865150452, 0.5066666603088379
Step 10 | Training Loss: 0.000054 | Validation Accuracy: 0.692399
Accuracy on Test data: 0.6977022886276245, 0.5557805895805359
Step 11 | Training Loss: 0.000069 | Validation Accuracy: 0.725938
Accuracy on Test data: 0.7234297394752502, 0.5946835279464722
Step 12 | Training Loss: 0.000110 | Validation Accuracy: 0.708474
Accuracy on Test data: 0.7145581841468811, 0.5817721486091614
Step 13 | Training Loss: 0.000157 | Validation Accuracy: 0.739829
Accuracy on Test data: 0.7646380662918091, 0.6535021066665649
Step 14 | Training Loss: 0.000051 | Validation Accuracy: 0.690415
Accuracy on Test data: 0.6781848669052124, 0.5355274081230164
Step 15 | Training Loss: 0.000283 | Validation Accuracy: 0.792221
Accuracy on Test data: 0.7861515283584595, 0.6638818383216858
Best Accuracy on Test data: 0.7861515283584595
Current Layer Attributes - epochs:15 hidden layers:3 features count:12
Step 1 | Training Loss: 0.000279 | Validation Accuracy: 0.895813
Accuracy on Test data: 0.7882806658744812, 0.6433755159378052
Step 2 | Training Loss: 0.000117 | Validation Accuracy: 0.868029
Accuracy on Test data: 0.7578069567680359, 0.5910548567771912
Step 3 | Training Loss: 0.000168 | Validation Accuracy: 0.913276
Accuracy on Test data: 0.8248314261436462, 0.6795780658721924
Step 4 | Training Loss: 0.000052 | Validation Accuracy: 0.848978
Accuracy on Test data: 0.790587306022644, 0.6556962132453918
Step 5 | Training Loss: 0.000049 | Validation Accuracy: 0.808891
Accuracy on Test data: 0.8168026804924011, 0.699578046798706
Step 6 | Training Loss: 0.000156 | Validation Accuracy: 0.810677
Accuracy on Test data: 0.8396469354629517, 0.7426160573959351
Step 7 | Training Loss: 0.000006 | Validation Accuracy: 0.806311
Accuracy on Test data: 0.8388041257858276, 0.7439662218093872
Step 8 | Training Loss: 0.000038 | Validation Accuracy: 0.819607
Accuracy on Test data: 0.8314407467842102, 0.7313923835754395
Step 9 | Training Loss: 0.000054 | Validation Accuracy: 0.807501
Accuracy on Test data: 0.7965312004089355, 0.6744303703308105
Step 10 | Training Loss: 0.000009 | Validation Accuracy: 0.806311
Accuracy on Test data: 0.8097498416900635, 0.7119831442832947
Step 11 | Training Loss: 0.000135 | Validation Accuracy: 0.778329
Accuracy on Test data: 0.8306422829627991, 0.7534177303314209
Step 12 | Training Loss: 0.000137 | Validation Accuracy: 0.797182
Accuracy on Test data: 0.8293559551239014, 0.7227004170417786
Step 13 | Training Loss: 0.000058 | Validation Accuracy: 0.806112
Accuracy on Test data: 0.8036284446716309, 0.6767088770866394
Step 14 | Training Loss: 0.000061 | Validation Accuracy: 0.789045
Accuracy on Test data: 0.8027856349945068, 0.6749367117881775
Step 15 | Training Loss: 0.000001 | Validation Accuracy: 0.797182
Accuracy on Test data: 0.8103708028793335, 0.6791561245918274
Step 1 | Training Loss: 0.000009 | Validation Accuracy: 0.798373
Accuracy on Test data: 0.8103264570236206, 0.6867510676383972
Step 2 | Training Loss: 0.000094 | Validation Accuracy: 0.793411
Accuracy on Test data: 0.8203956484794617, 0.700843870639801
Step 3 | Training Loss: 0.000007 | Validation Accuracy: 0.794007
Accuracy on Test data: 0.813653290271759, 0.6978058815002441
Step 4 | Training Loss: 0.000047 | Validation Accuracy: 0.806112
Accuracy on Test data: 0.824609637260437, 0.7148523330688477
Step 5 | Training Loss: 0.000039 | Validation Accuracy: 0.786862
Accuracy on Test data: 0.81538325548172, 0.6918143630027771
Step 6 | Training Loss: 0.000129 | Validation Accuracy: 0.808891
Accuracy on Test data: 0.7985717058181763, 0.6779747009277344
Step 7 | Training Loss: 0.000000 | Validation Accuracy: 0.822385
Accuracy on Test data: 0.8220812678337097, 0.7082700133323669
Step 8 | Training Loss: 0.000009 | Validation Accuracy: 0.722564
Accuracy on Test data: 0.7679648399353027, 0.6787341833114624
Step 9 | Training Loss: 0.000016 | Validation Accuracy: 0.787061
Accuracy on Test data: 0.8216376900672913, 0.7361181378364563
Step 10 | Training Loss: 0.000102 | Validation Accuracy: 0.770193
Accuracy on Test data: 0.8221699595451355, 0.7306329011917114
Step 11 | Training Loss: 0.000045 | Validation Accuracy: 0.708275
Accuracy on Test data: 0.7452980875968933, 0.6262447237968445
Step 12 | Training Loss: 0.000043 | Validation Accuracy: 0.707879
Accuracy on Test data: 0.7305713295936584, 0.6079325079917908
Step 13 | Training Loss: 0.000001 | Validation Accuracy: 0.756102
Accuracy on Test data: 0.7484918236732483, 0.6216877698898315
Step 14 | Training Loss: 0.000008 | Validation Accuracy: 0.765033
Accuracy on Test data: 0.805890679359436, 0.7077637314796448
Step 15 | Training Loss: 0.000051 | Validation Accuracy: 0.783092
Accuracy on Test data: 0.8480305075645447, 0.7746835350990295
Best Accuracy on Test data: 0.8480305075645447
Current Layer Attributes - epochs:15 hidden layers:3 features count:24
Step 1 | Training Loss: 0.000086 | Validation Accuracy: 0.868029
Accuracy on Test data: 0.7796309590339661, 0.6357805728912354
Step 2 | Training Loss: 0.000111 | Validation Accuracy: 0.894423
Accuracy on Test data: 0.8007451891899109, 0.6647257208824158
Step 3 | Training Loss: 0.000183 | Validation Accuracy: 0.909506
Accuracy on Test data: 0.8608055114746094, 0.7559493780136108
Step 4 | Training Loss: 0.000109 | Validation Accuracy: 0.901171
Accuracy on Test data: 0.8163147568702698, 0.6899577975273132
Step 5 | Training Loss: 1.435426 | Validation Accuracy: 0.845406
Accuracy on Test data: 0.7838005423545837, 0.6617721319198608
Step 6 | Training Loss: 0.000046 | Validation Accuracy: 0.825362
Accuracy on Test data: 0.7087916731834412, 0.6480168700218201
Step 7 | Training Loss: 0.000027 | Validation Accuracy: 0.815638
Accuracy on Test data: 0.7389993071556091, 0.6801687479019165
Step 8 | Training Loss: 0.000091 | Validation Accuracy: 0.858702
Accuracy on Test data: 0.7409954071044922, 0.6364557147026062
Step 9 | Training Loss: 0.000141 | Validation Accuracy: 0.903751
Accuracy on Test data: 0.8419091701507568, 0.751139223575592
Step 10 | Training Loss: 0.000025 | Validation Accuracy: 0.926771
Accuracy on Test data: 0.8447036743164062, 0.7358649969100952
Step 11 | Training Loss: 0.000121 | Validation Accuracy: 0.906331
Accuracy on Test data: 0.8088626861572266, 0.7099577784538269
Step 12 | Training Loss: 0.000034 | Validation Accuracy: 0.915261
Accuracy on Test data: 0.7733321785926819, 0.594936728477478
Step 13 | Training Loss: 0.000092 | Validation Accuracy: 0.917444
Accuracy on Test data: 0.8308640718460083, 0.6921519041061401
Step 14 | Training Loss: 0.000078 | Validation Accuracy: 0.914666
Accuracy on Test data: 0.8145847916603088, 0.6745991706848145
Step 15 | Training Loss: 0.000006 | Validation Accuracy: 0.928557
Accuracy on Test data: 0.7756831049919128, 0.6475949287414551
Step 1 | Training Loss: 0.000014 | Validation Accuracy: 0.938678
Accuracy on Test data: 0.7747959494590759, 0.5864135026931763
Step 2 | Training Loss: 0.000099 | Validation Accuracy: 0.898393
Accuracy on Test data: 0.7079932689666748, 0.4611814320087433
Step 3 | Training Loss: 0.000020 | Validation Accuracy: 0.932526
Accuracy on Test data: 0.7804737687110901, 0.5898734331130981
Step 4 | Training Loss: 0.000022 | Validation Accuracy: 0.927763
Accuracy on Test data: 0.7914301156997681, 0.6102953553199768
Step 5 | Training Loss: 0.000099 | Validation Accuracy: 0.926771
Accuracy on Test data: 0.7542139887809753, 0.545485258102417
Step 6 | Training Loss: 0.000002 | Validation Accuracy: 0.906331
Accuracy on Test data: 0.7510645985603333, 0.5399156212806702
Step 7 | Training Loss: 0.000018 | Validation Accuracy: 0.918833
Accuracy on Test data: 0.7733321785926819, 0.5786497592926025
Step 8 | Training Loss: 0.000089 | Validation Accuracy: 0.916650
Accuracy on Test data: 0.761799156665802, 0.5546835660934448
Step 9 | Training Loss: 0.000116 | Validation Accuracy: 0.910300
Accuracy on Test data: 0.7876153588294983, 0.603966236114502
Step 10 | Training Loss: 0.000036 | Validation Accuracy: 0.913673
Accuracy on Test data: 0.7576295137405396, 0.5553586483001709
Step 11 | Training Loss: 0.000127 | Validation Accuracy: 0.903552
Accuracy on Test data: 0.7853087186813354, 0.605991542339325
Step 12 | Training Loss: 0.000209 | Validation Accuracy: 0.900972
Accuracy on Test data: 0.754347026348114, 0.5496202707290649
Step 13 | Training Loss: 0.000066 | Validation Accuracy: 0.875967
Accuracy on Test data: 0.7751508355140686, 0.5967932343482971
Step 14 | Training Loss: 0.000044 | Validation Accuracy: 0.905537
Accuracy on Test data: 0.7607789039611816, 0.556033730506897
Step 15 | Training Loss: 0.000064 | Validation Accuracy: 0.902362
Accuracy on Test data: 0.7811391353607178, 0.5908860564231873
Best Accuracy on Test data: 0.8608055114746094
Current Layer Attributes - epochs:15 hidden layers:3 features count:48
Step 1 | Training Loss: 0.000347 | Validation Accuracy: 0.884501
Accuracy on Test data: 0.8092618584632874, 0.6761181354522705
Step 2 | Training Loss: 0.000208 | Validation Accuracy: 0.904346
Accuracy on Test data: 0.8415099382400513, 0.7249789237976074
Step 3 | Training Loss: 0.000039 | Validation Accuracy: 0.887081
Accuracy on Test data: 0.8492282032966614, 0.7174683809280396
Step 4 | Training Loss: 0.000073 | Validation Accuracy: 0.897202
Accuracy on Test data: 0.8461675047874451, 0.7134177088737488
Step 5 | Training Loss: 0.000024 | Validation Accuracy: 0.895217
Accuracy on Test data: 0.8549503087997437, 0.7382278442382812
Step 6 | Training Loss: 0.000026 | Validation Accuracy: 0.892042
Accuracy on Test data: 0.8667938113212585, 0.7738396525382996
Step 7 | Training Loss: 0.000022 | Validation Accuracy: 0.886485
Accuracy on Test data: 0.8884403705596924, 0.801518976688385
Step 8 | Training Loss: 0.000046 | Validation Accuracy: 0.883707
Accuracy on Test data: 0.8852466344833374, 0.8017721772193909
Step 9 | Training Loss: 0.000068 | Validation Accuracy: 0.894423
Accuracy on Test data: 0.8867548108100891, 0.795443058013916
Step 10 | Training Loss: 0.000186 | Validation Accuracy: 0.846993
Accuracy on Test data: 0.8771735429763794, 0.8194093108177185
Step 11 | Training Loss: 0.000033 | Validation Accuracy: 0.870609
Accuracy on Test data: 0.8665720224380493, 0.8086919784545898
Step 12 | Training Loss: 0.000070 | Validation Accuracy: 0.818615
Accuracy on Test data: 0.8417760729789734, 0.7779746651649475
Step 13 | Training Loss: 0.000002 | Validation Accuracy: 0.835483
Accuracy on Test data: 0.8708747625350952, 0.8137552738189697
Step 14 | Training Loss: 0.000024 | Validation Accuracy: 0.837865
Accuracy on Test data: 0.8723385334014893, 0.8140928149223328
Step 15 | Training Loss: 0.000037 | Validation Accuracy: 0.838658
Accuracy on Test data: 0.8765525221824646, 0.8258227705955505
Step 1 | Training Loss: 0.000087 | Validation Accuracy: 0.866640
Accuracy on Test data: 0.8734918236732483, 0.7939240336418152
Step 2 | Training Loss: 0.000059 | Validation Accuracy: 0.865053
Accuracy on Test data: 0.8814762234687805, 0.8101266026496887
Step 3 | Training Loss: 0.000072 | Validation Accuracy: 0.766223
Accuracy on Test data: 0.840445339679718, 0.7813501954078674
Step 4 | Training Loss: 0.000023 | Validation Accuracy: 0.785275
Accuracy on Test data: 0.844481885433197, 0.7789029479026794
Step 5 | Training Loss: 0.000051 | Validation Accuracy: 0.813257
Accuracy on Test data: 0.8501153588294983, 0.7616033554077148
Step 6 | Training Loss: 0.000136 | Validation Accuracy: 0.817226
Accuracy on Test data: 0.8500709533691406, 0.7587341666221619
Step 7 | Training Loss: 0.000059 | Validation Accuracy: 0.829133
Accuracy on Test data: 0.8434173464775085, 0.7453164458274841
Step 8 | Training Loss: 0.000060 | Validation Accuracy: 0.841238
Accuracy on Test data: 0.8391146063804626, 0.7361181378364563
Step 9 | Training Loss: 0.000038 | Validation Accuracy: 0.797579
Accuracy on Test data: 0.8138307332992554, 0.6846413612365723
Step 10 | Training Loss: 0.000049 | Validation Accuracy: 0.796785
Accuracy on Test data: 0.805890679359436, 0.6732489466667175
Step 11 | Training Loss: 0.000072 | Validation Accuracy: 0.776940
Accuracy on Test data: 0.8035397529602051, 0.6749367117881775
Step 12 | Training Loss: 0.000030 | Validation Accuracy: 0.804326
Accuracy on Test data: 0.8095280528068542, 0.6799156069755554
Step 13 | Training Loss: 0.000085 | Validation Accuracy: 0.803334
Accuracy on Test data: 0.8109474778175354, 0.6880168914794922
Step 14 | Training Loss: 0.000031 | Validation Accuracy: 0.806112
Accuracy on Test data: 0.825141966342926, 0.7178903222084045
Step 15 | Training Loss: 0.000110 | Validation Accuracy: 0.799960
Accuracy on Test data: 0.8295333385467529, 0.7158649563789368
Best Accuracy on Test data: 0.8884403705596924
Current Layer Attributes - epochs:15 hidden layers:3 features count:122
Step 1 | Training Loss: 0.000374 | Validation Accuracy: 0.877357
Accuracy on Test data: 0.7500443458557129, 0.5751898884773254
Step 2 | Training Loss: 0.470249 | Validation Accuracy: 0.535027
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.291069 | Validation Accuracy: 0.553880
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.748151 | Validation Accuracy: 0.533439
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 0.128098 | Validation Accuracy: 0.535622
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.591737 | Validation Accuracy: 0.541576
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.140650 | Validation Accuracy: 0.529669
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.280989 | Validation Accuracy: 0.533042
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.040990 | Validation Accuracy: 0.525700
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 1.556867 | Validation Accuracy: 0.538004
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.118808 | Validation Accuracy: 0.521334
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.178845 | Validation Accuracy: 0.527883
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.798469 | Validation Accuracy: 0.531852
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 0.688534 | Validation Accuracy: 0.539988
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.495494 | Validation Accuracy: 0.528875
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 1 | Training Loss: 0.170531 | Validation Accuracy: 0.538004
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 2 | Training Loss: 0.804761 | Validation Accuracy: 0.531256
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 3 | Training Loss: 0.115751 | Validation Accuracy: 0.543362
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 4 | Training Loss: 0.866743 | Validation Accuracy: 0.529867
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 5 | Training Loss: 1.777332 | Validation Accuracy: 0.524509
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 6 | Training Loss: 0.207653 | Validation Accuracy: 0.524906
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 7 | Training Loss: 0.018124 | Validation Accuracy: 0.536614
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 8 | Training Loss: 0.531272 | Validation Accuracy: 0.533439
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 9 | Training Loss: 0.878964 | Validation Accuracy: 0.531256
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 10 | Training Loss: 0.343119 | Validation Accuracy: 0.536813
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 11 | Training Loss: 0.019428 | Validation Accuracy: 0.534828
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 12 | Training Loss: 0.266668 | Validation Accuracy: 0.526493
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 13 | Training Loss: 0.947765 | Validation Accuracy: 0.531455
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 14 | Training Loss: 1.278037 | Validation Accuracy: 0.535424
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Step 15 | Training Loss: 0.399154 | Validation Accuracy: 0.532844
Accuracy on Test data: 0.43075764179229736, 0.18160337209701538
Best Accuracy on Test data: 0.7500443458557129

In [12]:
pd.Panel(Train.predictions).to_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions-.pkl")
pd.Panel(Train.predictions_).to_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions-__.pkl")
df_results.to_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_scores-.pkl")

In [13]:
import numpy as np
import matplotlib.pyplot as plt
import itertools

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    np.set_printoptions(precision=4)

    if normalize:
        # normalize before plotting so the heatmap, threshold and cell text all use the same matrix
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    
    #print(cm)

    label = [["\n True Negative", "\n False Positive \n Type II Error"],
             ["\n False Negative \n Type I Error", "\n True Positive"]
            ]
    
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        
        plt.text(j, i, "{} {}".format(cm[i, j].round(4), label[i][j]),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

def plot(actual_value, pred_value):
    from sklearn.metrics import confusion_matrix

    cm_2labels = confusion_matrix(y_pred = pred_value, y_true = actual_value)
    plt.figure(figsize=[6,6])
    plot_confusion_matrix(cm_2labels, ['Normal', 'Attack'], normalize = False)
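
A quick sanity check of the helper on a made-up 2x2 count matrix (illustrative numbers only, not results from this experiment):

# Hypothetical counts, used only to exercise plot_confusion_matrix with normalization on.
toy_cm = np.array([[9000, 700],
                   [1500, 11300]])
plt.figure(figsize=[6, 6])
plot_confusion_matrix(toy_cm, ['Normal', 'Attack'], normalize=True,
                      title='Toy example (row-normalized)')
plt.show()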

In [14]:
past_scores = pd.read_pickle("dataset/scores/tf_vae_dense_trained_together_nsl_kdd_all-.pkl")

In [15]:
past_scores.sort_values(by='f1_score',ascending=False)


Out[15]:
epoch no_of_features hidden_layers train_score test_score f1_score test_score_20 f1_score_20 time_taken
26 8 48 3 0.886485 0.888440 0.897502 0.801519 0.870041 10.408620
25 7 48 3 0.892042 0.866794 0.879101 0.773840 0.853839 8.919692
17 32 12 3 0.783092 0.848031 0.874127 0.774684 0.869207 41.830300
20 4 24 3 0.909506 0.860806 0.871478 0.755949 0.839262 4.260230
24 6 48 3 0.895217 0.854950 0.863829 0.738228 0.823750 7.443657
16 7 12 3 0.810677 0.839647 0.862595 0.742616 0.844277 8.260208
23 4 48 3 0.887081 0.849228 0.861044 0.717468 0.815130 4.494314
22 3 48 3 0.904346 0.841510 0.847977 0.724979 0.807672 3.025846
15 4 12 3 0.913276 0.824831 0.831764 0.679578 0.778562 4.172075
6 2 48 1 0.914864 0.812234 0.815209 0.647426 0.748767 1.302569
21 2 48 3 0.884501 0.809262 0.811470 0.676118 0.768739 1.547668
13 32 1 3 0.792221 0.786152 0.800149 0.663882 0.778192 39.101900
19 3 24 3 0.894423 0.800745 0.799249 0.664726 0.758993 2.863533
2 5 1 1 0.773963 0.789700 0.790397 0.708186 0.800092 2.712769
14 2 12 3 0.895813 0.788281 0.784796 0.643376 0.739104 1.435609
5 3 24 1 0.864854 0.792095 0.784297 0.636118 0.725035 2.065240
12 28 1 3 0.739829 0.764638 0.781969 0.653502 0.774470 36.502404
18 2 24 3 0.868029 0.779631 0.779082 0.635781 0.738962 1.464846
11 10 1 3 0.694979 0.761888 0.773349 0.647764 0.759118 11.878906
3 2 12 1 0.815241 0.748802 0.741640 0.569114 0.676876 0.853013
10 5 1 3 0.696765 0.737269 0.739109 0.607257 0.723601 5.346412
27 2 122 3 0.877357 0.750044 0.736818 0.575190 0.660140 1.778840
0 2 1 1 0.652113 0.652280 0.725458 0.692574 0.802301 0.737689
9 4 1 3 0.626910 0.658845 0.691978 0.645316 0.759016 4.035135
1 3 1 1 0.815836 0.716865 0.678130 0.488776 0.559098 1.401122
8 2 1 3 0.643580 0.631432 0.592796 0.513755 0.610518 1.428993
4 2 24 1 0.871998 0.666164 0.591200 0.452574 0.508151 0.956361
7 2 122 1 0.537011 0.430758 0.000000 0.181603 0.000000 1.642546
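
Rather than scanning the sorted table by eye, the best configuration by f1_score can be pulled out programmatically from the same frame (a small convenience sketch):

# Row with the highest f1_score on Test+ (here row 26: epoch 8, no_of_features 48, hidden_layers 3).
best_run = past_scores.loc[past_scores['f1_score'].idxmax()]
best_run[['epoch', 'no_of_features', 'hidden_layers', 'test_score', 'f1_score', 'f1_score_20']]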

In [16]:
# Sorting by test_score first means .first() returns the best-scoring run
# within each (no_of_features, hidden_layers) group.
psg = past_scores.sort_values(by='test_score', ascending=False).groupby(by=['no_of_features', 'hidden_layers'])
psg.first().sort_values(by='test_score', ascending=False)


Out[16]:
epoch train_score test_score f1_score test_score_20 f1_score_20 time_taken
no_of_features hidden_layers
48 3 8 0.886485 0.888440 0.897502 0.801519 0.870041 10.408620
24 3 4 0.909506 0.860806 0.871478 0.755949 0.839262 4.260230
12 3 32 0.783092 0.848031 0.874127 0.774684 0.869207 41.830300
48 1 2 0.914864 0.812234 0.815209 0.647426 0.748767 1.302569
24 1 3 0.864854 0.792095 0.784297 0.636118 0.725035 2.065240
1 1 5 0.773963 0.789700 0.790397 0.708186 0.800092 2.712769
1 3 32 0.792221 0.786152 0.800149 0.663882 0.778192 39.101900
122 3 2 0.877357 0.750044 0.736818 0.575190 0.660140 1.778840
12 1 2 0.815241 0.748802 0.741640 0.569114 0.676876 0.853013
122 1 2 0.537011 0.430758 0.000000 0.181603 0.000000 1.642546

In [17]:
psg.mean().sort_values(by='test_score', ascending=False)


Out[17]:
epoch train_score test_score f1_score test_score_20 f1_score_20 time_taken
no_of_features hidden_layers
48 3 5.000000 0.891612 0.851697 0.860154 0.738692 0.823195 5.973299
12 3 11.250000 0.850714 0.825197 0.838321 0.710063 0.807787 13.924548
24 3 3.000000 0.890653 0.813727 0.816603 0.685485 0.779072 2.862870
48 1 2.000000 0.914864 0.812234 0.815209 0.647426 0.748767 1.302569
122 3 2.000000 0.877357 0.750044 0.736818 0.575190 0.660140 1.778840
12 1 2.000000 0.815241 0.748802 0.741640 0.569114 0.676876 0.853013
24 1 2.500000 0.868426 0.729130 0.687749 0.544346 0.616593 1.510800
1 3 13.500000 0.699047 0.723371 0.729892 0.621913 0.734153 16.382292
1 1 3.333333 0.747304 0.719615 0.731329 0.629845 0.720497 1.617193
122 1 2.000000 0.537011 0.430758 0.000000 0.181603 0.000000 1.642546
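
Some of these group means are based on only one or two runs, so it helps to see how many runs stand behind each row (a one-line sketch using the same grouping):

# Number of recorded runs per (no_of_features, hidden_layers) configuration.
psg.size().sort_values(ascending=False)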

In [18]:
Train.predictions = pd.read_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions-.pkl")
Train.predictions_ = pd.read_pickle("dataset/tf_vae_dense_trained_together_nsl_kdd_predictions-__.pkl")

In [20]:
# Run key format: <epoch>_<no_of_features>_<hidden_layers>
Train.predictions["8_48_3"].sample()


Out[20]:
Actual Attack_prob Normal_prob Prediction
11602 0.0 0.99998 0.00002 0.0

In [21]:
Train.predictions_["8_48_3"].sample()


Out[21]:
Actual Attack_prob Normal_prob Prediction
903 1.0 2.599516e-07 1.0 1.0

In [22]:
df = Train.predictions["8_48_3"].dropna()
df_ = Train.predictions_["8_48_3"].dropna()
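
With the two prediction frames in hand, the misclassified records of each split can be filtered directly (a small sketch using the column names shown in the samples above):

# Records where the hard prediction disagrees with the label, per split.
mis = df[df['Actual'] != df['Prediction']]     # Train+/Test+
mis_ = df_[df_['Actual'] != df_['Prediction']] # Train+/Test-
len(mis), len(mis_)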

In [23]:
from sklearn import metrics as me
def get_score(y_true, y_pred):
    # Binary classification metrics; the positive class is label 1 (Attack).
    f1 = me.f1_score(y_true, y_pred)
    pre = me.precision_score(y_true, y_pred)
    rec = me.recall_score(y_true, y_pred)
    acc = me.accuracy_score(y_true, y_pred)
    return {"F1 Score":f1, "Precision":pre, "Recall":rec, "Accuracy":acc}

In [24]:
from sklearn import metrics as me

# Score the best run on both test splits and collect the results in a single frame.
scores = get_score(df.loc[:,'Actual'].values.astype(int),
                df.loc[:,'Prediction'].values.astype(int))
scores.update({"Scenario":"Train+/Test+"})
score_df = pd.DataFrame(scores, index=[0])

scores = get_score(df_.loc[:,'Actual'].values.astype(int),
                df_.loc[:,'Prediction'].values.astype(int))
scores.update({"Scenario":"Train+/Test-"})

# DataFrame.append is removed in pandas >= 2.0; pd.concat is the modern equivalent.
score_df = score_df.append(pd.DataFrame(scores, index=[1]))

score_df


Out[24]:
Accuracy F1 Score Precision Recall Scenario
0 0.888440 0.897502 0.940789 0.858022 Train+/Test+
1 0.801519 0.870041 0.937262 0.811817 Train+/Test-
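
As a cross-check on these hand-assembled scores, sklearn's classification_report produces per-class precision, recall and F1 in one call (a sketch for the Train+/Test+ frame, reading label 0 as Normal and 1 as Attack, matching the confusion-matrix labels used above):

# Per-class report for the best run on Test+.
print(me.classification_report(df['Actual'].astype(int),
                               df['Prediction'].astype(int),
                               target_names=['Normal', 'Attack']))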

In [25]:
df.groupby(by="Actual").Actual.count()


Out[25]:
Actual
0.0     9711
1.0    12833
Name: Actual, dtype: int64

In [26]:
plot(actual_value = df.loc[:,'Actual'].values.astype(int),
     pred_value = df.loc[:,'Prediction'].values.astype(int))



In [27]:
df_.groupby(by="Actual").Actual.count()


Out[27]:
Actual
0.0    2152
1.0    9698
Name: Actual, dtype: int64

In [28]:
plot(actual_value = df_.loc[:,'Actual'].values.astype(int),
     pred_value = df_.loc[:,'Prediction'].values.astype(int))



In [29]:
from scipy import stats

def fn(x):
    # Normal-approximation 95% interval for the spread of f1_score across the runs
    # in each group; groups with a single run give (nan, nan) because std is undefined.
    return stats.norm.interval(0.95, loc=x.f1_score.mean(), scale=x.f1_score.std())
psg.apply(fn)


Out[29]:
no_of_features  hidden_layers
1               1                (0.620858304949, 0.841798744397)
                3                (0.578442504538, 0.881341074074)
12              1                                      (nan, nan)
                3                (0.760092317917, 0.916548956428)
24              1                (0.420135021181, 0.955362895992)
                3                (0.721386278662, 0.911819782842)
48              1                                      (nan, nan)
                3                (0.802766024432, 0.917541709845)
122             1                                      (nan, nan)
                3                                      (nan, nan)
dtype: object
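
The (nan, nan) rows are groups with a single run, where the sample standard deviation is undefined. A normal interval built from the raw std also describes the spread of individual runs rather than the uncertainty of the group mean; a Student-t interval on the mean is a common alternative for these small samples (a sketch over the same grouping):

def t_interval(x):
    # 95% t-interval for the group's mean f1_score; groups with fewer
    # than two runs still come out as (nan, nan).
    n = len(x)
    return stats.t.interval(0.95, n - 1,
                            loc=x.f1_score.mean(),
                            scale=x.f1_score.std() / np.sqrt(n))

psg.apply(t_interval)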

In [ ]: