Deep Learning


In [1]:
import pandas as pd
import numpy as np

from collections import namedtuple
pd.set_option("display.max_rows",35)
%matplotlib inline


Using TensorFlow backend.

In [2]:
kdd_train_2labels = pd.read_pickle("dataset/kdd_train_2labels.pkl")
kdd_test_2labels = pd.read_pickle("dataset/kdd_test_2labels.pkl")

#y_train_labels = pd.read_pickle("dataset/kdd_train_2labels_y.pkl")
#y_train_labels = pd.read_pickle("dataset/kdd_train_2labels.pkl")
#y_test_labels = pd.read_pickle("dataset/kdd_test_2labels_y.pkl")

output_columns_2labels = ['is_Attack','is_Normal']

from sklearn import model_selection as ms
from sklearn import preprocessing as pp

x_input = kdd_train_2labels.drop(output_columns_2labels, axis = 1)
y_output = kdd_train_2labels.loc[:,output_columns_2labels]

ss = pp.StandardScaler()
x_input = ss.fit_transform(x_input)

#le = pp.LabelEncoder()
#y_train = le.fit_transform(y_train_labels).reshape(-1, 1)
#y_test = le.transform(y_test_labels).reshape(-1, 1)

y_train = kdd_train_2labels.loc[:,output_columns_2labels].values

x_train, x_valid, y_train, y_valid = ms.train_test_split(x_input, 
                              y_train, 
                              test_size=0.1)
#x_valid, x_test, y_valid, y_test = ms.train_test_split(x_valid, y_valid, test_size = 0.4)

x_test = kdd_test_2labels.drop(output_columns_2labels, axis = 1)
y_test = kdd_test_2labels.loc[:,output_columns_2labels].values

x_test = ss.transform(x_test)

#x_train = np.hstack((x_train, y_train))
#x_valid = np.hstack((x_valid, y_valid))

#x_test = np.hstack((x_test, np.random.normal(loc = 0, scale = 0.01, size = y_test.shape)))

In [3]:
from nolearn.dbn import DBN

input_dim = 122
intermediate_dim = 10
latent_dim = 32
batch_size = 1409
hidden_layers = 8
classes = 2
drop_prob = 0.2
timesteps = 1

class Train:
    def build_lstm_model(X_train):
        Train.x = Input(shape=(timesteps, input_dim))
        encoded = LSTM(latent_dim)(Train.x)
        
        decoded = RepeatVector(timesteps)(encoded)
        Train.y = LSTM(classes, return_sequences=True)(decoded)
        
        clf = DBN(
                    [X_train.shape[1], 300, 10],
                    learn_rates=0.3,
                    learn_rate_decays=0.9,
                    epochs=10,
                    verbose=1,
                )


Train.build_lstm_model()

In [8]:
import itertools
#features_arr = [4, 16, 32, 256, 1024]
#hidden_layers_arr = [2, 6, 10, 100]

#features_arr = [4, 16, 32]
#hidden_layers_arr = [2, 6, 10]

features_arr = [4, 16, 32]
hidden_layers_arr = [2, 4, 6]

epoch_arr = [5]

score = namedtuple("score", ['epoch', 'no_of_features','hidden_layers','train_score', 'test_score'])
scores = []
predictions = {}

for e, h, f in itertools.product(epoch_arr, hidden_layers_arr, features_arr):
    
    print(" \n Current Layer Attributes - epochs:{} hidden layers:{} features count:{}".format(e,h,f))
    latent_dim = f
    epochs = e
    hidden_layers = h
    
    train_size = x_train.shape[0] - x_train.shape[0]%batch_size
    valid_size = x_valid.shape[0] - x_valid.shape[0]%batch_size

    
    optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.01)
    
    seq2seq_model = Model(Train.x, Train.y)
    seq2seq_model.compile(optimizer = optimizer, 
                      loss = keras.losses.categorical_crossentropy, 
                      metrics = ['accuracy'])
    
    seq2seq_model.fit(x = x_train[:train_size,np.newaxis,:], y = y_train[:train_size,np.newaxis,:],
                 shuffle=True, epochs=5, 
                  batch_size = batch_size, 
                  validation_data = (x_test[:,np.newaxis,:], y_test[:,np.newaxis,:]),
                  verbose = 1)

    
    score_train = seq2seq_model.evaluate(x_valid[:valid_size,np.newaxis,:], y = y_valid[:valid_size,np.newaxis,:],
                               batch_size = batch_size,
                               verbose = 1)
    
    score_test = seq2seq_model.evaluate(x_test[:,np.newaxis,:], y = y_test[:,np.newaxis,:],
                           batch_size = batch_size,
                           verbose = 1)
    
    y_test_pred = seq2seq_model.predict(x_test[:,np.newaxis,:], batch_size=batch_size)
    y_test_pred = np.squeeze(y_test_pred)

    y_pred = y_test_pred #np.argmax(y_test_pred[:,-2:], axis = 1)
    
    curr_pred = pd.DataFrame({"Attack_prob":y_pred[:,0], "Normal_prob":y_pred[:,1]})
    predictions.update({"{}_{}_{}".format(e,f,h):curr_pred})
    
    scores.append(score(e,f,h,score_train[-1], score_test[-1])) #score_test[-1]))
    
    print("\n Train Acc: {}, Test Acc: {}".format(score_train[-1], 
                                                  score_test[-1])  )
    
scores = pd.DataFrame(scores)


 
 Current Layer Attributes - epochs:5 hidden layers:2 features count:4
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 1.4641 - acc: 0.3800 - val_loss: 1.0248 - val_acc: 0.3802
Epoch 2/5
112720/112720 [==============================] - 0s - loss: 2.4112 - acc: 0.5941 - val_loss: 1.8572 - val_acc: 0.5476
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 2.1326 - acc: 0.5220 - val_loss: 1.3698 - val_acc: 0.4876
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 1.3263 - acc: 0.5303 - val_loss: 1.3992 - val_acc: 0.3769
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 1.2813 - acc: 0.3823 - val_loss: 1.4164 - val_acc: 0.3478
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.36053938791155815, Test Acc: 0.3478087317198515
 
 Current Layer Attributes - epochs:5 hidden layers:2 features count:16
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 1.2067 - acc: 0.3820 - val_loss: 1.3160 - val_acc: 0.3490
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 0.9477 - acc: 0.5824 - val_loss: 1.1828 - val_acc: 0.6031
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 0.9461 - acc: 0.4960 - val_loss: 1.0378 - val_acc: 0.4139
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 1.0320 - acc: 0.4873 - val_loss: 1.0699 - val_acc: 0.6309
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 1.1610 - acc: 0.6286 - val_loss: 1.2914 - val_acc: 0.4756
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.5172107890248299, Test Acc: 0.475603261962533
 
 Current Layer Attributes - epochs:5 hidden layers:2 features count:32
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 1.3332 - acc: 0.6460 - val_loss: 1.2293 - val_acc: 0.3503
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 1.3980 - acc: 0.5946 - val_loss: 1.3357 - val_acc: 0.3834
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 1.3772 - acc: 0.6088 - val_loss: 1.4706 - val_acc: 0.3779
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 1.3465 - acc: 0.4797 - val_loss: 1.5584 - val_acc: 0.4921
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 1.2662 - acc: 0.5459 - val_loss: 1.4969 - val_acc: 0.3645
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.4393186643719673, Test Acc: 0.36453158035874367
 
 Current Layer Attributes - epochs:5 hidden layers:4 features count:4
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 1.2749 - acc: 0.4945 - val_loss: 2.0869 - val_acc: 0.6469
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 1.0038 - acc: 0.6326 - val_loss: 1.6330 - val_acc: 0.3981
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 0.9638 - acc: 0.6259 - val_loss: 2.1587 - val_acc: 0.6177
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 0.9849 - acc: 0.5885 - val_loss: 1.7566 - val_acc: 0.5338
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 1.0884 - acc: 0.5783 - val_loss: 1.8359 - val_acc: 0.5933
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.6379524543881416, Test Acc: 0.5932842493057251
 
 Current Layer Attributes - epochs:5 hidden layers:4 features count:16
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 0.9974 - acc: 0.4780 - val_loss: 1.8067 - val_acc: 0.4911
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 1.0954 - acc: 0.4796 - val_loss: 1.7547 - val_acc: 0.5131
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 1.2057 - acc: 0.5915 - val_loss: 1.6918 - val_acc: 0.4977
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 1.2615 - acc: 0.5567 - val_loss: 1.7361 - val_acc: 0.5539
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 1.3334 - acc: 0.5586 - val_loss: 1.7497 - val_acc: 0.3946
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.43541518598794937, Test Acc: 0.3945617415010929
 
 Current Layer Attributes - epochs:5 hidden layers:4 features count:32
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 1.2899 - acc: 0.4899 - val_loss: 2.0035 - val_acc: 0.6129
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 1.2513 - acc: 0.4567 - val_loss: 1.9046 - val_acc: 0.4340
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 1.2136 - acc: 0.3553 - val_loss: 1.9023 - val_acc: 0.4178
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 1.2193 - acc: 0.4104 - val_loss: 1.9938 - val_acc: 0.4592
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 1.2120 - acc: 0.3580 - val_loss: 1.9567 - val_acc: 0.3889
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.31085876375436783, Test Acc: 0.3889283165335655
 
 Current Layer Attributes - epochs:5 hidden layers:6 features count:4
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 1.2386 - acc: 0.3590 - val_loss: 2.2186 - val_acc: 0.3480
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 1.1973 - acc: 0.5448 - val_loss: 2.4204 - val_acc: 0.5747
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 1.2082 - acc: 0.5077 - val_loss: 2.3045 - val_acc: 0.4374
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 1.1804 - acc: 0.4586 - val_loss: 2.2869 - val_acc: 0.4348
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 1.1588 - acc: 0.5310 - val_loss: 2.3507 - val_acc: 0.5582
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.5840134769678116, Test Acc: 0.5581529401242733
 
 Current Layer Attributes - epochs:5 hidden layers:6 features count:16
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 2s - loss: 1.1587 - acc: 0.6904 - val_loss: 2.4666 - val_acc: 0.6243
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 1.1067 - acc: 0.6812 - val_loss: 2.3738 - val_acc: 0.5925
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 0.9486 - acc: 0.5837 - val_loss: 2.2907 - val_acc: 0.5720
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 0.7938 - acc: 0.5653 - val_loss: 2.4171 - val_acc: 0.5721
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 0.7901 - acc: 0.5580 - val_loss: 2.4938 - val_acc: 0.5702
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.5575762912631035, Test Acc: 0.5701738782227039
 
 Current Layer Attributes - epochs:5 hidden layers:6 features count:32
Train on 112720 samples, validate on 22544 samples
Epoch 1/5
112720/112720 [==============================] - 3s - loss: 0.2917 - acc: 0.5748 - val_loss: 2.4110 - val_acc: 0.5888
Epoch 2/5
112720/112720 [==============================] - 1s - loss: 0.1574 - acc: 0.5860 - val_loss: 2.4123 - val_acc: 0.5833
Epoch 3/5
112720/112720 [==============================] - 1s - loss: 0.1358 - acc: 0.5829 - val_loss: 2.5204 - val_acc: 0.5858
Epoch 4/5
112720/112720 [==============================] - 1s - loss: 0.1419 - acc: 0.5896 - val_loss: 2.7127 - val_acc: 0.5897
Epoch 5/5
112720/112720 [==============================] - 1s - loss: 0.1289 - acc: 0.5939 - val_loss: 2.7496 - val_acc: 0.5900
14090/22544 [=================>............] - ETA: 0s
 Train Acc: 0.5977643728256226, Test Acc: 0.5900017693638802

In [9]:
scores.sort_values("test_score", ascending=False)


Out[9]:
epoch no_of_features hidden_layers train_score test_score
3 5 4 4 0.637952 0.593284
8 5 32 6 0.597764 0.590002
7 5 16 6 0.557576 0.570174
6 5 4 6 0.584013 0.558153
1 5 16 2 0.517211 0.475603
4 5 16 4 0.435415 0.394562
5 5 32 4 0.310859 0.388928
2 5 32 2 0.439319 0.364532
0 5 4 2 0.360539 0.347809

In [14]:
pd.Panel(predictions).to_pickle("dataset/keras_lstm_nsl_kdd_predictions.pkl")
scores.to_pickle("dataset/keras_lstm_nsl_kdd_scores.pkl")

In [13]:
pd.Panel(predictions)


Out[13]:
<class 'pandas.core.panel.Panel'>
Dimensions: 9 (items) x 22544 (major_axis) x 2 (minor_axis)
Items axis: 5_16_2 to 5_4_6
Major_axis axis: 0 to 22543
Minor_axis axis: Attack_prob to Normal_prob

In [ ]: