In [125]:
from keras.layers import Input, Dense, Dropout
from keras.models import Model
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard
from __future__ import print_function
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

import keras
import matplotlib.pyplot as plt
import numpy as np
import math
import pydot
import graphviz
import pandas as pd

Data Set Information

This radar data was collected by a system in Goose Bay, Labrador. This system consists of a phased array of 16 high-frequency antennas with a total transmitted power on the order of 6.4 kilowatts. See the paper for more details. The targets were free electrons in the ionosphere. "Good" radar returns are those showing evidence of some type of structure in the ionosphere. "Bad" returns are those that do not; their signals pass through the ionosphere.

Received signals were processed using an autocorrelation function whose arguments are the time of a pulse and the pulse number. There were 17 pulse numbers for the Goose Bay system. Instances in this databse are described by 2 attributes per pulse number, corresponding to the complex values returned by the function resulting from the complex electromagnetic signal.

Attribute Information

Data Import and preprocessing


In [126]:
data = np.genfromtxt("data/ionosphere.data")

In [127]:
data = pd.read_csv('data/ionosphere.data', sep=",", header=None)
data.describe()


Out[127]:
0 1 2 3 4 5 6 7 8 9 ... 24 25 26 27 28 29 30 31 32 33
count 351.000000 351.0 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000 ... 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000 351.000000
mean 0.891738 0.0 0.641342 0.044372 0.601068 0.115889 0.550095 0.119360 0.511848 0.181345 ... 0.396135 -0.071187 0.541641 -0.069538 0.378445 -0.027907 0.352514 -0.003794 0.349364 0.014480
std 0.311155 0.0 0.497708 0.441435 0.519862 0.460810 0.492654 0.520750 0.507066 0.483851 ... 0.578451 0.508495 0.516205 0.550025 0.575886 0.507974 0.571483 0.513574 0.522663 0.468337
min 0.000000 0.0 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 ... -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000 -1.000000
25% 1.000000 0.0 0.472135 -0.064735 0.412660 -0.024795 0.211310 -0.054840 0.087110 -0.048075 ... 0.000000 -0.332390 0.286435 -0.443165 0.000000 -0.236885 0.000000 -0.242595 0.000000 -0.165350
50% 1.000000 0.0 0.871110 0.016310 0.809200 0.022800 0.728730 0.014710 0.684210 0.018290 ... 0.553890 -0.015050 0.708240 -0.017690 0.496640 0.000000 0.442770 0.000000 0.409560 0.000000
75% 1.000000 0.0 1.000000 0.194185 1.000000 0.334655 0.969240 0.445675 0.953240 0.534195 ... 0.905240 0.156765 0.999945 0.153535 0.883465 0.154075 0.857620 0.200120 0.813765 0.171660
max 1.000000 0.0 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 ... 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000

8 rows × 34 columns


In [128]:
data.head()


Out[128]:
0 1 2 3 4 5 6 7 8 9 ... 25 26 27 28 29 30 31 32 33 34
0 1 0 0.99539 -0.05889 0.85243 0.02306 0.83398 -0.37708 1.00000 0.03760 ... -0.51171 0.41078 -0.46168 0.21266 -0.34090 0.42267 -0.54487 0.18641 -0.45300 g
1 1 0 1.00000 -0.18829 0.93035 -0.36156 -0.10868 -0.93597 1.00000 -0.04549 ... -0.26569 -0.20468 -0.18401 -0.19040 -0.11593 -0.16626 -0.06288 -0.13738 -0.02447 b
2 1 0 1.00000 -0.03365 1.00000 0.00485 1.00000 -0.12062 0.88965 0.01198 ... -0.40220 0.58984 -0.22145 0.43100 -0.17365 0.60436 -0.24180 0.56045 -0.38238 g
3 1 0 1.00000 -0.45161 1.00000 1.00000 0.71216 -1.00000 0.00000 0.00000 ... 0.90695 0.51613 1.00000 1.00000 -0.20099 0.25682 1.00000 -0.32382 1.00000 b
4 1 0 1.00000 -0.02401 0.94140 0.06531 0.92106 -0.23255 0.77152 -0.16399 ... -0.65158 0.13290 -0.53206 0.02431 -0.62197 -0.05707 -0.59573 -0.04608 -0.65697 g

5 rows × 35 columns


In [129]:
data.drop(data.columns[1], inplace=True, axis=1)
data[34] = [1 if e is "g" else 0 for e in data[34]]
data.head()


Out[129]:
0 2 3 4 5 6 7 8 9 10 ... 25 26 27 28 29 30 31 32 33 34
0 1 0.99539 -0.05889 0.85243 0.02306 0.83398 -0.37708 1.00000 0.03760 0.85243 ... -0.51171 0.41078 -0.46168 0.21266 -0.34090 0.42267 -0.54487 0.18641 -0.45300 1
1 1 1.00000 -0.18829 0.93035 -0.36156 -0.10868 -0.93597 1.00000 -0.04549 0.50874 ... -0.26569 -0.20468 -0.18401 -0.19040 -0.11593 -0.16626 -0.06288 -0.13738 -0.02447 0
2 1 1.00000 -0.03365 1.00000 0.00485 1.00000 -0.12062 0.88965 0.01198 0.73082 ... -0.40220 0.58984 -0.22145 0.43100 -0.17365 0.60436 -0.24180 0.56045 -0.38238 1
3 1 1.00000 -0.45161 1.00000 1.00000 0.71216 -1.00000 0.00000 0.00000 0.00000 ... 0.90695 0.51613 1.00000 1.00000 -0.20099 0.25682 1.00000 -0.32382 1.00000 0
4 1 1.00000 -0.02401 0.94140 0.06531 0.92106 -0.23255 0.77152 -0.16399 0.52798 ... -0.65158 0.13290 -0.53206 0.02431 -0.62197 -0.05707 -0.59573 -0.04608 -0.65697 1

5 rows × 34 columns


In [130]:
# sample the dataframe
data_train = data.sample(frac=0.9, random_state=42)
data_valid = data.drop(data_train.index)

In [131]:
df_x_train = data_train.iloc[:,:-1]
df_y_train = data_train.iloc[:,-1]

df_x_valid = data_valid.iloc[:,:-1]
df_y_valid = data_valid.iloc[:,-1]

In [132]:
df_y_train.sum()


Out[132]:
201

In [133]:
df_y_train.sum()/len(df_y_train)


Out[133]:
0.6360759493670886

preprocessing


In [134]:
x_train = np.array(df_x_train.as_matrix())
y_train = np.array(pd.DataFrame(df_y_train).as_matrix())

x_val = np.array(df_x_valid.as_matrix())
y_val = np.array(pd.DataFrame(df_y_valid).as_matrix())

In [135]:
input_dim = x_train.shape[1]

Train Classifier

The goal is to get a very accurate classifier.


In [152]:
result = []
for i in range(1,5):
    model = Sequential()
    model.add(Dense(15, input_dim=input_dim, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(15, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(15, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])

    model.fit(x_train, y_train, 
              epochs=150,
              shuffle=True,
              batch_size=4,
              verbose=0,
              validation_split=0.1
             )
    score = model.evaluate(x_val, y_val)[1]
    result.append(score)
print(np.mean(result))


35/35 [==============================] - 0s 115us/step
35/35 [==============================] - 0s 86us/step
35/35 [==============================] - 0s 100us/step
35/35 [==============================] - 0s 100us/step
0.921428571429

Single Hidden Layer Autoencoder


In [155]:
result = []
for i in range(1,5):
    single_auto = Sequential()
    single_auto.add(Dense(8, input_dim=input_dim, activation='relu'))
    # model.add(Dropout(0.1))
    single_auto.add(Dense(input_dim, activation='sigmoid'))

    single_auto.compile(loss='mean_squared_error',
                  optimizer='adadelta')

    single_auto.fit(x_train, x_train, 
                      epochs=150,
                      shuffle=True,
                      batch_size=4,
                      verbose=0,
                      validation_split=0.1
                     )
    score = single_auto.evaluate(x_val, x_val)
    result.append(score)
print(np.mean(result))


35/35 [==============================] - 0s 86us/step
35/35 [==============================] - 0s 72us/step
35/35 [==============================] - 0s 86us/step
35/35 [==============================] - 0s 86us/step
0.128157497028

How well does a classifier after the vanilla autoencoder perform?


In [159]:
model.evaluate(single_auto.predict(x_val), y_val)


35/35 [==============================] - 0s 200us/step
Out[159]:
[0.54365701441253933, 0.80000000000000004]

So we lose about 12% by decoding

Stacked Autoencoder


In [160]:
######## constants for stacked autoencoder ############
encoding_dim1 = 16
encoding_dim2 = 8
decoding_dim1 = 16
decoding_dim2 = input_dim

In [162]:
result = []
for i in range(1,5):
    stacked_auto = Sequential()
    stacked_auto.add(Dense(encoding_dim1, input_dim=input_dim, activation='relu'))
    # model.add(Dropout(0.1))
    stacked_auto.add(Dense(encoding_dim2, activation='relu'))
    # model.add(Dropout(0.05))
    stacked_auto.add(Dense(decoding_dim1, activation='relu'))
    # model.add(Dropout(0.05))
    stacked_auto.add(Dense(decoding_dim2, activation='sigmoid'))

    stacked_auto.compile(loss='mean_squared_error',
                  optimizer='adadelta')

    stacked_auto.fit(x_train, x_train, 
                      epochs=150,
                      shuffle=True,
                      batch_size=4,
                      verbose=0,
                      validation_split=0.1
                     )
    score = stacked_auto.evaluate(x_val, x_val)
    result.append(score)
print(np.mean(score))


35/35 [==============================] - 0s 100us/step
35/35 [==============================] - 0s 86us/step
35/35 [==============================] - 0s 86us/step
35/35 [==============================] - 0s 100us/step
0.130945818326