In [125]:
from __future__ import print_function

from keras.layers import Input, Dense, Dropout
from keras.models import Model, Sequential, load_model
from keras.datasets import mnist
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
from IPython.display import SVG
import keras
import matplotlib.pyplot as plt
import numpy as np
import math
import pydot
import graphviz
import pandas as pd
This radar data was collected by a system in Goose Bay, Labrador. This system consists of a phased array of 16 high-frequency antennas with a total transmitted power on the order of 6.4 kilowatts. See the paper for more details. The targets were free electrons in the ionosphere. "Good" radar returns are those showing evidence of some type of structure in the ionosphere. "Bad" returns are those that do not; their signals pass through the ionosphere.
Received signals were processed using an autocorrelation function whose arguments are the time of a pulse and the pulse number. There were 17 pulse numbers for the Goose Bay system. Instances in this database are described by 2 attributes per pulse number, corresponding to the complex values returned by the function for the complex electromagnetic signal.
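That layout implies 2 × 17 = 34 real-valued attributes plus a good/bad label per instance. A quick sanity check of the file (a sketch only; the variable name raw is illustrative, the actual loading happens in the cells below):
raw = pd.read_csv("data/ionosphere.data", sep=",", header=None)
print(raw.shape)               # expect 35 columns: 2 attributes x 17 pulse numbers + the g/b label
print(raw[34].value_counts())  # distribution of "g" (good) vs "b" (bad) returns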
In [126]:
# first attempt with genfromtxt; the file is comma-separated and the last
# column is a string label, so it is reloaded with pandas in the next cell
data = np.genfromtxt("data/ionosphere.data")
In [127]:
data = pd.read_csv('data/ionosphere.data', sep=",", header=None)
data.describe()
Out[127]:
In [128]:
data.head()
Out[128]:
In [129]:
# drop the second feature column and binarise the class label: 1 for "g" (good), 0 for "b" (bad)
data.drop(data.columns[1], inplace=True, axis=1)
data[34] = [1 if e == "g" else 0 for e in data[34]]
data.head()
Out[129]:
In [130]:
# split the dataframe: 90% for training, the remaining 10% for validation
data_train = data.sample(frac=0.9, random_state=42)
data_valid = data.drop(data_train.index)
In [131]:
df_x_train = data_train.iloc[:,:-1]
df_y_train = data_train.iloc[:,-1]
df_x_valid = data_valid.iloc[:,:-1]
df_y_valid = data_valid.iloc[:,-1]
In [132]:
df_y_train.sum()  # number of "good" returns in the training set
Out[132]:
In [133]:
df_y_train.sum()/len(df_y_train)  # fraction of "good" returns; the accuracy a constant "always good" classifier would reach
Out[133]:
In [134]:
# convert to NumPy arrays; keep the labels as column vectors
x_train = df_x_train.values
y_train = df_y_train.values.reshape(-1, 1)
x_val = df_x_valid.values
y_val = df_y_valid.values.reshape(-1, 1)
In [135]:
input_dim = x_train.shape[1]
The goal is to train a classifier that separates good from bad returns as accurately as possible.
In [152]:
# train 4 classifiers independently and average their validation accuracy
result = []
for i in range(1, 5):
    model = Sequential()
    model.add(Dense(15, input_dim=input_dim, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(15, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(15, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(x_train, y_train,
              epochs=150,
              shuffle=True,
              batch_size=4,
              verbose=0,
              validation_split=0.1
              )
    score = model.evaluate(x_val, y_val)[1]
    result.append(score)
print(np.mean(result))
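Keras' fit() returns a History object, and plotting it is a quick way to check whether 150 epochs is enough. A minimal sketch using the matplotlib import above; capture the return value of the last fit call, and note that older Keras versions log the metric under 'acc'/'val_acc' while newer ones use 'accuracy'/'val_accuracy':
history = model.fit(x_train, y_train, epochs=150, shuffle=True,
                    batch_size=4, verbose=0, validation_split=0.1)
plt.plot(history.history['acc'], label='train accuracy')
plt.plot(history.history['val_acc'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()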
In [155]:
# train 4 single-bottleneck autoencoders and average their reconstruction loss
result = []
for i in range(1, 5):
    single_auto = Sequential()
    single_auto.add(Dense(8, input_dim=input_dim, activation='relu'))
    # single_auto.add(Dropout(0.1))
    single_auto.add(Dense(input_dim, activation='sigmoid'))
    single_auto.compile(loss='mean_squared_error',
                        optimizer='adadelta')
    single_auto.fit(x_train, x_train,
                    epochs=150,
                    shuffle=True,
                    batch_size=4,
                    verbose=0,
                    validation_split=0.1
                    )
    score = single_auto.evaluate(x_val, x_val)
    result.append(score)
print(np.mean(result))
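The 8-unit hidden layer is the interesting part of this autoencoder, and the functional API imported above can expose it as a standalone encoder. A sketch, assuming the Keras 2 Model(inputs=..., outputs=...) signature:
# wrap the trained bottleneck layer as an encoder and inspect its codes
encoder = Model(inputs=single_auto.input,
                outputs=single_auto.layers[0].output)
codes = encoder.predict(x_val)
print(codes.shape)  # (number of validation samples, 8)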
How well does the classifier perform when it is fed the vanilla autoencoder's reconstructions?
In [159]:
model.evaluate(single_auto.predict(x_val), y_val)
Out[159]:
So we lose about 12 percentage points of accuracy by passing the inputs through the autoencoder (encode and decode) before classifying them.
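To make that drop explicit, compare the classifier's accuracy on the raw validation inputs with its accuracy on the reconstructions; a short sketch (index [1] picks the accuracy metric out of evaluate's return value):
acc_raw = model.evaluate(x_val, y_val, verbose=0)[1]
acc_decoded = model.evaluate(single_auto.predict(x_val), y_val, verbose=0)[1]
print("raw: %.3f  decoded: %.3f  drop: %.3f" % (acc_raw, acc_decoded, acc_raw - acc_decoded))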
In [160]:
######## constants for stacked autoencoder ############
# layer sizes: input_dim -> 16 -> 8 -> 16 -> input_dim
encoding_dim1 = 16
encoding_dim2 = 8
decoding_dim1 = 16
decoding_dim2 = input_dim
In [162]:
# train 4 stacked autoencoders and average their reconstruction loss
result = []
for i in range(1, 5):
    stacked_auto = Sequential()
    stacked_auto.add(Dense(encoding_dim1, input_dim=input_dim, activation='relu'))
    # stacked_auto.add(Dropout(0.1))
    stacked_auto.add(Dense(encoding_dim2, activation='relu'))
    # stacked_auto.add(Dropout(0.05))
    stacked_auto.add(Dense(decoding_dim1, activation='relu'))
    # stacked_auto.add(Dropout(0.05))
    stacked_auto.add(Dense(decoding_dim2, activation='sigmoid'))
    stacked_auto.compile(loss='mean_squared_error',
                         optimizer='adadelta')
    stacked_auto.fit(x_train, x_train,
                     epochs=150,
                     shuffle=True,
                     batch_size=4,
                     verbose=0,
                     validation_split=0.1
                     )
    score = stacked_auto.evaluate(x_val, x_val)
    result.append(score)
print(np.mean(result))
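The same reconstruction test used for the vanilla autoencoder applies here as well; a sketch mirroring the earlier cell:
# accuracy of the classifier on inputs reconstructed by the stacked autoencoder
model.evaluate(stacked_auto.predict(x_val), y_val, verbose=0)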