In [5]:
from __future__ import print_function  # must precede all other statements in the cell
from keras.layers import Input, Dense, Dropout, Concatenate, Add
from keras.models import Model
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from sklearn import preprocessing
from keras import layers
from keras import initializers
from matplotlib import axes
from matplotlib import rc
import keras
import matplotlib.pyplot as plt
import numpy as np
import math
import pydot
import graphviz
import pandas as pd
import IPython
In [6]:
%matplotlib inline
font = {'family': 'monospace',
        'weight': 'bold',
        'size': 20}
rc('font', **font)
seed = 42
This radar data was collected by a system in Goose Bay, Labrador. The system consists of a phased array of 16 high-frequency antennas with a total transmitted power on the order of 6.4 kilowatts. See the original paper for more details. The targets were free electrons in the ionosphere. "Good" radar returns are those showing evidence of some type of structure in the ionosphere. "Bad" returns are those that do not; their signals pass through the ionosphere.
Received signals were processed using an autocorrelation function whose arguments are the time of a pulse and the pulse number. There were 17 pulse numbers for the Goose Bay system. Instances in this database are described by 2 attributes per pulse number, corresponding to the complex values returned by the autocorrelation function for the complex electromagnetic signal.
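So each instance consists of 17 × 2 = 34 continuous attributes plus a class label ("g" for good, "b" for bad), i.e. 35 columns in the raw file:

n_pulses = 17
n_features = n_pulses * 2      # one pair of autocorrelation values per pulse number = 34 attributes
n_columns = n_features + 1     # plus the "g"/"b" class label = 35 columns in the raw file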
In [7]:
# data = np.genfromtxt("data/ionosphere.data")
In [8]:
data = pd.read_csv('data/ionosphere.data', sep=",", header=None)
data.head()
Out[8]:
In [9]:
data.describe()
Out[9]:
This is a very small dataset: the full ionosphere data contains only 351 observations.
In [10]:
df_tab = data
df_tab[34] = df_tab[34].astype('category')
tab = pd.crosstab(index=df_tab[34], columns="frequency")
tab.index.name = 'Class'
tab/tab.sum()
Out[10]:
In [11]:
# the second attribute is constant across the dataset and carries no information, so drop it
data.drop(data.columns[1], inplace=True, axis=1)
data[34] = [1 if e == "g" else 0 for e in data[34]]  # "g" (good) -> 1, "b" (bad) -> 0
In [12]:
# split the data 90/10 into training and validation sets
data_train = data.sample(frac=0.9, random_state=seed)
data_valid = data.drop(data_train.index)
In [13]:
df_x_train = data_train.iloc[:,:-1]
# min-max scale using the training set's statistics only,
# and reuse them for the validation set so both share one scale
x_min, x_max = df_x_train.min(), df_x_train.max()
df_x_train = (df_x_train - x_min) / (x_max - x_min)
df_y_train = data_train.iloc[:,-1]
df_x_valid = (data_valid.iloc[:,:-1] - x_min) / (x_max - x_min)
df_y_valid = data_valid.iloc[:,-1]
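Equivalently, the scaling can be done with sklearn's MinMaxScaler, fit on the training data only and then reused on the validation data. A minimal sketch:

from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
x_train_scaled = scaler.fit_transform(data_train.iloc[:, :-1])  # learn min/max on the training set
x_valid_scaled = scaler.transform(data_valid.iloc[:, :-1])      # apply the same min/max to validation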
In [14]:
df_x_train.describe()
Out[14]:
In [15]:
df_y_train.sum()/len(df_y_train)
Out[15]:
About 63% of all observations are good.
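This imbalance sets the bar: always predicting "good" already achieves about 63% accuracy on this training split, so any useful model has to beat that majority baseline. A quick check:

p_good = df_y_train.sum() / len(df_y_train)
majority_baseline = max(p_good, 1 - p_good)  # accuracy of always predicting the majority class
print(majority_baseline)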
In [16]:
x_train = df_x_train.values
y_train = df_y_train.values
x_val = df_x_valid.values
y_val = df_y_valid.values
y_eval = y_val
y_train = keras.utils.to_categorical(y_train, 2)
y_val = keras.utils.to_categorical(y_val, 2)
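keras.utils.to_categorical turns the 0/1 labels into one-hot rows matching the two output units of the network, for example:

keras.utils.to_categorical([0, 1, 1], 2)
# array([[1., 0.],
#        [0., 1.],
#        [0., 1.]], dtype=float32)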
In [17]:
epochsize = 60
batchsize = 4
shuffle = False
dropout = 0.1
num_classes = 2
input_dim = x_train.shape[1]
hidden1_dim = 40
hidden2_dim = 40
# weights = keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=42)
The goal is a classifier that clearly beats the ~63% majority-class baseline.
In [18]:
input_data = Input(shape=(input_dim,), dtype='float32', name='main_input')
hidden_layer1 = Dense(hidden1_dim
                      , activation='relu'
                      # , kernel_initializer=weights
                      )(input_data)
dropout1 = Dropout(dropout)(hidden_layer1)
hidden_layer2 = Dense(hidden2_dim
                      , activation='relu'
                      # , kernel_initializer=weights
                      )(dropout1)
dropout2 = Dropout(dropout)(hidden_layer2)
output_layer = Dense(num_classes
                     , activation='sigmoid'
                     # , kernel_initializer=weights
                     )(dropout2)
model = Model(inputs=input_data, outputs=output_layer)
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
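As a sanity check on the model size (assuming input_dim == 33 after dropping the constant column): the three dense layers contribute (33+1)·40 + (40+1)·40 + (40+1)·2 = 1360 + 1640 + 82 = 3082 trainable parameters, which can be confirmed with:

model.summary()  # expect 3,082 trainable parameters for input_dim == 33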
In [19]:
plot_model(model, to_file='images/ionosphere_nn.png', show_shapes=True, show_layer_names=True)
In [20]:
IPython.display.Image("images/ionosphere_nn.png")
Out[20]:
In [21]:
history = model.fit(x_train, y_train,
                    batch_size=batchsize,
                    epochs=epochsize,
                    verbose=0,
                    shuffle=shuffle,
                    validation_split=0.05)
nn_score = model.evaluate(x_val, y_val)[1]  # index 1 = accuracy
print(nn_score)
In [22]:
fig = plt.figure(figsize=(20,10))
plt.plot(history.history['val_acc'])
plt.plot(history.history['acc'])
plt.axhline(y=nn_score, c="red")
plt.text(0, nn_score, "test: " + str(round(nn_score, 4)), fontdict=font)
plt.title('model accuracy for neural net with 2 hidden layers')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['valid', 'train'], loc='lower right')
plt.show()
In [23]:
import itertools
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
In [24]:
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_eval, model.predict(x_val).argmax(axis=-1))
np.set_printoptions(precision=2)
In [25]:
# Plot non-normalized confusion matrix
plt.figure(figsize=(16,8))
plot_confusion_matrix(cnf_matrix, classes=['bad', 'good'],
title='Confusion matrix, without normalization')
In [26]:
# Plot normalized confusion matrix
plt.figure(figsize=(16,8))
plot_confusion_matrix(cnf_matrix, classes=['bad', 'good'], normalize=True,
title='Normalized confusion matrix')
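The next experiment attaches the classifier to an autoencoder. A single encoding layer feeds two heads: a decoder that tries to reconstruct the input, and the same two-hidden-layer classifier as above. Both heads are trained jointly, minimizing the weighted sum 0.2 · binary cross-entropy (classification) + 0.8 · mean squared error (reconstruction). The loop below shrinks the encoding dimension from input_dim down to 1 and records the classification accuracy at each size.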
In [27]:
# start with encoding_dim equal to input_dim, so that there is no dimension reduction at first
encoding_dim = input_dim
result = {'encoding_dim': [], 'auto_classifier_acc': []}
In [ ]:
while encoding_dim > 0:
    main_input = Input(shape=(input_dim,), dtype='float32', name='main_input')
    encoding_layer = Dense(encoding_dim
                           , activation='relu'
                           , name='encoder'
                           # , kernel_initializer='normal'
                           )
    encoding_layer_output = encoding_layer(main_input)
    # decoder head: reconstruct the input from the encoding
    decoding_layer_output = Dense(input_dim, activation='sigmoid'
                                  , name='decoder_output'
                                  # , kernel_initializer='normal'
                                  )(encoding_layer_output)
    # classifier head: same architecture as the plain neural net above
    x = Dense(hidden1_dim
              , activation='relu'
              # , kernel_initializer=weights
              )(encoding_layer_output)
    x = Dropout(dropout)(x)
    x = Dense(hidden2_dim
              , activation='relu'
              # , kernel_initializer=weights
              )(x)
    x = Dropout(dropout)(x)
    classifier_output = Dense(num_classes
                              , activation='sigmoid'
                              , name='main_output'
                              # , kernel_initializer=weights
                              )(x)
    auto_classifier = Model(inputs=main_input, outputs=[classifier_output, decoding_layer_output])
    auto_classifier.compile(optimizer=RMSprop(),
                            loss={'main_output': 'binary_crossentropy', 'decoder_output': 'mean_squared_error'},
                            loss_weights={'main_output': .2, 'decoder_output': .8},
                            metrics=['accuracy'])
    auto_classifier.fit({'main_input': x_train},
                        {'main_output': y_train, 'decoder_output': x_train},
                        epochs=epochsize,
                        batch_size=batchsize,
                        shuffle=shuffle,
                        validation_split=0.05,
                        verbose=0)
    # evaluate returns [total_loss, main_output_loss, decoder_output_loss,
    #                   main_output_acc, decoder_output_acc]; index 3 is the classification accuracy
    accuracy = auto_classifier.evaluate(x=x_val, y=[y_val, x_val], verbose=1)[3]
    result['encoding_dim'].append(encoding_dim)
    result['auto_classifier_acc'].append(accuracy)
    encoding_dim -= 1
In [29]:
result_df = pd.DataFrame(result)
result_df['neural_net_acc'] = nn_score
result_df.head()
Out[29]:
In [30]:
fig = plt.figure(figsize=(20,10))
plt.bar(result_df['encoding_dim'], result_df['auto_classifier_acc'])
plt.axhline(y=result_df['neural_net_acc'][0], c="red")
plt.text(0, result_df['neural_net_acc'][0], "best neural net: " + str(round(result_df['neural_net_acc'][0], 4))
,fontdict=font)
plt.title('model accuracy for different encoding dimensions')
plt.ylabel('accuracy')
plt.xlabel('dimension')
plt.ylim(0.6, 1)
Out[30]:
The result is implausible; this might be due to the very small number of observations.
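With so few observations, a single 90/10 split gives a noisy accuracy estimate. A more trustworthy number could come from stratified k-fold cross-validation; a minimal sketch, where build_and_train is a hypothetical helper wrapping the model construction and training above (the min-max scaling would be refit inside each fold):

from sklearn.model_selection import StratifiedKFold

x_all = data.iloc[:, :-1].values
y_all = data.iloc[:, -1].values
scores = []
for train_idx, val_idx in StratifiedKFold(n_splits=5, shuffle=True, random_state=seed).split(x_all, y_all):
    # build_and_train is a placeholder for the model definition + fit code above;
    # it should return the validation accuracy for one fold
    scores.append(build_and_train(x_all[train_idx], y_all[train_idx],
                                  x_all[val_idx], y_all[val_idx]))
print(np.mean(scores))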
In [31]:
result_df.to_csv('results/ionosphere_results.csv')