In [15]:
# __future__ imports must be the first statement in the compilation unit
# (here: this cell), otherwise a SyntaxError is raised.
from __future__ import division

import pandas as pd
import numpy as np
np.random.seed(1)
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.metrics import accuracy_score, precision_score, recall_score
from keras import backend as K
# NOTE(review): `optimizers` is conventionally imported as
# `from keras import optimizers`; keras.models only re-exports it
# incidentally in some versions -- confirm against the installed Keras.
from keras.models import Sequential, load_model, optimizers
from keras.layers import Dense, Activation, Dropout
from keras.callbacks import EarlyStopping
from imblearn.over_sampling import SMOTE
In [2]:
# Load the credit-card fraud dataset (binary target column `Class`).
data = pd.read_csv("data/creditcard.csv")
In [3]:
# Features are columns 1..28 only; this excludes column 0 (and the last two
# columns, one of which is Class). Presumably intentional -- TODO confirm
# which columns are dropped against the CSV header.
X, y = data[data.columns[1:29]].values, data.Class.values
In [4]:
# Stratified 50/50 split preserves the class ratio in both halves.
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
In [5]:
# n_splits=1: the loop runs exactly once and just unpacks the single split.
# (Indentation of the loop body was lost in this text export.)
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
In [6]:
def network_1(X_train, y_train=None):
    """Deep MLP (1024 -> 512 -> ... -> 16, all sigmoid) with dropout before a
    sigmoid output; RMSprop + binary cross-entropy, early stop on train loss.

    Parameters
    ----------
    X_train : 2-D array, shape (n_samples, n_features), training features.
    y_train : 1-D array of binary labels. Defaults to the notebook-global
        ``y_train`` for backward compatibility -- the original body silently
        read the global (hidden state); prefer passing labels explicitly.

    Returns
    -------
    The fitted keras ``Sequential`` model.
    """
    if y_train is None:
        # Backward-compat fallback to the module-level variable the original
        # implementation depended on.
        y_train = globals()["y_train"]
    model = Sequential()
    # NOTE(review): seven stacked sigmoid hidden layers are prone to
    # vanishing gradients; relu activations would likely train better.
    model.add(Dense(1024, activation="sigmoid", input_dim=X_train.shape[1]))
    model.add(Dense(512, activation="sigmoid"))
    model.add(Dense(256, activation="sigmoid"))
    model.add(Dense(128, activation="sigmoid"))
    model.add(Dense(64, activation="sigmoid"))
    model.add(Dense(32, activation="sigmoid"))
    model.add(Dense(16, activation="sigmoid"))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer="rmsprop",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    # Monitors TRAINING loss (no validation data is supplied to fit()).
    early_stopping = EarlyStopping(monitor="loss", patience=4)
    model.fit(X_train, y_train, epochs=20, batch_size=32,
              callbacks=[early_stopping], verbose=1)
    return model
In [7]:
def network_2(X_train, y_train=None):
    """Sigmoid MLP (256 -> 128 -> 64 -> 32 -> 16 -> 1); RMSprop + binary
    cross-entropy, early stopping on training loss.

    Parameters
    ----------
    X_train : 2-D array, shape (n_samples, n_features), training features.
    y_train : 1-D array of binary labels. Defaults to the notebook-global
        ``y_train`` for backward compatibility -- the original body silently
        read the global (hidden state); prefer passing labels explicitly.

    Returns
    -------
    The fitted keras ``Sequential`` model.
    """
    if y_train is None:
        y_train = globals()["y_train"]  # legacy hidden-state fallback
    model = Sequential()
    model.add(Dense(256, activation="sigmoid", input_dim=X_train.shape[1]))
    model.add(Dense(128, activation="sigmoid"))
    model.add(Dense(64, activation="sigmoid"))
    model.add(Dense(32, activation="sigmoid"))
    model.add(Dense(16, activation="sigmoid"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer="rmsprop",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    # Monitors TRAINING loss (no validation data is supplied to fit()).
    early_stopping = EarlyStopping(monitor="loss", patience=4)
    model.fit(X_train, y_train, epochs=20, batch_size=32,
              callbacks=[early_stopping], verbose=1)
    return model
In [26]:
def evaluation(y_true, y_pred):
    """Score binary predictions against ground truth.

    Parameters
    ----------
    y_true : 1-D array-like of true 0/1 labels.
    y_pred : 1-D array-like of predicted 0/1 labels.

    Returns
    -------
    tuple of (accuracy, precision, recall), each a float.
    """
    return (
        accuracy_score(y_true, y_pred),
        precision_score(y_true, y_pred),
        recall_score(y_true, y_pred),
    )
In [9]:
model_1 = network_1(X_train)
In [10]:
y_test_pred = model_1.predict_classes(X_test)
network_1 = evaluation(y_test, y_test_pred)
print(network_1)
In [11]:
model_2 = network_2(X_train)
In [12]:
y_test_pred = model_2.predict_classes(X_test)
network_2 = evaluation(y_test, y_test_pred)
print(network_2)
In [108]:
# Split rows by class to build a balanced (undersampled) training set.
fraudulent = data[data.Class == 1]
normal = data[data.Class == 0]
In [109]:
# Sample exactly as many normal rows as there are fraud rows -> 1:1 classes.
normal_sample = normal.sample(fraudulent.Class.count(), random_state=0)
In [110]:
data_undersample = pd.concat([fraudulent, normal_sample])
In [111]:
# Same feature slice as earlier cells (columns 1..28 only).
# NOTE: X, y, X_train, ... are overwritten here -- the notebook relies on
# strict top-to-bottom execution order.
X, y = data_undersample[data_undersample.columns[1:29]].values, data_undersample.Class.values
In [112]:
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
In [113]:
# (Indentation of the loop body was lost in this text export.)
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
In [114]:
def network_3(X_train, y_train=None):
    """Sigmoid MLP (256 -> 128 -> 64 -> 32 -> 1) trained longer (100 epochs,
    patience 10) for the small undersampled dataset.

    Parameters
    ----------
    X_train : 2-D array, shape (n_samples, n_features), training features.
    y_train : 1-D array of binary labels. Defaults to the notebook-global
        ``y_train`` for backward compatibility -- the original body silently
        read the global (hidden state); prefer passing labels explicitly.

    Returns
    -------
    The fitted keras ``Sequential`` model.
    """
    if y_train is None:
        y_train = globals()["y_train"]  # legacy hidden-state fallback
    model = Sequential()
    model.add(Dense(256, activation="sigmoid", input_dim=X_train.shape[1]))
    model.add(Dense(128, activation="sigmoid"))
    model.add(Dense(64, activation="sigmoid"))
    model.add(Dense(32, activation="sigmoid"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer="rmsprop",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    # More patient early stopping than networks 1/2 (monitors training loss).
    early_stopping = EarlyStopping(monitor="loss", patience=10)
    model.fit(X_train, y_train, epochs=100, batch_size=32,
              callbacks=[early_stopping], verbose=1)
    return model
In [115]:
model_3 = network_3(X_train)
In [116]:
y_test_pred = model_3.predict_classes(X_test)
network_3 = evaluation(y_test, y_test_pred)
print(network_3)
In [117]:
# all data
y_test_pred_all = model_3.predict_classes(X)
network_3_all = evaluation(y, y_test_pred_all)
print(network_3_all)
In [324]:
model_3.save("models/balance_model.h5")
In [18]:
fraudulent = data[data.Class == 1]
normal = data[data.Class == 0]
In [19]:
# Class counts (the dataset is highly imbalanced).
fraudulent.Class.count(), normal.Class.count()
Out[19]:
In [25]:
# Imbalance ratio normal:fraud (true division via the __future__ import).
normal.Class.count() / fraudulent.Class.count()
Out[25]:
In [28]:
# Replicate the fraud rows 578x. 578 is presumably the rounded imbalance
# ratio printed above -- hard-coded magic number; recompute if the data
# changes.
fraudulent_oversample = pd.concat([fraudulent]*578, ignore_index=True)
In [29]:
fraudulent_oversample.Class.count()
Out[29]:
In [30]:
data_oversample = pd.concat([fraudulent_oversample, normal])
In [33]:
# Sanity check: class proportions after oversampling (~balanced).
data_oversample.Class.value_counts(normalize=True)
Out[33]:
In [34]:
# NOTE(review): oversampling is done BEFORE the train/test split, so exact
# duplicates of each fraud row land in both halves -- test metrics will be
# optimistically biased.
X, y = data_oversample[data_oversample.columns[1:29]].values, data_oversample.Class.values
In [35]:
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
In [36]:
# (Indentation of the loop body was lost in this text export.)
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
In [39]:
def network_4(X_train, y_train=None):
    """Small sigmoid MLP (64 -> 32 -> 1) for the oversampled dataset;
    RMSprop + binary cross-entropy, early stopping on training loss.

    Parameters
    ----------
    X_train : 2-D array, shape (n_samples, n_features), training features.
    y_train : 1-D array of binary labels. Defaults to the notebook-global
        ``y_train`` for backward compatibility -- the original body silently
        read the global (hidden state); prefer passing labels explicitly.

    Returns
    -------
    The fitted keras ``Sequential`` model.
    """
    if y_train is None:
        y_train = globals()["y_train"]  # legacy hidden-state fallback
    model = Sequential()
    model.add(Dense(64, activation="sigmoid", input_dim=X_train.shape[1]))
    model.add(Dense(32, activation="sigmoid"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer="rmsprop",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    early_stopping = EarlyStopping(monitor="loss", patience=4)
    model.fit(X_train, y_train, epochs=20, batch_size=32,
              callbacks=[early_stopping], verbose=1)
    return model
In [40]:
model_4 = network_4(X_train)
In [43]:
y_test_pred = model_4.predict_classes(X_test)
network_4 = evaluation(y_test, y_test_pred)
print(network_4)
In [4]:
# SMOTE: synthesize minority-class samples until the classes are balanced.
sm = SMOTE(random_state=42)
In [5]:
# Back to the full original dataset (same feature slice as before).
X, y = data[data.columns[1:29]].values, data.Class.values
In [6]:
# fit_sample was renamed fit_resample in later imbalanced-learn releases;
# kept as-is for the installed version -- TODO confirm version.
X_res, y_res = sm.fit_sample(X, y)
In [7]:
# Sanity check: class counts after resampling.
pd.Series(y_res).value_counts()
Out[7]:
In [16]:
# Plain (non-stratified) 80/20 split; classes are balanced after SMOTE.
# NOTE(review): resampling before the split leaks information -- synthetic
# test points are interpolated from points that end up in the training set.
rs = ShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
In [17]:
# (Indentation of the loop body was lost in this text export.)
for train_index, test_index in rs.split(X_res, y_res):
X_train, X_test = X_res[train_index], X_res[test_index]
y_train, y_test = y_res[train_index], y_res[test_index]
In [20]:
X_train.shape, X_test.shape
Out[20]:
In [60]:
def network_5(X_train, y_train=None):
    """Small sigmoid MLP (64 -> 32 -> 1) for the SMOTE-resampled data.

    NOTE(review): identical architecture and hyperparameters to network_4;
    kept as a separate name to mirror the notebook narrative.

    Parameters
    ----------
    X_train : 2-D array, shape (n_samples, n_features), training features.
    y_train : 1-D array of binary labels. Defaults to the notebook-global
        ``y_train`` for backward compatibility -- the original body silently
        read the global (hidden state); prefer passing labels explicitly.

    Returns
    -------
    The fitted keras ``Sequential`` model.
    """
    if y_train is None:
        y_train = globals()["y_train"]  # legacy hidden-state fallback
    model = Sequential()
    model.add(Dense(64, activation="sigmoid", input_dim=X_train.shape[1]))
    model.add(Dense(32, activation="sigmoid"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer="rmsprop",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    early_stopping = EarlyStopping(monitor="loss", patience=4)
    model.fit(X_train, y_train, epochs=20, batch_size=32,
              callbacks=[early_stopping], verbose=1)
    return model
In [61]:
model_5 = network_5(X_train)
In [62]:
y_test_pred = model_5.predict_classes(X_test)
network_5 = evaluation(y_test, y_test_pred)
print(network_5)
In [21]:
def network_6(X_train, y_train=None):
    """Sigmoid MLP (256 -> 128 -> 64 -> 32 -> dropout -> 1). Unlike the other
    networks, this one holds out 20% of the training data and early-stops on
    validation loss instead of training loss.

    Parameters
    ----------
    X_train : 2-D array, shape (n_samples, n_features), training features.
    y_train : 1-D array of binary labels. Defaults to the notebook-global
        ``y_train`` for backward compatibility -- the original body silently
        read the global (hidden state); prefer passing labels explicitly.

    Returns
    -------
    The fitted keras ``Sequential`` model.
    """
    if y_train is None:
        y_train = globals()["y_train"]  # legacy hidden-state fallback
    model = Sequential()
    model.add(Dense(256, activation="sigmoid", input_dim=X_train.shape[1]))
    model.add(Dense(128, activation="sigmoid"))
    model.add(Dense(64, activation="sigmoid"))
    model.add(Dense(32, activation="sigmoid"))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer="rmsprop",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    # Early stopping on held-out validation loss (validation_split=0.2).
    early_stopping = EarlyStopping(monitor="val_loss", patience=4)
    model.fit(X_train, y_train, epochs=20, batch_size=32,
              validation_split=0.2, callbacks=[early_stopping], verbose=1)
    return model
In [22]:
model_6 = network_6(X_train)
In [27]:
y_test_pred = model_6.predict_classes(X_test)
network_6 = evaluation(y_test, y_test_pred)
print(network_6)
In [29]:
y_full = model_6.predict_classes(X)
full_data = evaluation(y, y_full)
print(full_data)
In [336]:
# Transfer-learning experiment: reuse the model trained on the undersampled
# data (saved above as models/balance_model.h5) on the full dataset.
X, y = data[data.columns[1:29]].values, data.Class.values
In [337]:
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
In [338]:
# (Indentation of the loop body was lost in this text export.)
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
In [339]:
model = load_model("models/balance_model.h5")
In [340]:
model.layers
Out[340]:
In [341]:
model.summary()
In [342]:
# Drop the last two layers to expose intermediate features as the new top.
# NOTE(review): in several Keras versions, mutating model.layers with pop()
# does not rebuild model.outputs / output_shape -- verify that the summary
# below actually reflects the truncation.
model.layers.pop()
model.layers.pop()
Out[342]:
In [343]:
model.summary()
In [344]:
# New classification head stacked on the truncated base.
top_model = Sequential()
top_model.add(Dense(32, activation="sigmoid", input_shape=model.output_shape[1:]))
top_model.add(Dense(1, activation="sigmoid"))
In [345]:
model.add(top_model)
In [346]:
model.summary()
In [347]:
# freeze weights until top layer
# NOTE(review): after add(top_model), model.layers includes the new head as
# its last element; if the truncated base has fewer than 4 layers, [:4] also
# freezes the head and fine-tuning would update nothing -- confirm against
# the summary above.
for layer in model.layers[:4]:
layer.trainable = False
In [348]:
# fine-tuning should be done with slow learning rate
# NOTE(review): the `lr` argument was renamed `learning_rate` in newer Keras.
model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
loss="binary_crossentropy",
metrics=["accuracy"])
early_stopping = EarlyStopping(monitor="loss", patience=4)
model.fit(X_train, y_train, epochs=20, batch_size=32, callbacks=[early_stopping], verbose=1)
Out[348]:
In [349]:
# Evaluate the fine-tuned model on the held-out half of the full dataset.
y_test_pred = model.predict_classes(X_test)
network = evaluation(y_test, y_test_pred)
print(network)
In [ ]: