Using this credit card fraud dataset, develop an algorithm to predict fraud. Prioritize correctly finding fraud (i.e., high recall on the fraud class) over correctly labeling non-fraudulent transactions.
In [96]:
%matplotlib inline

# Standard library
import locale
import math
import time
import warnings
from locale import atof

# Third-party: scientific stack
import numpy as np
import pandas as pd
import scipy
import scipy.stats as stats
import matplotlib.pyplot as plt
from matplotlib.mlab import PCA as mlabPCA  # NOTE: removed in matplotlib >= 3.1
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import xlrd

# Third-party: scikit-learn
import sklearn
from sklearn import datasets, ensemble, linear_model, metrics, preprocessing
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.decomposition import PCA as sklearn_pca
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import (RFE, RFECV, SelectFromModel,
                                       SelectKBest, f_regression)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, classification_report,
                             confusion_matrix, mean_squared_error)
from sklearn.model_selection import (KFold, StratifiedKFold,
                                     cross_val_predict, cross_val_score,
                                     train_test_split)
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.utils import resample, shuffle

# Notebook display helpers
from IPython.display import display
In [97]:
# Load the credit-card transactions dataset and preview the first rows.
creditcard = pd.read_csv('creditcard.csv')
creditcard.head()
Out[97]:
In [98]:
# Inspect the class balance: count transactions per Class label
# (1 is the rare fraud class, as the resampling cells below assume).
creditcard.Class.value_counts()
Out[98]:
In [99]:
# Balance the classes by upsampling the minority (fraud) class.
# Separate majority (non-fraud) and minority (fraud) classes.
creditcard_majority = creditcard[creditcard.Class == 0]
creditcard_minority = creditcard[creditcard.Class == 1]

# Upsample the minority class with replacement until it matches the
# majority class size. The target size is derived from the data rather
# than hard-coded (the original used the magic number 284315).
creditcard_minority_upsampled = resample(creditcard_minority,
                                         replace=True,
                                         n_samples=len(creditcard_majority),
                                         random_state=123)

# Combine the majority class with the upsampled minority class.
creditcard_upsampled = pd.concat([creditcard_majority, creditcard_minority_upsampled])

# Display the new (balanced) class counts.
creditcard_upsampled.Class.value_counts()
Out[99]:
In [100]:
# Balance the classes by downsampling the majority (non-fraud) class.
# Separate majority (non-fraud) and minority (fraud) classes.
creditcard_majority = creditcard[creditcard.Class == 0]
creditcard_minority = creditcard[creditcard.Class == 1]

# Downsample the majority class (without replacement) to the minority
# class size. The target size is derived from the data rather than
# hard-coded (the original used the magic number 492).
creditcard_majority_downsampled = resample(creditcard_majority,
                                           replace=False,
                                           n_samples=len(creditcard_minority),
                                           random_state=123)

# Combine the downsampled majority class with the minority class.
creditcard_downsampled = pd.concat([creditcard_majority_downsampled, creditcard_minority])

# Display the new (balanced) class counts.
creditcard_downsampled.Class.value_counts()
Out[100]:
In [101]:
# Build the modeling matrices from the balanced (downsampled) data.
# Outcome: the fraud flag; predictors: every column except Class and Time.
y = creditcard_downsampled['Class']
X = creditcard_downsampled.drop(columns=['Class', 'Time'])

# Standardize predictors to zero mean / unit variance.
# NOTE(review): scaling before the train/test split leaks test-set
# statistics into training; fitting the scaler on X_train only would
# be stricter — confirm whether that matters for this analysis.
names = X.columns
X = pd.DataFrame(preprocessing.scale(X), columns=names)

# Hold out a test set for final evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42)

# 20-fold CV splitter. The downsampled frame is ordered with all
# non-fraud rows first, then all fraud rows, so an unshuffled KFold
# would produce single-class folds; shuffle with a fixed seed.
kf = KFold(n_splits=20, shuffle=True, random_state=42)
In [102]:
# Initialize and fit logistic regression on the TRAINING data only.
# (The original called lr.fit(X_test, y_test) right after fitting on
# the training set, which overwrote the trained model with one fit on
# the test set — leaking test labels and invalidating the evaluation.)
lr = LogisticRegression()
lr.fit(X_train, y_train)

# Predict on both sets with the single train-fit model.
predtrain_y = lr.predict(X_train)
predtest_y = lr.predict(X_test)
In [103]:
# Training-set diagnostics for the logistic regression.
target_names = ['0', '1']
print(classification_report(y_train, predtrain_y, target_names=target_names))

# labels=[0, 1] forces a 2x2 matrix even if one class never appears in
# the predictions (the original crosstab .loc[0.0, 1.0] lookup would
# raise KeyError in that case).
cnf = confusion_matrix(y_train, predtrain_y, labels=[0, 1])
print(cnf)

# Error rates as fractions of all training samples:
#   Type I  = false positives (non-fraud flagged as fraud)
#   Type II = false negatives (fraud missed) — the costly kind here.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
train_tI_errors = fp / total
train_tII_errors = fn / total
print((
    'Training set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}\n\n'
).format(train_tI_errors, train_tII_errors))
In [104]:
# Test-set diagnostics for the logistic regression.
target_names = ['0', '1']
print(classification_report(y_test, predtest_y, target_names=target_names))

# labels=[0, 1] guarantees a 2x2 matrix even if a class is absent from
# the predictions (the crosstab .loc lookups would KeyError).
cnf = confusion_matrix(y_test, predtest_y, labels=[0, 1])
print(cnf)

# Type I = false positives, Type II = missed fraud, as fractions of all
# test samples.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
test_tI_errors = fp / total
test_tII_errors = fn / total
print((
    'Test set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}'
).format(test_tI_errors, test_tII_errors))

# Cross-validated accuracy — compute once (the original ran the full
# 20-fold CV twice, once for the scores and once for the mean).
cv_scores = cross_val_score(lr, X, y, cv=kf)
print(cv_scores)
print(cv_scores.mean())
In [105]:
# Linear-kernel SVM. class_weight='balanced' reweights classes by
# inverse frequency (near no-op here since the downsampled data is
# already balanced); probability=True enables predict_proba at extra
# fit cost.
clf1 = SVC(kernel='linear',
           class_weight='balanced',
           probability=True)

# Fit on the TRAINING data only. (The original refit on the test set
# immediately afterwards, overwriting the trained model and leaking
# test labels into the evaluation.)
clf1.fit(X_train, y_train)

# Predict on both sets with the single train-fit model.
predtrainclf_y = clf1.predict(X_train)
predtestclf_y = clf1.predict(X_test)
In [106]:
# Training-set diagnostics for the linear SVM.
target_names = ['0', '1']
print(classification_report(y_train, predtrainclf_y, target_names=target_names))

# labels=[0, 1] forces a 2x2 matrix even when a class is missing from
# the predictions (crosstab .loc[0.0, 1.0] would KeyError).
cnf = confusion_matrix(y_train, predtrainclf_y, labels=[0, 1])
print(cnf)

# Type I = false positives, Type II = missed fraud, as fractions of all
# training samples.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
train_tI_errors = fp / total
train_tII_errors = fn / total
print((
    'Training set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}\n\n'
).format(train_tI_errors, train_tII_errors))
In [107]:
# Test-set diagnostics for the linear SVM.
target_names = ['0', '1']
print(classification_report(y_test, predtestclf_y, target_names=target_names))

# labels=[0, 1] guarantees a 2x2 matrix regardless of which classes
# appear in the predictions.
cnf = confusion_matrix(y_test, predtestclf_y, labels=[0, 1])
print(cnf)

# Type I = false positives, Type II = missed fraud, as fractions of all
# test samples.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
test_tI_errors = fp / total
test_tII_errors = fn / total
print((
    'Test set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}'
).format(test_tI_errors, test_tII_errors))

# Cross-validated accuracy — compute once instead of twice.
cv_scores = cross_val_score(clf1, X, y, cv=kf)
print(cv_scores)
print(cv_scores.mean())
In [108]:
# Random forest, fit on the TRAINING data only. (The original refit on
# the test set, overwriting the trained model.) random_state is fixed
# so the stochastic forest is reproducible across runs.
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)

# Predict on both sets with the single train-fit model.
predtrainrf_y = rf.predict(X_train)
predtestrf_y = rf.predict(X_test)
In [109]:
# Training-set diagnostics for the random forest.
target_names = ['0', '1']
print(classification_report(y_train, predtrainrf_y, target_names=target_names))

# labels=[0, 1] forces a 2x2 matrix even when a class is missing from
# the predictions (crosstab .loc lookups would KeyError).
cnf = confusion_matrix(y_train, predtrainrf_y, labels=[0, 1])
print(cnf)

# Type I = false positives, Type II = missed fraud, as fractions of all
# training samples.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
train_tI_errors = fp / total
train_tII_errors = fn / total
print((
    'Training set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}\n\n'
).format(train_tI_errors, train_tII_errors))
In [110]:
# Test-set diagnostics for the random forest.
target_names = ['0', '1']
print(classification_report(y_test, predtestrf_y, target_names=target_names))

# labels=[0, 1] guarantees a 2x2 matrix regardless of predicted classes.
cnf = confusion_matrix(y_test, predtestrf_y, labels=[0, 1])
print(cnf)

# Type I = false positives, Type II = missed fraud, as fractions of all
# test samples.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
test_tI_errors = fp / total
test_tII_errors = fn / total
print((
    'Test set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}'
).format(test_tI_errors, test_tII_errors))

# Cross-validated accuracy — compute once instead of twice.
cv_scores = cross_val_score(rf, X, y, cv=kf)
print(cv_scores)
print(cv_scores.mean())
In [111]:
# Gradient boosting: 500 shallow (depth-2) trees with logistic loss.
# NOTE(review): 'deviance' is the pre-1.1 scikit-learn name for this
# loss; it was renamed 'log_loss' and later removed — update if the
# library is upgraded.
params = {'n_estimators': 500,
          'max_depth': 2,
          'loss': 'deviance'}

# Initialize and fit on the TRAINING data only. (The original refit on
# the test set immediately afterwards, overwriting the trained model
# and leaking test labels into the evaluation.)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)

# Predict on both sets with the single train-fit model.
predtrainclf_y = clf.predict(X_train)
predtestclf_y = clf.predict(X_test)
In [112]:
# Training-set diagnostics for the gradient boosting classifier.
target_names = ['0', '1']
print(classification_report(y_train, predtrainclf_y, target_names=target_names))

# labels=[0, 1] forces a 2x2 matrix even when a class is missing from
# the predictions (crosstab .loc lookups would KeyError).
cnf = confusion_matrix(y_train, predtrainclf_y, labels=[0, 1])
print(cnf)

# Type I = false positives, Type II = missed fraud, as fractions of all
# training samples.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
train_tI_errors = fp / total
train_tII_errors = fn / total
print((
    'Training set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}\n\n'
).format(train_tI_errors, train_tII_errors))
In [113]:
# Test-set diagnostics for the gradient boosting classifier.
target_names = ['0', '1']
print(classification_report(y_test, predtestclf_y, target_names=target_names))

# labels=[0, 1] guarantees a 2x2 matrix regardless of predicted classes.
cnf = confusion_matrix(y_test, predtestclf_y, labels=[0, 1])
print(cnf)

# Type I = false positives, Type II = missed fraud, as fractions of all
# test samples.
tn, fp, fn, tp = cnf.ravel()
total = cnf.sum()
test_tI_errors = fp / total
test_tII_errors = fn / total
print((
    'Test set accuracy:\n'
    'Percent Type I errors: {}\n'
    'Percent Type II errors: {}'
).format(test_tI_errors, test_tII_errors))

# Cross-validated accuracy — compute once instead of twice.
cv_scores = cross_val_score(clf, X, y, cv=kf)
print(cv_scores)
print(cv_scores.mean())