Get your data here. The data is related to direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. Often, more than one contact with the same client was required in order to assess whether the product (bank term deposit) would be subscribed ('yes') or not ('no'). There are four datasets:
1) bank-additional-full.csv with all examples (41188) and 20 inputs, ordered by date (from May 2008 to November 2010)
2) bank-additional.csv with 10% of the examples (4119), randomly selected from 1), and 20 inputs.
3) bank-full.csv with all examples and 17 inputs, ordered by date (older version of this dataset with fewer inputs).
4) bank.csv with 10% of the examples and 17 inputs, randomly selected from 3) (older version of this dataset with fewer inputs).
The smaller datasets are provided to test more computationally demanding machine learning algorithms (e.g., SVM).
The classification goal is to predict whether the client will subscribe to a term deposit (yes/no, variable y).
(Hint: sklearn's LabelEncoder is useful for turning the categorical columns into numbers.)
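As a quick illustration of what LabelEncoder does (a toy sketch, separate from the dataset below):
from sklearn.preprocessing import LabelEncoder
# LabelEncoder maps each distinct string to an integer code (classes are sorted alphabetically).
le = LabelEncoder()
le.fit(["married", "single", "divorced", "single"])
print(le.classes_)                          # ['divorced' 'married' 'single']
print(le.transform(["single", "married"]))  # [2 1]
print(le.inverse_transform([0, 1, 2]))      # ['divorced' 'married' 'single']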
In [1]:
# Standard imports for data analysis packages in Python
from __future__ import print_function  # so print() behaves the same under Python 2 and 3
import pandas as pd
import numpy as np
#import seaborn as sns # for pretty layout of plots
import matplotlib.pyplot as plt
from pprint import pprint # for pretty printing
# This enables inline Plots
%matplotlib inline
# Limit rows displayed in notebook
pd.set_option('display.max_rows', 10)
pd.set_option('display.precision', 2)
In [2]:
# First attempt with the default comma delimiter (this does not parse the file correctly)
dataset = pd.read_csv("/Users/arthurconner/Documents/DataScience/classwork/bank-additional/bank-additional-full.csv")
dataset.head(2)
Out[2]:
In [5]:
?pd.read_csv
In [3]:
# The file is semicolon-delimited, so pass the delimiter explicitly
dataset = pd.read_csv("/Users/arthurconner/Documents/DataScience/classwork/bank-additional/bank-additional-full.csv", delimiter=";")
dataset.head(2)
Out[3]:
In [4]:
dataset.info()
In [5]:
from sklearn.preprocessing import LabelEncoder
In [6]:
nondata = ["job","marital","education","default","housing","loan","contact","poutcome","y"]
transformLabels = []
encoders = {}
for x in nondata:
nextlabel = x + "_encoded"
transformLabels.append(nextlabel)
le = LabelEncoder()
encoders[x] = le
le.fit(dataset[x].unique())
dataset[nextlabel] = dataset[x].map(le.transform)
base = dataset[nextlabel].unique()
print nextlabel , base, "<-->", x, le.inverse_transform(base)
encoders
Out[6]:
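Keeping the fitted encoders around makes it easy to translate codes back to the original categories later, for example (a small usage sketch):
# Recover the original category names from the encoded values.
print(encoders["y"].inverse_transform([0, 1]))  # ['no' 'yes']
print(encoders["marital"].classes_)             # the original marital categories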
In [7]:
numeric = ["duration", "campaign", "pdays","previous","emp.var.rate","cons.price.idx","cons.conf.idx","euribor3m","nr.employed"]
xlabels = []
for x in transformLabels:
xlabels.append(x)
yLabel = xlabels.pop()
for x in numeric:
xlabels.append(x)
xlabels
Out[7]:
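Because "y" was the last entry in nondata, the pop() above leaves yLabel pointing at the encoded target, while xlabels holds the remaining encoded categoricals plus the numeric columns; a quick sanity check:
print(yLabel)        # 'y_encoded'
print(len(xlabels))  # 8 encoded categorical columns + 9 numeric columns = 17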
In [8]:
xData = dataset[xlabels]
xData
Out[8]:
In [9]:
yData = dataset[yLabel]
yData
Out[9]:
In [10]:
#Scikit Imports
from sklearn import linear_model, tree, metrics, naive_bayes, ensemble, cross_validation, grid_search
X_train, X_test, y_train, y_test = cross_validation.train_test_split(xData, yData, random_state=12, test_size=0.2)
print X_train.shape, X_test.shape, y_train.shape, y_test.shape
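The target is quite imbalanced (far more 'no' than 'yes'), so one optional refinement, not used in the rest of this notebook, is a stratified split:
# Optional: a stratified split keeps the yes/no ratio the same in train and test.
# (The stratify argument requires scikit-learn >= 0.17.)
X_train_s, X_test_s, y_train_s, y_test_s = cross_validation.train_test_split(
    xData, yData, random_state=12, test_size=0.2, stratify=yData)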
In [11]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
In [12]:
# out of the box
classifiers = [KNeighborsClassifier, RandomForestClassifier]
for cl in classifiers:
    classif = cl()
    classif.fit(X_train, y_train)
    print(cl, classif.score(X_test, y_test))
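Accuracy alone is flattered by the class imbalance; a useful reference point (a small sketch, not part of the original run) is the score you would get by always predicting the majority class:
# Baseline: accuracy of always predicting the majority class in the test set.
y_test_arr = np.asarray(y_test)
values, counts = np.unique(y_test_arr, return_counts=True)
print("majority-class baseline accuracy:", counts.max() / float(len(y_test_arr)))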
In [13]:
# okay, KNN is its own separate beast
# let's try a simple grid search first
knnParams = {"n_neighbors": [1, 2, 5, 10, 20, 50]}
kgrid = grid_search.GridSearchCV(KNeighborsClassifier(), knnParams)
kgrid.fit(X_train, y_train)
print(kgrid.score(X_test, y_test))
print(kgrid.best_params_)
In [14]:
knnParams = {"n_neighbors":[50,100,250,500]}
kgrid = grid_search.GridSearchCV(KNeighborsClassifier(), knnParams)
kgrid.fit(X_train, y_train)
print kgrid.score(X_test,y_test)
print kgrid.best_params_
knnBest = kgrid.best_estimator_
In [15]:
# Find a good n_neighbors by plotting train/test error against the number of neighbors (feel free to verify this with grid search)
import matplotlib as mpl
import matplotlib.pyplot as plt
In [ ]:
from sklearn.pipeline import make_pipeline
?np.arange
In [18]:
neighbors = np.arange(5, 80, 5)
training_error = []
test_error = []
# with 0/1 labels, mean squared error is simply the misclassification rate
mse = metrics.mean_squared_error
for neigh in neighbors:
    model = KNeighborsClassifier(n_neighbors=neigh)
    model.fit(X_train, y_train)
    training_error.append(mse(model.predict(X_train), y_train))
    test_error.append(mse(model.predict(X_test), y_test))
# note that the test error can also be computed via cross-validation
plt.plot(neighbors, training_error, label='training')
plt.plot(neighbors, test_error, label='test')
plt.legend()
plt.xlabel('neighbors')
plt.ylabel('MSE')
Out[18]:
In [19]:
# Create a classification report
from sklearn.metrics import classification_report
In [20]:
# It looks like the test error levels out around 40 neighbors
knnModel = KNeighborsClassifier(n_neighbors=40)
knnModel.fit(X_train, y_train)
y_true = y_test
y_pred = knnModel.predict(X_test)
print(classification_report(y_true, y_pred))
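A confusion matrix makes the class-by-class breakdown behind that report explicit (reusing y_true and y_pred from the cell above):
from sklearn.metrics import confusion_matrix
# Rows are true classes, columns are predicted classes (0 = 'no', 1 = 'yes').
print(confusion_matrix(y_true, y_pred))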
In [22]:
from sklearn.cross_validation import KFold
num_rows = yData.shape[0]
y = np.zeros((num_rows))
kf = KFold(num_rows, n_folds=5)
y_pred = y * 0
for train, test in kf:
    X_train = xData.values[train, :]
    X_test = xData.values[test, :]
    y_train = yData.values[train]
    y_test = yData.values[test]
    clf = KNeighborsClassifier(n_neighbors=40)
    #clf = SVC(kernel='rbf', class_weight='auto', verbose=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict(X_test)
#print("done in %0.3fs" % (time() - t0))
print(classification_report(yData.values, y_pred))
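The manual fold loop above can also be written in one call with cross_val_predict, which returns out-of-fold predictions for every row (a sketch; in this scikit-learn generation it lives in sklearn.cross_validation, in later releases in sklearn.model_selection):
from sklearn.cross_validation import cross_val_predict
# Equivalent to the loop above: every prediction comes from a model that did not see that row.
y_pred_cv = cross_val_predict(KNeighborsClassifier(n_neighbors=40), xData.values, yData.values, cv=5)
print(classification_report(yData.values, y_pred_cv))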
In [23]:
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
def plot_roc_curve(target_test, target_predicted_proba):
fpr, tpr, thresholds = roc_curve(target_test, target_predicted_proba)
roc_auc = auc(fpr, tpr)
# Plot ROC curve
plt.plot(fpr, tpr, label='ROC curve (area = %0.3f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--') # random predictions curve
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate or (1 - Specifity)')
plt.ylabel('True Positive Rate or (Sensitivity)')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
In [24]:
plot_roc_curve(yData.values, y_pred)
y_pred.shape
Out[24]:
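Note that y_pred holds hard 0/1 labels, so the ROC curve above has only a single interior point; probabilities from predict_proba give a more informative curve. A sketch, re-fitting the 40-neighbour model on the original train/test split (the KFold loop overwrote X_train/X_test):
# ROC from predicted probabilities rather than hard labels (sketch).
X_tr, X_te, y_tr, y_te = cross_validation.train_test_split(xData, yData, random_state=12, test_size=0.2)
knn40 = KNeighborsClassifier(n_neighbors=40).fit(X_tr, y_tr)
plot_roc_curve(y_te, knn40.predict_proba(X_te)[:, 1])  # column 1 = P(y == 1, i.e. 'yes')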
In [26]:
num_rows = yData.shape[0]
y = np.zeros((num_rows))
kf = KFold(num_rows, n_folds=5)
y_pred = y * 0
for train, test in kf:
    X_train = xData.values[train, :]
    X_test = xData.values[test, :]
    y_train = yData.values[train]
    y_test = yData.values[test]
    clf = KNeighborsClassifier(n_neighbors=10)
    #clf = SVC(kernel='rbf', class_weight='auto', verbose=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict(X_test)
#print("done in %0.3fs" % (time() - t0))
print(classification_report(yData.values, y_pred))
plot_roc_curve(yData.values, y_pred)
y_pred.shape
Out[26]:
In [27]:
# Random forest
treeSizes = np.arange(5, 80, 5)
training_error = []
test_error = []
mse = metrics.mean_squared_error
for treeSize in treeSizes:
    model = RandomForestClassifier(n_estimators=treeSize)
    model.fit(X_train, y_train)
    training_error.append(mse(model.predict(X_train), y_train))
    test_error.append(mse(model.predict(X_test), y_test))
# note that the test error can also be computed via cross-validation
plt.plot(treeSizes, training_error, label='training')
plt.plot(treeSizes, test_error, label='test')
plt.legend()
plt.xlabel('Number of trees')
plt.ylabel('MSE')
Out[27]:
In [28]:
# let's zoom: scale the training error by 10 so both curves are visible on one axis
training_error = []
test_error = []
mse = metrics.mean_squared_error
for treeSize in treeSizes:
    model = RandomForestClassifier(n_estimators=treeSize)
    model.fit(X_train, y_train)
    training_error.append(mse(model.predict(X_train), y_train) * 10)
    test_error.append(mse(model.predict(X_test), y_test))
# note that the test error can also be computed via cross-validation
plt.plot(treeSizes, training_error, label='training * 10')
plt.plot(treeSizes, test_error, label='test')
plt.legend()
plt.xlabel('Number of trees')
plt.ylabel('MSE')
Out[28]:
In [29]:
# it doesn't look like you gain much past 40 trees
treeModel = RandomForestClassifier(n_estimators=40)
treeModel.fit(X_train, y_train)
y_true = y_test
y_pred = treeModel.predict(X_test)
print(classification_report(y_true, y_pred))
In [30]:
# Rank the features by random-forest importance (highest first)
sorted(zip(treeModel.feature_importances_, xlabels), reverse=True)
Out[30]:
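The same importances are easier to read as a plot (a small sketch using the treeModel fitted above):
# Horizontal bar chart of the feature importances: smallest at the bottom, largest at the top.
imp, names = zip(*sorted(zip(treeModel.feature_importances_, xlabels)))
plt.barh(np.arange(len(names)), imp)
plt.yticks(np.arange(len(names)), names)
plt.xlabel('feature importance')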
In [31]:
num_rows = yData.shape[0]
y = np.zeros((num_rows))
kf = KFold(num_rows, n_folds=5)
y_pred = y * 0
for train, test in kf:
    X_train = xData.values[train, :]
    X_test = xData.values[test, :]
    y_train = yData.values[train]
    y_test = yData.values[test]
    clf = RandomForestClassifier(n_estimators=40, n_jobs=3)
    #clf = SVC(kernel='rbf', class_weight='auto', verbose=3)
    clf.fit(X_train, y_train)
    y_pred[test] = clf.predict(X_test)
#print("done in %0.3fs" % (time() - t0))
print(classification_report(yData.values, y_pred))
In [32]:
plot_roc_curve(yData.values, y_pred)
In [33]:
roc_curve(yData.values, y_pred)
Out[33]:
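As with KNN, this ROC is built from hard labels; feeding out-of-fold probabilities into the same helper gives a smoother curve and a more honest AUC (a sketch with the same 5-fold split):
# Sketch: out-of-fold P(yes) from the random forest, then ROC/AUC on those probabilities.
y_proba = np.zeros(num_rows)
for train, test in KFold(num_rows, n_folds=5):
    rf = RandomForestClassifier(n_estimators=40, n_jobs=3)
    rf.fit(xData.values[train, :], yData.values[train])
    y_proba[test] = rf.predict_proba(xData.values[test, :])[:, 1]
plot_roc_curve(yData.values, y_proba)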