Get your data here. The data is related to the direct marketing campaigns of a Portuguese banking institution. The campaigns were based on phone calls; often, more than one contact with the same client was required in order to assess whether the product (a bank term deposit) would be subscribed ('yes') or not ('no'). There are four datasets:
1) bank-additional-full.csv with all examples (41188) and 20 inputs, ordered by date (from May 2008 to November 2010)
2) bank-additional.csv with 10% of the examples (4119), randomly selected from 1), and 20 inputs.
3) bank-full.csv with all examples and 17 inputs, ordered by date (older version of this dataset with fewer inputs).
4) bank.csv with 10% of the examples and 17 inputs, randomly selected from 3) (older version of this dataset with fewer inputs).
The smaller datasets are provided to test more computationally demanding machine learning algorithms (e.g., SVM).
The classification goal is to predict whether the client will subscribe to a term deposit (yes/no, variable y).
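Before modelling, it is worth checking the class balance: 'yes' responses are the minority outcome in campaign data like this. A minimal sketch, reusing the file path and ';' separator from the loading cells below:

import pandas as pd
df = pd.read_csv('../data/bank-additional/bank-additional-full.csv', sep=';')
print(df['y'].value_counts(normalize=True))  # fraction of 'no' vs. 'yes'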
LabelEncoder

The target y arrives as the strings 'no'/'yes'; sklearn's LabelEncoder maps them to the integers 0/1, which is useful for the classifiers and metrics below.
In [1]:
#import needed packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
%matplotlib inline
In [2]:
bank_full=pd.read_csv('../data/bank/bank-full.csv', sep=';')
In [68]:
bank=pd.read_csv('../data/bank/bank.csv', sep=';')
In [3]:
bank_additional_full=pd.read_csv('../data/bank-additional/bank-additional-full.csv', sep=';')
In [49]:
bank_additional=pd.read_csv('../data/bank-additional/bank-additional.csv', sep=';')
In [4]:
bank_additional_full.head(5)
Out[4]:
In [5]:
bank_additional_full.info()
In [6]:
le = preprocessing.LabelEncoder()
le.fit(bank_additional_full.y)
Out[6]:
In [7]:
le.classes_
Out[7]:
In [8]:
le.transform(bank_additional_full.y)
Out[8]:
In [9]:
bank_additional_full['y'] = le.transform(bank_additional_full['y'])
In [10]:
bank_additional_full.y
Out[10]:
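If the original string labels are needed again later (e.g. for reporting), LabelEncoder keeps the mapping and can reverse it:

le.inverse_transform([0, 1])  # array(['no', 'yes'], ...) since classes_ are sorted alphabetically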
In [11]:
#check for unique values of features
bank_additional_full.marital.unique()
Out[11]:
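The same check extends to every categorical column at once; a small sketch:

for col in bank_additional_full.select_dtypes(include='object').columns:
    print(col, bank_additional_full[col].unique())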
In [12]:
pd.plotting.scatter_matrix(bank_additional_full[['age','campaign','pdays','duration','previous','y','emp.var.rate']], figsize=(20,20))
Out[12]:
In [13]:
X = bank_additional_full.loc[:, 'age':'poutcome']
y = bank_additional_full.y
In [14]:
X_data=pd.get_dummies(X)
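get_dummies replaces each categorical column with one 0/1 indicator column per level, so the feature matrix grows considerably; a quick check of the new width:

print(X.shape, '->', X_data.shape)  # numeric columns pass through, categoricals expand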
In [15]:
#Split the data into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_data, y, test_size=0.3)
print(X_train.shape, X_test.shape)
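Since the positive class is a minority, a stratified split keeps the 'yes' share equal across train and test; a sketch of the same split with stratification and a fixed seed (not used in the cells below):

X_train_s, X_test_s, y_train_s, y_test_s = train_test_split(
    X_data, y, test_size=0.3, stratify=y, random_state=42)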
In [16]:
from sklearn.neighbors import KNeighborsClassifier
# Instantiate the estimator
clf=KNeighborsClassifier(algorithm='brute')
# Fit the estimator to the Training Data
clf.fit(X_train, y_train)
# Use the model to predict Test Data
y_pred=clf.predict(X_test)
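KNN is distance-based, so unscaled features with large ranges (e.g. duration, pdays) dominate the neighbour search; a sketch of the same classifier with standardised inputs:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
knn_scaled = make_pipeline(StandardScaler(), KNeighborsClassifier(algorithm='brute'))
knn_scaled.fit(X_train, y_train)
print(knn_scaled.score(X_test, y_test))  # test-set accuracy with scaled features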
In [17]:
from sklearn import metrics
def plot_confusion_matrix(y_pred, y):
    # confusion_matrix puts true labels on the rows and predictions on the columns
    plt.imshow(metrics.confusion_matrix(y, y_pred),
               cmap=plt.cm.binary, interpolation='nearest')
    plt.colorbar()
    plt.xlabel('predicted value')
    plt.ylabel('true value')

print("classification accuracy:", metrics.accuracy_score(y_test, y_pred))
plot_confusion_matrix(y_pred, y_test)
In [18]:
print "accuracy:", metrics.accuracy_score(y_test, y_pred)
print "precision:", metrics.precision_score(y_test, y_pred)
print "recall:", metrics.recall_score(y_test, y_pred)
print "f1 score:", metrics.f1_score(y_test, y_pred)
In [19]:
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
In [20]:
#plot learning curve
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
In [21]:
rf_model = RandomForestClassifier(n_estimators=100,max_depth=15,criterion='entropy')
rf_model.fit(X_train,y_train)
Out[21]:
In [23]:
from sklearn.model_selection import GridSearchCV
In [24]:
param= {'n_estimators':np.arange(50,200,50), 'max_depth':np.arange(5,25,5)}
In [25]:
gs=GridSearchCV(RandomForestClassifier(),param)
In [26]:
gs.fit(X_train, y_train)
Out[26]:
In [27]:
gs.best_params_, gs.best_score_
Out[27]:
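Since GridSearchCV refits the best configuration on the full training set by default, the tuned model can be evaluated on the held-out test set directly; a quick sketch:

y_pred_gs = gs.best_estimator_.predict(X_test)
print("tuned accuracy:", metrics.accuracy_score(y_test, y_pred_gs))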
In [28]:
_ = plot_learning_curve(RandomForestClassifier(n_estimators=50),'test',X_train,y_train)
In [25]:
_ = plot_learning_curve(KNeighborsClassifier(algorithm='brute'),'test',X_train,y_train)
In [26]:
_ = plot_learning_curve(KNeighborsClassifier(algorithm='kd_tree'),'test',X_train,y_train)
In [30]:
rf_model = RandomForestClassifier(n_estimators=50,max_depth=25,criterion='entropy')
rf_model.fit(X_train,y_train)
Out[30]:
In [31]:
y_pred = rf_model.predict(X_test)
In [32]:
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(y_pred, y):
    # rows are true labels, columns are predicted labels
    plt.imshow(confusion_matrix(y, y_pred),
               cmap=plt.cm.binary, interpolation='nearest')
    plt.colorbar()
    plt.xlabel('predicted value')
    plt.ylabel('true value')
In [33]:
plot_confusion_matrix(y_pred, y_test)
In [34]:
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
In [35]:
rf_model.fit(X_train,y_train)
sorted(zip(rf_model.feature_importances_, X_data.columns.values), reverse=True)
Out[35]:
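The ranked importances are easier to digest as a chart; a sketch of the top ten as a horizontal bar plot:

importances = pd.Series(rf_model.feature_importances_, index=X_data.columns)
importances.nlargest(10).sort_values().plot(kind='barh', figsize=(8, 5))
plt.xlabel('feature importance')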
In [ ]: