SGDClassifier


In [1]:
from __future__      import division
from IPython.display import display
from matplotlib      import pyplot as plt
%matplotlib inline

import numpy  as np
import pandas as pd
import random, sys, os, re

from sklearn.linear_model     import SGDClassifier

from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search      import RandomizedSearchCV, GridSearchCV
from sklearn.cross_validation import cross_val_predict, permutation_test_score

In [2]:
SEED   = 97
scale  = False 
minmax = False
norm   = False
nointercept = False

N_CLASSES = 2

submission_filename = "../submissions/submission_SGDClassifier.csv"

Load the training data


In [3]:
from load_blood_data import load_blood_data

y_train, X_train = load_blood_data(train=True, SEED   = SEED, 
                                               scale  = scale,
                                               minmax = minmax,
                                               norm   = norm,
                                               nointercept = nointercept)

Fit the model


In [4]:
StratifiedCV = StratifiedKFold(y            = y_train, 
                               n_folds      = 10, 
                               shuffle      = True, 
                               random_state = SEED)
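
As a quick sanity check (not in the original notebook), each stratified fold should preserve the overall positive rate (about 24% here, per the confusion matrix below); assuming 0/1 labels, it can be eyeballed directly:


In [ ]:
# inspect the class balance within each CV fold (labels assumed to be 0/1)
for fold, (train_idx, test_idx) in enumerate(StratifiedCV):
    y_fold = np.asarray(y_train)[test_idx]
    print("fold {}: {} samples, {:.2%} positive".format(fold, len(y_fold), y_fold.mean()))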

In [5]:
%%time

random.seed(SEED)

# NOTE:  For best results using the default learning rate schedule, 
#        the data should have zero mean and unit variance.

clf = SGDClassifier(loss          = 'hinge',  # 'hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron', 
                                              # or a regression loss: 'squared_loss', 'huber', 'epsilon_insensitive',
                                              #                       'squared_epsilon_insensitive'
                    
                    penalty       = 'l2',     # 'none', 'l2', 'l1', or 'elasticnet' 
                    alpha         = 0.0001,   # multiplies a single regularization term 
                    l1_ratio      = 0.15,     # Elastic Net mixing parameter 
                    n_iter        = 5,        # epochs 
                    
                    fit_intercept = False,    # If False, the data is assumed to be centered. 
                    epsilon       = 0.1,      # for 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
                    shuffle       = True, 
                    verbose       = 0, 
                    n_jobs        = -1, 
                    random_state  = SEED, 
                    learning_rate = 'optimal', # The learning rate schedule
                    eta0          = 0.0,       # The initial learning rate for 'constant' or 'invscaling'  
                    power_t       = 0.5,       # The exponent for inverse scaling learning rate 
                    class_weight  = None, 
                    warm_start    = False) 
                    #average       = False)


# param_grid = dict(loss     = ['hinge','log','modified_huber','squared_hinge','perceptron'],
#                   penalty  = ['none','elasticnet'],
#                   l1_ratio = [0.0, 0.001, 0.01, 0.1, 0.5, 1.0],
#                   n_iter   = [5, 10, 20, 30, 40])

# grid_clf = GridSearchCV(estimator  = clf, 
#                         param_grid = param_grid,
#                         n_jobs     = -1,  
#                         cv         = StratifiedCV,
#                         verbose    = 0
#                        )

# grid_clf.fit(X = X_train, y = y_train)

# print("clf_params = {}".format(grid_clf.best_params_))
# print("score: {}".format(round(grid_clf.best_score_, 4)))
# print

# clf = grid_clf.best_estimator_




clf_params = {'penalty': 'elasticnet', 'l1_ratio': 0.01, 'n_iter': 20, 'loss': 'squared_hinge'}
clf.set_params(**clf_params)
clf.fit(X_train, y_train)


CPU times: user 4 ms, sys: 0 ns, total: 4 ms
Wall time: 1.81 ms
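
The NOTE in the cell above matters: with the default 'optimal' learning-rate schedule, SGD behaves best on features with zero mean and unit variance, and this run leaves scale=False. A minimal sketch of standardizing by hand (an alternative to the scale flag; X_train is assumed to hold raw numeric features):


In [ ]:
# sketch: standardize features before fitting (alternative to scale=True)
from sklearn.preprocessing import StandardScaler

scaler      = StandardScaler().fit(X_train)   # fit on training data only
X_train_std = scaler.transform(X_train)
# clf.fit(X_train_std, y_train)
# ... and scaler.transform(X_test) before predicting later on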

In [6]:
# from sklearn_utilities import GridSearchHeatmap

# GridSearchHeatmap(grid_clf, y_key='learning_rate', x_key='n_estimators')

# from sklearn_utilities import plot_validation_curves

# plot_validation_curves(grid_clf, param_grid, X_train, y_train, ylim = (0.0, 1.05))

In [7]:
%%time

try:
    from sklearn_utilities import plot_learning_curve
except:
    import imp, os
    util = imp.load_source('sklearn_utilities', os.path.expanduser('~/Dropbox/Python/sklearn_utilities.py'))
    from sklearn_utilities import plot_learning_curve

plot_learning_curve(estimator   = clf, 
                    title       = None, 
                    X           = X_train, 
                    y           = y_train, 
                    ylim        = (0.0, 1.10), 
                    cv          = StratifiedKFold(y            = y_train, 
                                                  n_folds      = 10, 
                                                  shuffle      = True, 
                                                  random_state = SEED), 
                    train_sizes = np.linspace(.1, 1.0, 5),
                    n_jobs      = -1)

plt.show()


/home/george/.local/lib/python2.7/site-packages/matplotlib/collections.py:590: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
  if self._edgecolors == str('face'):
CPU times: user 204 ms, sys: 48 ms, total: 252 ms
Wall time: 384 ms
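
plot_learning_curve comes from a private sklearn_utilities module. If it is unavailable, a rough equivalent can be built from sklearn's own learning_curve (the pre-0.18 location, matching the cross_validation imports above):


In [ ]:
# fallback: mean train/CV accuracy vs. training-set size, without sklearn_utilities
from sklearn.learning_curve import learning_curve

sizes, tr_scores, cv_scores = learning_curve(
    clf, X_train, y_train,
    cv          = StratifiedKFold(y_train, n_folds=10, shuffle=True, random_state=SEED),
    train_sizes = np.linspace(.1, 1.0, 5),
    n_jobs      = -1)

plt.plot(sizes, tr_scores.mean(axis=1), 'o-', label='Training score')
plt.plot(sizes, cv_scores.mean(axis=1), 'o-', label='Cross-validation score')
plt.xlabel('Training examples'); plt.ylabel('Score')
plt.legend(loc='best'); plt.show()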

Training set predictions


In [8]:
%%time

train_preds = cross_val_predict(estimator    = clf, 
                                X            = X_train, 
                                y            = y_train, 
                                cv           = StratifiedCV, 
                                n_jobs       = -1, 
                                verbose      = 0, 
                                fit_params   = None, 
                                pre_dispatch = '2*n_jobs')

y_true, y_pred   = y_train, train_preds


CPU times: user 56 ms, sys: 16 ms, total: 72 ms
Wall time: 171 ms

In [9]:
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred, labels=None)
print(cm)

try:
    from sklearn_utilities import plot_confusion_matrix
except:
    import imp, os
    util = imp.load_source('sklearn_utilities', os.path.expanduser('~/Dropbox/Python/sklearn_utilities.py'))
    from sklearn_utilities import plot_confusion_matrix


plot_confusion_matrix(cm, ['Did not Donate','Donated'])

accuracy = round(np.trace(cm)/float(np.sum(cm)),4)
misclass = 1 - accuracy
print("Accuracy {}, mis-class rate {}".format(accuracy,misclass))


[[389  49]
 [108  30]]
Accuracy 0.7274, mis-class rate 0.2726
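
plot_confusion_matrix is also from the private utilities module; a minimal matplotlib stand-in, for reference:


In [ ]:
# minimal stand-in for plot_confusion_matrix
def show_confusion_matrix(cm, labels):
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar()
    ticks = np.arange(len(labels))
    plt.xticks(ticks, labels); plt.yticks(ticks, labels)
    plt.xlabel('Predicted'); plt.ylabel('True'); plt.title('Confusion matrix')
    plt.show()

# show_confusion_matrix(cm, ['Did not Donate', 'Donated'])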

In [10]:
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import log_loss
from sklearn.metrics import f1_score

fpr, tpr, thresholds = roc_curve(y_true, y_pred, pos_label=None)


plt.figure(figsize=(10,6))
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)

AUC = roc_auc_score(y_true, y_pred, average='macro')
plt.text(x=0.6,y=0.4,s="AUC         {:.4f}"\
         .format(AUC),
        fontsize=16)

plt.text(x=0.6,y=0.3,s="accuracy {:.2f}%"\
         .format(accuracy*100),
        fontsize=16)

logloss = log_loss(y_true, y_pred)
plt.text(x=0.6,y=0.2,s="LogLoss   {:.4f}"\
         .format(logloss),
        fontsize=16)

f1 = f1_score(y_true, y_pred)
plt.text(x=0.6,y=0.1,s="f1             {:.4f}"\
         .format(f1),
        fontsize=16)

plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show()
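
The curve above is built from hard 0/1 predictions, which is why it collapses to a single corner point and the AUC is modest. Continuous decision_function scores usually give a more informative ROC; since this version's cross_val_predict only returns predict() output, a sketch with a manual fold loop:


In [ ]:
# sketch: out-of-fold decision_function scores for a score-based ROC/AUC
from sklearn.base import clone

X_vals     = np.asarray(X_train, dtype=np.float64)
y_vals     = np.asarray(y_train)
oof_scores = np.zeros(len(y_vals))

for train_idx, test_idx in StratifiedCV:
    fold_clf = clone(clf)
    fold_clf.fit(X_vals[train_idx], y_vals[train_idx])
    oof_scores[test_idx] = fold_clf.decision_function(X_vals[test_idx])

print("AUC from decision scores: {:.4f}".format(roc_auc_score(y_vals, oof_scores)))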



In [11]:
%%time

score, permutation_scores, pvalue = permutation_test_score(estimator      = clf, 
                                                           X              = X_train.values.astype(np.float32), 
                                                           y              = y_train, 
                                                           cv             = StratifiedCV, 
                                                           labels         = None,
                                                           random_state   = SEED,
                                                           verbose        = 0,
                                                           n_permutations = 100, 
                                                           scoring        = None,
                                                           n_jobs         = -1) 

plt.figure(figsize=(20,8))
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
plt.plot(2 * [score], ylim, '--g', linewidth=3,
         label='Classification Score (pvalue {:.4f})'.format(pvalue))
         
plt.plot(2 * [1. / N_CLASSES], ylim, 'r', linewidth=7, label='Luck')

plt.ylim(ylim)
plt.legend(loc='center',fontsize=16)
plt.xlabel('Score')
plt.show()

# find mean and stdev of the scores
# (import under a different name so it does not shadow the `norm` flag above)
from scipy.stats import norm as gaussian
mu, std = gaussian.fit(permutation_scores)


CPU times: user 376 ms, sys: 44 ms, total: 420 ms
Wall time: 657 ms
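
The reported p-value is the empirical one, (C + 1) / (n_permutations + 1), where C counts permutation scores at or above the true score; it can be re-derived from the returned arrays:


In [ ]:
# re-derive the empirical p-value from the permutation scores
C = np.sum(permutation_scores >= score)
print("p-value: {:.4f}".format((C + 1.0) / (len(permutation_scores) + 1.0)))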

In [12]:
# format for scores.csv file
import re
algo = re.search(r"submission_(.*?)\.csv", submission_filename).group(1)
print("{: <26} ,        ,   {:.4f} ,  {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f}"\
      .format(algo,accuracy,logloss,AUC,f1,mu,std))


SGDClassifier              ,        ,   0.7274 ,  9.4143 , 0.5528 , 0.2765 , 0.6339 , 0.0690

--------------------------------------------------------------------------------------------

Test Set Predictions

Re-fit with the full training set


In [13]:
clf.set_params(**clf_params)
clf.fit(X_train, y_train)


Out[13]:
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
       eta0=0.0, fit_intercept=False, l1_ratio=0.01,
       learning_rate='optimal', loss='squared_hinge', n_iter=20, n_jobs=-1,
       penalty='elasticnet', power_t=0.5, random_state=97, shuffle=True,
       verbose=0, warm_start=False)

Read the test data


In [14]:
from load_blood_data import load_blood_data

X_test, IDs = load_blood_data(train=False, SEED   = SEED, 
                                           scale  = scale,
                                           minmax = minmax,
                                           norm   = norm,
                                           nointercept = nointercept)

Predict the test set with the fitted model


In [15]:
y_pred = clf.predict(X_test)
print(y_pred[:10])

try:
    y_pred_probs  = clf.predict_proba(X_test)
    print(y_pred_probs[:10])
    donate_probs  = [prob[1] for prob in y_pred_probs]
except Exception as e:
    print(e)
    donate_probs = [0.65 if x>0 else 1-0.65 for x in y_pred]
    
print(donate_probs[:10])


[1 0 0 0 1 1 1 0 0 0]
probability estimates are not available for loss='squared_hinge'
[0.65, 0.35, 0.35, 0.35, 0.65, 0.65, 0.65, 0.35, 0.35, 0.35]
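
The fixed 0.65/0.35 fallback above is crude. When real probabilities are needed from a hinge-type loss, one option (assuming sklearn >= 0.16, which ships sklearn.calibration) is to Platt-calibrate the decision scores; a sketch:


In [ ]:
# sketch: calibrated probabilities instead of the fixed 0.65/0.35 fallback
from sklearn.calibration import CalibratedClassifierCV

calibrated = CalibratedClassifierCV(clf, method='sigmoid', cv=10)
calibrated.fit(X_train, y_train)
# donate_probs = calibrated.predict_proba(X_test)[:, 1]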

Create the submission file


In [164]:
assert len(IDs)==len(donate_probs)

with open(submission_filename, "w") as f:
    f.write(",Made Donation in March 2007\n")
    for ID, prob in zip(IDs, donate_probs):
        f.write("{},{}\n".format(ID,prob))
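
Equivalently, the file can be written with pandas (same header and ID column as the loop above):


In [ ]:
# pandas equivalent of the manual write above
submission = pd.DataFrame({'Made Donation in March 2007': donate_probs}, index=IDs)
submission.to_csv(submission_filename, index_label='')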
