Parameter tuning on the digits dataset

Based on the parameter-tuning tutorial on the scikit-learn website.


In [45]:
from __future__ import print_function

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC

# Loading the Digits dataset
digits = datasets.load_digits()


In [46]:
digits.images.shape  # 1797 samples, each an 8x8 image


Out[46]:
(1797, 8, 8)
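
As a quick sanity check (not part of the original tutorial), one of the 8x8 images can be displayed alongside its label; this sketch assumes matplotlib is installed.

In [ ]:
import matplotlib.pyplot as plt

# Show the first 8x8 grayscale image together with its target label.
plt.matshow(digits.images[0], cmap='gray')
plt.title('label = {}'.format(digits.target[0]))
plt.show()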

In [47]:
n_samples = len(digits.images)
print('n_samples = ', n_samples)


n_samples =  1797

In [48]:
# Flatten each image into a vector so the data is an (n_samples, n_features)
# matrix, as scikit-learn classifiers expect
X = digits.images.reshape((len(digits.images), -1))
print('X.shape = ', X.shape)


X.shape =  (1797, 64)
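
The -1 in reshape lets NumPy infer the second dimension (8 * 8 = 64). A small sketch, not in the original, verifying that each row of X is the row-major flattening of the corresponding image:

In [ ]:
import numpy as np

# Row 0 of X should equal image 0 flattened in row-major order.
assert np.array_equal(X[0], digits.images[0].ravel())
# The inferred shape matches the explicit one.
assert np.array_equal(X, digits.images.reshape(n_samples, 64))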

In [50]:
import numpy as np

In [51]:
y = digits.target
print("y.shape = ", y.shape)


y.shape =  (1797,)
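
The ten digit classes are roughly balanced, which can be confirmed with a bincount (a quick sketch, not in the original tutorial):

In [ ]:
# Number of samples per class, indexed by digit 0-9.
print(np.bincount(y))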

In [55]:
# Split the data into equal-sized training and test halves
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0)
print('X_train.shape = {}, X_test.shape = {}, y_train.shape = {}, y_test.shape = {}'
      .format(X_train.shape, X_test.shape, y_train.shape, y_test.shape))


X_train.shape = (898, 64), X_test.shape = (899, 64), y_train.shape = (898,), y_test.shape = (899,)
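
With random_state=0 the split is reproducible. If preserving the class proportions in both halves mattered, a stratified split is a common alternative; a hedged sketch (the tutorial itself uses a plain random split, and the _s names are hypothetical):

In [ ]:
# stratify=y keeps each class at the same proportion in both halves.
X_train_s, X_test_s, y_train_s, y_test_s = train_test_split(
    X, y, test_size=0.5, random_state=0, stratify=y)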

In [56]:
# Parameter grids to search over with cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
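
This grid expands to 2 gammas x 4 Cs = 8 rbf candidates plus 4 linear ones, 12 in total. ParameterGrid can enumerate them, as in this small sketch:

In [ ]:
from sklearn.model_selection import ParameterGrid

# 8 rbf combinations + 4 linear combinations = 12 candidate settings.
print(len(ParameterGrid(tuned_parameters)))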

In [57]:
scores = ['precision', 'recall']
# precision: ability of the classifier not to label as positive a sample
# that is negative, i.e. how many selected items are relevant:
# tp / (tp + fp)
# recall: ability of the classifier to find all positive samples,
# i.e. how many relevant items are selected:
# tp / (tp + fn)
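
These definitions are easy to check on a toy binary example with sklearn.metrics; the '_macro' variants used below simply average the per-class scores. A small sketch:

In [ ]:
from sklearn.metrics import precision_score, recall_score

y_t = [1, 1, 1, 0, 0, 0]  # true labels
y_p = [1, 1, 0, 1, 0, 0]  # predictions: tp=2, fp=1, fn=1
print(precision_score(y_t, y_p))  # tp / (tp + fp) = 2/3
print(recall_score(y_t, y_p))     # tp / (tp + fn) = 2/3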

In [58]:
for score in scores:
    print("#Tuning hyper-parameters for {}\n".format(score))

    # GridSearchCV(estimator, param_grid (dict or list of dicts), scoring=...)
    clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                       scoring='%s_macro' % score)
    clf.fit(X_train, y_train)

    print("Best parameter set found on dev set:\n{} \n".format(clf.best_params_))
    print("Grid scores on dev set: \n")
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print('%0.3f (+/-%0.03f) for %r' % (mean, std * 2, params))
    print()

    print('Detailed classification report: \n')
    print('The model is trained on the full dev set.')
    print('The scores are computed on the full evaluation set.\n')

    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred), '\n')


#Tuning hyper-parameters for precision

Best parameter set found on dev set:
{'C': 10, 'kernel': 'rbf', 'gamma': 0.001} 

Grid scores on dev set: 

0.986 (+/-0.016) for {'C': 1, 'kernel': 'rbf', 'gamma': 0.001}
0.959 (+/-0.029) for {'C': 1, 'kernel': 'rbf', 'gamma': 0.0001}
0.988 (+/-0.017) for {'C': 10, 'kernel': 'rbf', 'gamma': 0.001}
0.982 (+/-0.026) for {'C': 10, 'kernel': 'rbf', 'gamma': 0.0001}
0.988 (+/-0.017) for {'C': 100, 'kernel': 'rbf', 'gamma': 0.001}
0.982 (+/-0.025) for {'C': 100, 'kernel': 'rbf', 'gamma': 0.0001}
0.988 (+/-0.017) for {'C': 1000, 'kernel': 'rbf', 'gamma': 0.001}
0.982 (+/-0.025) for {'C': 1000, 'kernel': 'rbf', 'gamma': 0.0001}
0.975 (+/-0.014) for {'C': 1, 'kernel': 'linear'}
0.975 (+/-0.014) for {'C': 10, 'kernel': 'linear'}
0.975 (+/-0.014) for {'C': 100, 'kernel': 'linear'}
0.975 (+/-0.014) for {'C': 1000, 'kernel': 'linear'}

Detailed classification report: 

The model is trained on the full dev set.
The scores are computed on the full evaluation set.

             precision    recall  f1-score   support

          0       1.00      1.00      1.00        89
          1       0.97      1.00      0.98        90
          2       0.99      0.98      0.98        92
          3       1.00      0.99      0.99        93
          4       1.00      1.00      1.00        76
          5       0.99      0.98      0.99       108
          6       0.99      1.00      0.99        89
          7       0.99      1.00      0.99        78
          8       1.00      0.98      0.99        92
          9       0.99      0.99      0.99        92

avg / total       0.99      0.99      0.99       899
 

#Tuning hyper-parameters for recall

Best parameter set found on dev set:
{'C': 10, 'kernel': 'rbf', 'gamma': 0.001} 

Grid scores on dev set: 

0.986 (+/-0.019) for {'C': 1, 'kernel': 'rbf', 'gamma': 0.001}
0.957 (+/-0.029) for {'C': 1, 'kernel': 'rbf', 'gamma': 0.0001}
0.987 (+/-0.019) for {'C': 10, 'kernel': 'rbf', 'gamma': 0.001}
0.981 (+/-0.028) for {'C': 10, 'kernel': 'rbf', 'gamma': 0.0001}
0.987 (+/-0.019) for {'C': 100, 'kernel': 'rbf', 'gamma': 0.001}
0.981 (+/-0.026) for {'C': 100, 'kernel': 'rbf', 'gamma': 0.0001}
0.987 (+/-0.019) for {'C': 1000, 'kernel': 'rbf', 'gamma': 0.001}
0.981 (+/-0.026) for {'C': 1000, 'kernel': 'rbf', 'gamma': 0.0001}
0.972 (+/-0.012) for {'C': 1, 'kernel': 'linear'}
0.972 (+/-0.012) for {'C': 10, 'kernel': 'linear'}
0.972 (+/-0.012) for {'C': 100, 'kernel': 'linear'}
0.972 (+/-0.012) for {'C': 1000, 'kernel': 'linear'}

Detailed classification report: 

The model is trained on the full dev set.
The scores are computed on the full evaluation set.

             precision    recall  f1-score   support

          0       1.00      1.00      1.00        89
          1       0.97      1.00      0.98        90
          2       0.99      0.98      0.98        92
          3       1.00      0.99      0.99        93
          4       1.00      1.00      1.00        76
          5       0.99      0.98      0.99       108
          6       0.99      1.00      0.99        89
          7       0.99      1.00      0.99        78
          8       1.00      0.98      0.99        92
          9       0.99      0.99      0.99        92

avg / total       0.99      0.99      0.99       899
 

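After the loop, clf holds the grid search from the last iteration (the recall run). Since GridSearchCV refits the best setting on the whole training half by default (refit=True), it can be inspected and reused directly; a short follow-up sketch, not in the original tutorial:

In [ ]:
from sklearn.metrics import confusion_matrix

# The best SVC, refit on all of X_train with the winning parameters.
print(clf.best_estimator_)
# Per-class error structure on the held-out half.
print(confusion_matrix(y_test, clf.predict(X_test)))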
