In [48]:
print(__doc__)  # __doc__ holds the current module's docstring


Automatically created module for IPython interactive environment

In [49]:
import numpy as np
import matplotlib.pyplot as plt

In [50]:
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV  # was sklearn.grid_search, removed in scikit-learn 0.20

In [51]:
logistic = linear_model.LogisticRegression()

In [52]:
# The sklearn.decomposition module provides matrix decomposition algorithms such as PCA, NMF,
# and ICA. Most of them can be used as dimensionality reduction techniques.
pca = decomposition.PCA()
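
For intuition, here is a minimal sketch of PCA as a dimensionality reduction step (synthetic data, not part of this session; rng and X_demo are illustrative names only):

In [ ]:
# Sketch: project 10-dimensional random data onto its first 2 principal components.
rng = np.random.RandomState(0)
X_demo = rng.rand(100, 10)
X_2d = decomposition.PCA(n_components=2).fit_transform(X_demo)
X_2d.shape  # (100, 2)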

In [53]:
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
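
A Pipeline chains transformers with a final estimator: fit() runs pca.fit_transform on the input and then fits the logistic regression on the reduced data. Roughly, as a comment-only sketch:

In [ ]:
# What pipe.fit(X, y) does internally, simplified:
#     X_reduced = pca.fit_transform(X)
#     logistic.fit(X_reduced, y)
# and pipe.predict(X) applies pca.transform(X) before logistic.predict.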

In [54]:
digits = datasets.load_digits()
digits


Out[54]:
{'DESCR': " Optical Recognition of Handwritten Digits Data Set\n\nNotes\n-----\nData Set Characteristics:\n    :Number of Instances: 5620\n    :Number of Attributes: 64\n    :Attribute Information: 8x8 image of integer pixels in the range 0..16.\n    :Missing Attribute Values: None\n    :Creator: E. Alpaydin (alpaydin '@' boun.edu.tr)\n    :Date: July; 1998\n\nThis is a copy of the test set of the UCI ML hand-written digits datasets\nhttp://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits\n\nThe data set contains images of hand-written digits: 10 classes where\neach class refers to a digit.\n\nPreprocessing programs made available by NIST were used to extract\nnormalized bitmaps of handwritten digits from a preprinted form. From a\ntotal of 43 people, 30 contributed to the training set and different 13\nto the test set. 32x32 bitmaps are divided into nonoverlapping blocks of\n4x4 and the number of on pixels are counted in each block. This generates\nan input matrix of 8x8 where each element is an integer in the range\n0..16. This reduces dimensionality and gives invariance to small\ndistortions.\n\nFor info on NIST preprocessing routines, see M. D. Garris, J. L. Blue, G.\nT. Candela, D. L. Dimmick, J. Geist, P. J. Grother, S. A. Janet, and C.\nL. Wilson, NIST Form-Based Handprint Recognition System, NISTIR 5469,\n1994.\n\nReferences\n----------\n  - C. Kaynak (1995) Methods of Combining Multiple Classifiers and Their\n    Applications to Handwritten Digit Recognition, MSc Thesis, Institute of\n    Graduate Studies in Science and Engineering, Bogazici University.\n  - E. Alpaydin, C. Kaynak (1998) Cascading Classifiers, Kybernetika.\n  - Ken Tang and Ponnuthurai N. Suganthan and Xi Yao and A. Kai Qin.\n    Linear dimensionalityreduction using relevance weighted LDA. School of\n    Electrical and Electronic Engineering Nanyang Technological University.\n    2005.\n  - Claudio Gentile. A New Approximate Maximal Margin Classification\n    Algorithm. NIPS. 2000.\n",
 'data': array([[  0.,   0.,   5., ...,   0.,   0.,   0.],
        [  0.,   0.,   0., ...,  10.,   0.,   0.],
        [  0.,   0.,   0., ...,  16.,   9.,   0.],
        ..., 
        [  0.,   0.,   1., ...,   6.,   0.,   0.],
        [  0.,   0.,   2., ...,  12.,   0.,   0.],
        [  0.,   0.,  10., ...,  12.,   1.,   0.]]),
 'images': array([[[  0.,   0.,   5., ...,   1.,   0.,   0.],
         [  0.,   0.,  13., ...,  15.,   5.,   0.],
         [  0.,   3.,  15., ...,  11.,   8.,   0.],
         ..., 
         [  0.,   4.,  11., ...,  12.,   7.,   0.],
         [  0.,   2.,  14., ...,  12.,   0.,   0.],
         [  0.,   0.,   6., ...,   0.,   0.,   0.]],
 
        [[  0.,   0.,   0., ...,   5.,   0.,   0.],
         [  0.,   0.,   0., ...,   9.,   0.,   0.],
         [  0.,   0.,   3., ...,   6.,   0.,   0.],
         ..., 
         [  0.,   0.,   1., ...,   6.,   0.,   0.],
         [  0.,   0.,   1., ...,   6.,   0.,   0.],
         [  0.,   0.,   0., ...,  10.,   0.,   0.]],
 
        [[  0.,   0.,   0., ...,  12.,   0.,   0.],
         [  0.,   0.,   3., ...,  14.,   0.,   0.],
         [  0.,   0.,   8., ...,  16.,   0.,   0.],
         ..., 
         [  0.,   9.,  16., ...,   0.,   0.,   0.],
         [  0.,   3.,  13., ...,  11.,   5.,   0.],
         [  0.,   0.,   0., ...,  16.,   9.,   0.]],
 
        ..., 
        [[  0.,   0.,   1., ...,   1.,   0.,   0.],
         [  0.,   0.,  13., ...,   2.,   1.,   0.],
         [  0.,   0.,  16., ...,  16.,   5.,   0.],
         ..., 
         [  0.,   0.,  16., ...,  15.,   0.,   0.],
         [  0.,   0.,  15., ...,  16.,   0.,   0.],
         [  0.,   0.,   2., ...,   6.,   0.,   0.]],
 
        [[  0.,   0.,   2., ...,   0.,   0.,   0.],
         [  0.,   0.,  14., ...,  15.,   1.,   0.],
         [  0.,   4.,  16., ...,  16.,   7.,   0.],
         ..., 
         [  0.,   0.,   0., ...,  16.,   2.,   0.],
         [  0.,   0.,   4., ...,  16.,   2.,   0.],
         [  0.,   0.,   5., ...,  12.,   0.,   0.]],
 
        [[  0.,   0.,  10., ...,   1.,   0.,   0.],
         [  0.,   2.,  16., ...,   1.,   0.,   0.],
         [  0.,   0.,  15., ...,  15.,   0.,   0.],
         ..., 
         [  0.,   4.,  16., ...,  16.,   6.,   0.],
         [  0.,   8.,  16., ...,  16.,   8.,   0.],
         [  0.,   1.,   8., ...,  12.,   1.,   0.]]]),
 'target': array([0, 1, 2, ..., 8, 9, 8]),
 'target_names': array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}

In [55]:
pca.explained_variance_  # raises AttributeError: pca has not been fitted yet


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-55-0f2968a2997e> in <module>()
----> 1 pca.explained_variance_

AttributeError: 'PCA' object has no attribute 'explained_variance_'
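
By scikit-learn convention, attributes with a trailing underscore (such as explained_variance_) only exist after fit(), which is why the lookup above fails. A sketch of one way to guard against this:

In [ ]:
# Sketch: check for the fitted attribute before using it.
if hasattr(pca, 'explained_variance_'):
    print(pca.explained_variance_[:5])
else:
    print('PCA has not been fitted yet')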

In [56]:
X_digits = digits.data    # flattened 8x8 images, shape (1797, 64)
y_digits = digits.target  # digit labels 0-9
X_digits.shape


Out[56]:
(1797L, 64L)
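
The same pixels are also available as 8x8 image arrays; digits.data is just the flattened form, which this sketch verifies:

In [ ]:
# digits.images has shape (1797, 8, 8); flattening each image recovers digits.data.
np.array_equal(digits.data, digits.images.reshape((len(digits.images), -1)))  # True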

In [57]:
pca.fit(X_digits)  # Fit the model with X.
pca.explained_variance_


Out[57]:
array([  1.78907316e+02,   1.63626641e+02,   1.41709536e+02,
         1.01044115e+02,   6.94744827e+01,   5.90756320e+01,
         5.18556662e+01,   4.39906130e+01,   4.02885629e+01,
         3.69912020e+01,   2.85031708e+01,   2.73059660e+01,
         2.18893003e+01,   2.13124899e+01,   1.76269077e+01,
         1.69374332e+01,   1.58425689e+01,   1.49961105e+01,
         1.22276649e+01,   1.08808010e+01,   1.06876155e+01,
         9.57726524e+00,   9.22126826e+00,   8.68553268e+00,
         8.36095658e+00,   7.16179198e+00,   6.91588809e+00,
         6.18950881e+00,   5.88171633e+00,   5.15299774e+00,
         4.48879723e+00,   4.24451468e+00,   4.04518650e+00,
         3.94120891e+00,   3.70440987e+00,   3.52968776e+00,
         3.08285758e+00,   2.73627648e+00,   2.67062197e+00,
         2.54029121e+00,   2.28171700e+00,   1.90618094e+00,
         1.81615447e+00,   1.68902395e+00,   1.40119202e+00,
         1.29149979e+00,   1.15828926e+00,   9.30701800e-01,
         6.69477833e-01,   4.85794730e-01,   2.52210004e-01,
         9.90976176e-02,   6.30956535e-02,   6.07039586e-02,
         3.96441561e-02,   1.49422438e-02,   8.46835749e-03,
         3.62164306e-03,   1.27634048e-03,   6.60902920e-04,
         4.11993910e-04,   1.14223098e-30,   1.14223098e-30,
         1.12479977e-30])

In [71]:
###############################################################################
# Plot the PCA spectrum
plt.figure(1, figsize=(4, 3))  # figure 1; figsize is a (width, height) tuple in inches
plt.clf()  # clear the current figure
plt.axes([.2, .2, .7, .7])  # axes rectangle [left, bottom, width, height] in figure coordinates
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################


Out[71]:
<matplotlib.text.Text at 0x18ce69b0>
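
The spectrum drops off quickly. A common complement (a sketch, not part of the original session) is the cumulative explained variance ratio, which shows how many components are needed to reach a target such as 95%:

In [ ]:
# Sketch: number of components explaining at least 95% of the variance.
cumvar = np.cumsum(pca.explained_variance_ratio_)
print(np.searchsorted(cumvar, 0.95) + 1)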

In [72]:
# Candidate numbers of PCA components for the grid search
n_components = [20, 40, 64]

In [73]:
Cs = np.logspace(-4, 4, 3)  # logistic regression regularization strengths: 1e-4, 1e0, 1e4
Cs


Out[73]:
array([  1.00000000e-04,   1.00000000e+00,   1.00000000e+04])

In [74]:
# Parameters of pipeline steps are set with '__'-separated names, e.g. pca__n_components:
estimator = GridSearchCV(pipe,
                         dict(pca__n_components=n_components,
                              logistic__C=Cs))
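
The grid keys must match entries in pipe.get_params(), whose names follow the <step name>__<parameter> pattern. As a quick sanity check (a sketch):

In [ ]:
# Sketch: list the tunable parameter names of the pipeline.
sorted(pipe.get_params().keys())  # includes 'pca__n_components' and 'logistic__C'
# GridSearchCV effectively calls pipe.set_params(pca__n_components=..., logistic__C=...)
# on a clone of the pipeline for each candidate combination.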

In [75]:
estimator.fit(X_digits, y_digits)
estimator.best_estimator_.named_steps['pca'].n_components


Out[75]:
40
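
GridSearchCV also records the full winning parameter combination and its cross-validated score; a sketch for inspecting them (the printed values are illustrative, not from this run):

In [ ]:
# Sketch: inspect the winning parameters and their mean cross-validated score.
print(estimator.best_params_)  # e.g. {'logistic__C': 1.0, 'pca__n_components': 40}
print(estimator.best_score_)   # mean CV accuracy of the best parameter combination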

In [76]:
# Mark the chosen number of components on the PCA spectrum (figure 1 from In [71];
# with the inline backend this would need to run in the same cell as the plot)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
# prop controls the legend font properties, e.g. legend(..., prop={'size': 8}) for an 8-point font
plt.legend(prop=dict(size=12))
plt.show()
