In [1]:
%matplotlib inline 
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
import numpy as np
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.svm import LinearSVR
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.model_selection import train_test_split

In [2]:
from sklearn.datasets import fetch_mldata

mnist = fetch_mldata('MNIST original')
mnist

X, y = mnist["data"], mnist["target"]

X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000],y[60000:]

shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]

In [3]:
classificators = [None] * 10

Cs = [0.001, 0.01, 0.1, 1, 10]
kernels = ['linear']
probability = [True]

X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, train_size=6000, test_size=1000, random_state=42)
#smaller train and test sets to speed up the process - consequently smaller precision and recall
for index in range(10):
    y_train_tmp = (y_train == index)
    svc = SVC()
    
    parameters = {'C': Cs,'kernel':kernels, 'probability':probability}
    
    grid_search = GridSearchCV(svc, parameters, n_jobs=-1, cv=3)
    grid_search.fit(X_train, y_train_tmp)

    svc = grid_search.best_estimator_
    classificators[index] = svc

In [4]:
probabilities = [None] * 10
for index, classificator in enumerate(classificators):
    probabilities[index] = classificator.predict_proba(X_test)[:,1]
probabilities = np.asarray(probabilities)
predictions = probabilities.argmax(axis=0)

In [5]:
print(classification_report(predictions,y_test),"\n",confusion_matrix(y_test,predictions)) #around 0.86 both precision and recall


             precision    recall  f1-score   support

          0       0.99      0.91      0.95        97
          1       0.97      0.93      0.95       127
          2       0.82      0.84      0.83        93
          3       0.78      0.81      0.79       112
          4       0.83      0.85      0.84        97
          5       0.74      0.81      0.77        83
          6       0.91      0.89      0.90        97
          7       0.92      0.90      0.91       119
          8       0.70      0.73      0.72        78
          9       0.75      0.74      0.75        97

avg / total       0.85      0.85      0.85      1000
 
 [[ 88   0   0   0   0   0   0   1   0   0]
 [  0 118   2   0   1   0   0   0   0   1]
 [  1   4  78   3   2   1   1   0   5   0]
 [  0   1   4  91   0  10   0   0   7   4]
 [  0   0   2   0  82   0   5   1   1   8]
 [  0   1   2   7   1  67   2   0   4   7]
 [  4   0   1   0   1   1  86   0   1   0]
 [  0   1   0   1   1   1   0 107   1   4]
 [  3   2   3   7   1   3   3   1  57   1]
 [  1   0   1   3   8   0   0   9   2  72]]

In [ ]: