In [ ]:
print('Hello MNIST!')

In [ ]:
import numpy as np
import pandas as pd

3.1 MNIST

  • Download the mnist-original.mat dataset from here (an alternative loader via fetch_openml is sketched below)
  • See the Stack Overflow discussion here
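
If the .mat file is unavailable, recent scikit-learn can fetch the same data from OpenML. A minimal sketch, kept commented out so it does not override the loadmat cell below; note the row ordering differs from mnist-original.mat:

In [ ]:
# Alternative loader via OpenML (sketch; requires network access):
# from sklearn.datasets import fetch_openml
# mnist = fetch_openml('mnist_784', version=1, as_frame=False)
# X, y = mnist['data'], mnist['target'].astype(np.uint8)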

In [ ]:
from scipy.io import loadmat
mnist = loadmat('./datasets/mnist-original.mat')

In [ ]:
mnist

In [ ]:
X, y = mnist['data'], mnist['label']

In [ ]:
X = X.T
X.shape

In [ ]:
y = y.T
y.shape

In [ ]:
type(y)

In [ ]:
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt

In [ ]:
some_digit = X[36000]  # one 784-pixel row (a 5 in this dataset's ordering)
some_digit_image = some_digit.reshape((28, 28))
plt.imshow(some_digit_image, cmap=matplotlib.cm.binary, interpolation="nearest")
plt.axis('off')
plt.show()

Split into training and test sets


In [ ]:
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]

In [ ]:
len(X_train)
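
The original .mat file stores the digits sorted by label, so shuffle the training set before any cross-validation; otherwise some folds could be missing entire classes.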

In [ ]:
shuffle_index = np.random.permutation(len(X_train))  # call np.random.seed(...) first for a reproducible shuffle
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]

3.2 Training a binary classifier


In [ ]:
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)

In [ ]:
pd.Series(y_train_5.ravel()).value_counts()

In [ ]:
y_train_5_0_1 = np.where(y_train_5, 1, 0)
pd.Series(y_train_5_0_1.ravel()).value_counts()
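
Only about 10% of the training images are 5s, so the classes are heavily imbalanced; keep that in mind when reading the accuracy scores below.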

In [ ]:
from sklearn.linear_model import SGDClassifier

In [ ]:
y_train_5 = y_train_5.reshape(len(y_train_5),)  # flatten to a 1-D label vector, as scikit-learn expects
y_train_5.shape

In [ ]:
sgd_clf = SGDClassifier(random_state=42)  # note: default max_iter/tol differ across scikit-learn versions
sgd_clf.fit(X_train, y_train_5)

In [ ]:
sgd_clf.predict([some_digit])  # predict expects a 2-D array: one row per instance

In [ ]:
sgd_clf.predict(some_digit.reshape(1, -1))

In [ ]:
some_digit.reshape(1, -1).shape

3.3 Performance Measures

3.3.1 Implementing Cross-Validation


In [ ]:
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone

In [ ]:
skfolds = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)  # recent scikit-learn requires shuffle=True when random_state is set

In [ ]:
for train_index, test_index in skfolds.split(X_train, y_train_5):
    clone_clf = clone(sgd_clf)
    X_train_folds = X_train[train_index]
    X_test_fold   = X_train[test_index]
    y_train_folds = y_train_5[train_index]
    y_test_fold   = y_train_5[test_index]
    
    clone_clf.fit(X_train_folds, y_train_folds)
    y_pred = clone_clf.predict(X_test_fold)
    n_correct = sum(y_pred == y_test_fold)
    print(n_correct / len(y_pred))

In [ ]:
from sklearn.model_selection import cross_val_score
# Equivalent one-liner to the manual loop above (slow):
# cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")

Dummy predictor


In [ ]:
from sklearn.base import BaseEstimator

class Never5Classifier(BaseEstimator):
    def fit(self, X, y=None):
        return self  # scikit-learn convention: fit returns the estimator
    def predict(self, X):
        return np.zeros((len(X), 1), dtype=bool)  # always predict "not a 5"

In [ ]:
cross_val_score(Never5Classifier(), X_train, y_train_5, cv=3, scoring="accuracy")  # ~90% accuracy just from class imbalance
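
scikit-learn ships an equivalent built-in baseline; a minimal sketch using DummyClassifier, reusing X_train and y_train_5 from above:

In [ ]:
# Built-in baseline that always predicts the most frequent class ("not a 5")
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="most_frequent")
cross_val_score(dummy_clf, X_train, y_train_5, cv=3, scoring="accuracy")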

Confusion Matrix


In [ ]:
from sklearn.model_selection import cross_val_predict
y_train_predict = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)  # out-of-fold predictions for every training instance

In [ ]:
y_train_predict

In [ ]:
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train_5, y_train_predict)  # rows = actual, columns = predicted: [[TN, FP], [FN, TP]]

In [ ]:
confusion_matrix(y_train_5, y_train_5)  # a perfect classifier: only the diagonal is non-zero

In [ ]:
from sklearn.metrics import precision_score, recall_score, f1_score

In [ ]:
precision_score(y_train_5, y_train_predict)

In [ ]:
recall_score(y_train_5, y_train_predict)

In [ ]:
f1_score(y_train_5, y_train_predict)
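
As a sanity check, all three metrics can be recomputed from the confusion-matrix counts; this reuses y_train_5 and y_train_predict from the cells above:

In [ ]:
# precision = TP/(TP+FP), recall = TP/(TP+FN), F1 = their harmonic mean
tn, fp, fn, tp = confusion_matrix(y_train_5, y_train_predict).ravel()
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
precision, recall, f1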

3.3.3 Precision/Recall Tradeoff


In [ ]:
y_scores = sgd_clf.decision_function([some_digit])

In [ ]:
# predict() simply thresholds the decision function at 0:
np.array_equal(sgd_clf.decision_function(X_train) >= 0, sgd_clf.predict(X_train))

In [ ]:
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function")

In [ ]:
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)

In [ ]:
len(precisions), len(recalls), len(thresholds)  # thresholds is one entry shorter than precisions/recalls

In [ ]:
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    plt.plot(thresholds, precisions[:-1], 'b--', label='precision')
    plt.plot(thresholds, recalls[:-1], 'g-', label='recall')
    plt.xlabel('threshold')
    plt.legend()

In [ ]:
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()

In [ ]:
def plot_precision_vs_recall(precisions, recalls):
    plt.plot(recalls, precisions, 'b--')
    plt.xlabel('recall')
    plt.ylabel('precision')
    
plot_precision_vs_recall(precisions, recalls)
plt.show()

In [ ]:
threshold_90_precision = thresholds[np.argmax(precisions >= 0.9)]  # argmax picks the first True: the lowest threshold reaching 90% precision
y_train_pred_90 = (y_scores >= threshold_90_precision)

In [ ]:
print(precision_score(y_train_5, y_train_pred_90))
print(recall_score(y_train_5, y_train_pred_90))
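
Precision now sits at the requested 90%, but recall drops sharply: raising the threshold always trades recall away for precision.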

3.3.4 ROC Curve


In [ ]:
from sklearn.metrics import roc_curve

In [ ]:
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)

In [ ]:
confusion_matrix(y_train_5, y_scores >= 0)  # the confusion matrix at the default threshold of 0

In [ ]:
len(fpr), len(thresholds)
fpr[1096], tpr[1096]  # inspect one operating point on the ROC curve

In [ ]:
# Sanity check against the confusion-matrix counts at this threshold:
# TPR = TP / (TP + FN), FPR = FP / (FP + TN)
4130/(1291+4130), 779 / (53799+780)

In [ ]:
def plot_roc_curve(fpr, tpr):
    plt.plot(fpr, tpr, linewidth=2)
    plt.plot([0,1], [0,1], 'k--')  # diagonal = random classifier
    plt.xlabel('false positive rate')
    plt.ylabel('true positive rate')

In [ ]:
plot_roc_curve(fpr, tpr)

In [ ]:
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
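
AUC is 0.5 for a random classifier and 1.0 for a perfect one. Because the positive class (the 5s) is rare here, the precision/recall curve is the more informative view; a high AUC can mask poor precision.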

RandomForestClassifier


In [ ]:
from sklearn.ensemble import RandomForestClassifier

forest_clf = RandomForestClassifier(random_state=42)

In [ ]:
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method='predict_proba')

In [ ]:
y_scores_forest = y_probas_forest[:, 1]  # use the probability of the positive class as the score
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5, y_scores_forest)

In [ ]:
plt.plot(fpr, tpr, 'b:', label='SGD')
plt.plot(fpr_forest, tpr_forest, label='RandomForest')
plt.legend(loc='lower right')

In [ ]:
roc_auc_score(y_train_5, y_scores_forest)

In [ ]:
precision_score(y_train_5, y_scores_forest > 0.5)

In [ ]:
recall_score(y_train_5, y_scores_forest > 0.5)

3.4 Multiclass


In [ ]:
sgd_clf.fit(X_train, y_train.ravel())  # all 10 classes; SGDClassifier runs one-versus-all internally

In [ ]:
some_digit_scores = sgd_clf.decision_function([some_digit])

In [ ]:
sgd_clf.predict([some_digit])

In [ ]:
np.argmax(some_digit_scores)

In [ ]:
sgd_clf.classes_
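
The predicted class is the one with the highest score; indexing classes_ with the argmax (reusing some_digit_scores from above) reproduces the prediction:

In [ ]:
sgd_clf.classes_[np.argmax(some_digit_scores)]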

Manually choosing OvO or OvA


In [ ]:
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier

In [ ]:
ovo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))  # force one-versus-one

In [ ]:
ovo_clf.fit(X_train, y_train.ravel())

In [ ]:
ovo_clf.predict([some_digit])

In [ ]:
len(ovo_clf.estimators_)
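
With 10 classes, OvO trains one binary classifier per pair of classes: 10 * 9 / 2 = 45 estimators, which is what the cell above reports.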

In [ ]:
forest_clf.fit(X_train, y_train.ravel())  # random forests handle multiple classes natively, no OvO/OvA wrapper needed

In [ ]:
forest_clf.predict_proba([some_digit])

In [ ]:
# Time consuming, skip
# cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy")

In [ ]:
from sklearn.preprocessing import StandardScaler

In [ ]:
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))  # cast the uint8 pixels to float before scaling

In [ ]:
# Time consuming, skip
# cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy")

3.5 Error Analysis


In [ ]:
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train.ravel(), cv=3)

In [ ]:
conf_mx = confusion_matrix(y_train, y_train_pred)

In [ ]:
conf_mx

In [ ]:
plt.matshow(conf_mx, cmap=plt.cm.gray)

In [ ]:
rowsum = conf_mx.sum(axis=1, keepdims=True)  # number of images per actual class
rowsum

In [ ]:
norm_conf_mx = conf_mx / rowsum  # error rates per actual class, so frequent classes don't dominate
norm_conf_mx

In [ ]:
np.fill_diagonal(norm_conf_mx, 0)  # zero the diagonal so only the errors stand out
norm_conf_mx

In [ ]:
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)

In [ ]:
y_train_pred = y_train_pred.reshape(-1,)  # flatten both arrays so the boolean masks below are 1-D
y_train = y_train.reshape(-1,)
y_train.shape, y_train_pred.shape

In [ ]:
cl_a, cl_b = 3, 5
X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]
X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]
X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]
X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]

In [ ]:
fig, axes = plt.subplots(2, 2)
# https://github.com/ageron/handson-ml/issues/257
def plot_digits(instances, images_per_row=10, **options):
    size = 28
    pad_num = (-len(instances)) % images_per_row  # pad with blanks so every row is full
    instances_pad = np.concatenate([instances, np.zeros((pad_num, size**2))])
    image = []
    for i in range(0, len(instances_pad), images_per_row):
        row_image = np.concatenate([j.reshape((size,size)) for j in instances_pad[i:i+images_per_row]], axis=1)
        image.append(row_image)
    image = np.concatenate(image)
    plt.imshow(image, cmap = matplotlib.cm.binary, interpolation="nearest")
    plt.axis('off')

plt.subplot(2,2,1); plot_digits(X_aa[:25], images_per_row=5)
plt.subplot(2,2,2); plot_digits(X_ab[:25], images_per_row=5)
plt.subplot(2,2,3); plot_digits(X_ba[:25], images_per_row=5)
plt.subplot(2,2,4); plot_digits(X_bb[:25], images_per_row=5)
plt.subplots_adjust(right=1.0, bottom=0, wspace=0, hspace=0)
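
Top row: actual 3s (classified as 3 on the left, misclassified as 5 on the right). Bottom row: actual 5s (misclassified as 3 on the left, classified as 5 on the right).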

3.6 Multilabel Classification


In [ ]:
from sklearn.neighbors import KNeighborsClassifier

In [ ]:
y_train_larger = (y_train >= 7)  # first label: is the digit large (7, 8 or 9)?

In [ ]:
y_train_odd = (y_train % 2 == 1)  # second label: is the digit odd?

In [ ]:
y_multilabel = np.c_[y_train_larger, y_train_odd]  # two boolean labels per image

In [ ]:
knn_clf = KNeighborsClassifier()

In [ ]:
knn_clf.fit(X_train, y_multilabel)

In [ ]:
knn_clf.predict([some_digit])
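
Assuming some_digit is the 5 shown earlier, this returns [[False, True]]: 5 is not large (it is below 7) but it is odd.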

In [ ]:
# Time consuming, skip:
# y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3)
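
If the slow cross_val_predict above is run, a common multilabel summary is the macro-averaged F1 score; a sketch, assuming the y_train_knn_pred from the commented line:

In [ ]:
# Macro-average: compute F1 per label, then average (kept commented, see above)
# f1_score(y_multilabel, y_train_knn_pred, average="macro")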

3.7 Multioutput Classification


In [ ]:
noise = np.random.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise   # noisy input images
noise = np.random.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
y_train_mod = X_train           # target is the clean image: one output (a pixel intensity) per pixel
y_test_mod = X_test

In [ ]:
plt.imshow(X_train_mod[35000].reshape(28,28), cmap=matplotlib.cm.binary, interpolation="nearest")  # a noisy input digit

In [ ]:
knn_clf.fit(X_train_mod, y_train_mod)

In [ ]:
clean_digit = knn_clf.predict([X_train_mod[35000]])  # predict the clean image from the noisy one

In [ ]:
plt.imshow(clean_digit.reshape(28,28), cmap = matplotlib.cm.binary, interpolation="nearest")