In [1]:
%matplotlib inline

import matplotlib as plt
import numpy as np

from autotagger.helpers.preprocess import load_dataset
from sklearn import cross_validation,linear_model
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.multiclass import OneVsRestClassifier

In [2]:
# Load the StackOverflow dataset, keeping only the top 100 features.
# Presumably X is the document/feature matrix and Y a multilabel tag
# indicator matrix (OneVsRestClassifier below expects that) — TODO confirm
# against load_dataset's implementation.
X,Y = load_dataset("stackoverflow", max_features=100)
# NOTE(review): test_size=0.80 holds out 80% of the data and trains on only
# 20% — unusually small training share; confirm this is intentional.
# random_state=42 makes the split reproducible across runs.
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X,Y,test_size=0.80, random_state=42)

In [3]:
# One-vs-rest multilabel strategy: fit one independent binary logistic
# regression per label (sklearn defaults apply: L2 penalty, C=1.0).
clf = linear_model.LogisticRegression()
meta_clf = OneVsRestClassifier(clf)

In [4]:
# Train one binary classifier per label column of Y_train.
meta_clf.fit(X_train,Y_train)


Out[4]:
OneVsRestClassifier(estimator=LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
          verbose=0, warm_start=False),
          n_jobs=1)

In [5]:
# Predict the binary label indicator matrix for the held-out test set.
Y_pred = meta_clf.predict(X_test)

In [6]:
# 'macro' averaging: compute F1 independently per label, then take the
# unweighted mean across labels. Labels never predicted get F1 = 0 (hence
# the UndefinedMetricWarning below), which drags the macro score down.
f1_score(Y_test,Y_pred, average='macro')


/home/felipe/auto-tagger/venv3/lib/python3.4/site-packages/sklearn/metrics/classification.py:1074: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)
Out[6]:
0.037940736071986787

In [7]:
# Restrict the macro average to labels whose per-label F1 is nonzero; the
# reported score goes up.
# NOTE(review): np.nonzero keeps labels with F1 > 0 — this drops not only
# labels with no predicted samples, but also labels that WERE predicted yet
# scored F1 == 0 (all predictions wrong). So the filter is broader than
# "at least one instance predicted", and it biases the score upward.

# average=None returns an array with one F1 score per label.
label_scores = f1_score(Y_test,Y_pred,average=None)
# Indices of labels with nonzero F1 (np.nonzero returns a 1-tuple for 1-D).
valid_label_indices = np.nonzero(label_scores)[0]
f1_score(Y_test,Y_pred,average='macro',labels=valid_label_indices)


/home/felipe/auto-tagger/venv3/lib/python3.4/site-packages/sklearn/metrics/classification.py:1074: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)
Out[7]:
0.10539093353329665

In [8]:
# micro average refers to the average f1_score for each instance
f1_score(Y_test,Y_pred,average='micro')


Out[8]:
0.18395809178395894