In [1]:
import numpy as np
from sklearn.metrics import accuracy_score, mean_absolute_error

# The following 3 functions have been taken from Ben Hamner's github repository
# https://github.com/benhamner/Metrics
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
    """
    Returns the confusion matrix between rater's ratings
    """
    assert(len(rater_a) == len(rater_b))
    if min_rating is None:
        min_rating = min(rater_a + rater_b)
    if max_rating is None:
        max_rating = max(rater_a + rater_b)
    num_ratings = int(max_rating - min_rating + 1)
    conf_mat = [[0 for i in range(num_ratings)]
                for j in range(num_ratings)]
    for a, b in zip(rater_a, rater_b):
        conf_mat[a - min_rating][b - min_rating] += 1
    return conf_mat
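
# For example (illustrative ratings, not taken from the data used below):
# confusion_matrix([1, 2, 3], [1, 2, 2]) -> [[1, 0, 0], [0, 1, 0], [0, 1, 0]]
# Rows are indexed by rater_a's rating, columns by rater_b's rating.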


def histogram(ratings, min_rating=None, max_rating=None):
    """
    Returns the counts of each type of rating that a rater made
    """
    if min_rating is None:
        min_rating = min(ratings)
    if max_rating is None:
        max_rating = max(ratings)
    num_ratings = int(max_rating - min_rating + 1)
    hist_ratings = [0 for x in range(num_ratings)]
    for r in ratings:
        hist_ratings[r - min_rating] += 1
    return hist_ratings
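
# For example (illustrative ratings, not taken from the data used below):
# histogram([1, 2, 2, 3]) -> [1, 2, 1], the counts for ratings 1, 2 and 3.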


def quadratic_weighted_kappa(y, y_pred):
    """
    Calculates the quadratic weighted kappa
    quadratic_weighted_kappa calculates the quadratic weighted kappa
    value, which is a measure of inter-rater agreement between two raters
    that provide discrete numeric ratings.  Potential values range from -1
    (representing complete disagreement) to 1 (representing complete
    agreement).  A kappa value of 0 is expected if all agreement is due to
    chance.
    quadratic_weighted_kappa(y, y_pred), where y and y_pred each
    correspond to a list of integer ratings.  These lists must have the
    same length.
    The ratings should be integers, and it is assumed that they contain
    the complete range of possible ratings; the minimum and maximum
    possible ratings are inferred from the data.
    """
    rater_a = np.array(y, dtype=int)
    rater_b = np.array(y_pred, dtype=int)
    assert(len(rater_a) == len(rater_b))
    min_rating = min(min(rater_a), min(rater_b))
    max_rating = max(max(rater_a), max(rater_b))
    conf_mat = confusion_matrix(rater_a, rater_b,
                                min_rating, max_rating)
    num_ratings = len(conf_mat)
    num_scored_items = float(len(rater_a))

    hist_rater_a = histogram(rater_a, min_rating, max_rating)
    hist_rater_b = histogram(rater_b, min_rating, max_rating)

    numerator = 0.0
    denominator = 0.0

    for i in range(num_ratings):
        for j in range(num_ratings):
            expected_count = (hist_rater_a[i] * hist_rater_b[j]
                              / num_scored_items)
            d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
            numerator += d * conf_mat[i][j] / num_scored_items
            denominator += d * expected_count / num_scored_items

    return (1.0 - numerator / denominator)
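
As a cross-check, the hand-rolled metric above should match scikit-learn's built-in cohen_kappa_score with quadratic weights. A minimal sketch, assuming scikit-learn >= 0.18 is installed and that the ratings cover every level between the minimum and maximum (the two implementations can differ when intermediate levels are missing):

In [ ]:
from sklearn.metrics import cohen_kappa_score

a = [1, 2, 3, 1, 4, 4, 4, 4, 4, 4]
b = [4, 2, 3, 1, 4, 4, 4, 4, 4, 4]
# The two numbers should agree up to floating-point error
print(quadratic_weighted_kappa(a, b))
print(cohen_kappa_score(a, b, weights="quadratic"))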

In [6]:
Y_real = np.array([1,2,3,1,4,4,4,4,4,4])  
Y_pred = np.array([4,2,3,1,4,4,4,4,4,4])

print("ONE EXTREME ERROR")
print("Ground truth:\t%s"%Y_real)
print("Predicted   :\t%s"%Y_pred)
print("MAE         :\t%s"%mean_absolute_error(Y_real,Y_pred))
print("Accuracy    :\t%s"%accuracy_score(Y_real,Y_pred))
print("Kappa       :\t%s"%quadratic_weighted_kappa(Y_real,Y_pred))
print()


ONE EXTREME ERROR
Ground truth:	[1 2 3 1 4 4 4 4 4 4]
Predicted   :	[4 2 3 1 4 4 4 4 4 4]
MAE         :	0.3
Accuracy    :	0.9
Kappa       :	0.656488549618
()

In [3]:
Y_real = np.array([1,2,3,1,4,4,4,4,4,4])
Y_pred = np.array([1,2,3,1,4,3,3,3,3,2])

print("FIVE SMALL ERRORS")
print("Ground truth:\t%s"%Y_real)
print("Predicted   :\t%s"%Y_pred)
print("MAE         :\t%s"%mean_absolute_error(Y_real,Y_pred))
print("Accuracy    :\t%s"%accuracy_score(Y_real,Y_pred))
print("Kappa       :\t%s"%quadratic_weighted_kappa(Y_real,Y_pred))
print()


FIVE SMALL ERRORS
Ground truth:	[1 2 3 1 4 4 4 4 4 4]
Predicted   :	[1 2 3 1 4 3 3 3 3 2]
MAE         :	0.6
Accuracy    :	0.5
Kappa       :	0.703703703704
()

In [4]:
Y_real = np.array([1,1,3,1,4,4,4,4,4,4])
Y_pred = np.array([1,1,3,1,4,3,3,3,3,2])

print("KAPPA CHANGES WHEN DISTRIBUTION CHANGES")
print("Ground truth:\t%s"%Y_real)
print("Predicted   :\t%s"%Y_pred)
print("MAE         :\t%s"%mean_absolute_error(Y_real,Y_pred))
print("Accuracy    :\t%s"%accuracy_score(Y_real,Y_pred))
print("Kappa       :\t%s"%quadratic_weighted_kappa(Y_real,Y_pred))


KAPPA CHANGES WHEN DISTRIBUTION CHANGES
Ground truth:	[1 1 3 1 4 4 4 4 4 4]
Predicted   :	[1 1 3 1 4 3 3 3 3 2]
MAE         :	0.6
Accuracy    :	0.5
Kappa       :	0.75
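
The last two cells contain exactly the same mistakes (four predictions off by one and one off by two), so MAE and accuracy do not move; only the rating histograms differ. The sketch below (not part of the original runs; it reuses the helpers from the first cell) splits kappa into its observed and chance-expected weighted disagreement: the observed term is about 0.089 in both runs, while the expected term grows from 0.300 to about 0.356 when the correctly predicted 2 becomes a correctly predicted 1, so the same errors look better relative to chance and kappa rises from about 0.704 to 0.75.

In [ ]:
def observed_and_expected(y, y_pred):
    # Reuses confusion_matrix and histogram from the first cell to expose the
    # two terms that quadratic_weighted_kappa combines as 1 - observed / expected
    rater_a = np.array(y, dtype=int)
    rater_b = np.array(y_pred, dtype=int)
    min_rating = min(min(rater_a), min(rater_b))
    max_rating = max(max(rater_a), max(rater_b))
    num_ratings = int(max_rating - min_rating + 1)
    num_items = float(len(rater_a))
    conf_mat = confusion_matrix(rater_a, rater_b, min_rating, max_rating)
    hist_a = histogram(rater_a, min_rating, max_rating)
    hist_b = histogram(rater_b, min_rating, max_rating)
    observed = 0.0
    expected = 0.0
    for i in range(num_ratings):
        for j in range(num_ratings):
            d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
            observed += d * conf_mat[i][j] / num_items
            expected += d * hist_a[i] * hist_b[j] / num_items / num_items
    return observed, expected

Y_pred = np.array([1,2,3,1,4,3,3,3,3,2])
for Y_real in (np.array([1,2,3,1,4,4,4,4,4,4]), np.array([1,1,3,1,4,4,4,4,4,4])):
    observed, expected = observed_and_expected(Y_real, Y_pred)
    print("observed %.3f  expected %.3f  kappa %.3f"
          % (observed, expected, 1.0 - observed / expected))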