In [1]:
    
import numpy as np
    
In [2]:
    
np.__version__
    
    Out[2]:
In [24]:
    
arr = np.array([
    [1, 8, 3],
    [4, 5, 6]
])
    
In [25]:
    
arr.sum(axis=0)
    
    Out[25]:
    array([ 5, 13,  9])
In [26]:
    
arr.sum(axis=1)
    
    Out[26]:
    array([12, 15])
In [27]:
    
arr[::-1]
    
    Out[27]:
    array([[4, 5, 6],
           [1, 8, 3]])
In [28]:
    
# this is equivalent to:
np.flip(arr,axis=0)
    
    Out[28]:
    array([[4, 5, 6],
           [1, 8, 3]])
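
The precision@k code below needs, for every row, the column indices in descending score order. A minimal sketch of two equivalent ways to get that ordering, using the same arr as above:

In [ ]:
    
# Sort each row ascending, then reverse within each row...
np.argsort(arr, axis=1)[:, ::-1]
# ...or equivalently, negate the values before sorting:
np.argsort(-arr, axis=1)
    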
In [33]:
    
# per-row indices that would sort each row ascending (argsort defaults to the last axis)
orders = np.argsort(arr)
orders
    
    Out[33]:
    array([[0, 2, 1],
           [0, 1, 2]])
In [36]:
    
# column vector holding, for each row, the index of its smallest element
first_elems = orders[:, :1]
    
In [41]:
    
# arr[:, first_elems] would broadcast the index array against every row;
# take_along_axis instead picks each row's own smallest element:
np.take_along_axis(arr, first_elems, axis=1)
    
    Out[41]:
    array([[1],
           [4]])
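
The same take_along_axis pattern generalizes from a single column to the full index array; a small sketch rearranging each row of arr into ascending order via the orders computed above:

In [ ]:
    
# Rearrange each row using its own argsort indices; yields [[1, 3, 8], [4, 5, 6]].
np.take_along_axis(arr, orders, axis=1)
    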
In [43]:
    
def ranking_precision_score(y_true, y_score, k=None):
    """Precision at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    precision @k : float
    """
    if k is None:
        k = 10
    unique_y = np.unique(y_true)
    if len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)
    # Indices in descending score order (1-D input, so a plain [::-1] is fine here).
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)
    # Divide by min(n_pos, k) so the best achievable score is always 1.0.
    return float(n_relevant) / min(n_pos, k)
    
In [63]:
    
def precision_at_k(y_true, y_preds, k=None):
    """Mean precision at rank k over samples.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples, n_labels]
        Ground truth (binary relevance indicators).
    y_preds : array-like, shape = [n_samples, n_labels]
        Predicted scores (label probabilities).
    k : int
        Rank.

    Returns
    -------
    mean precision @k : float
    """
    if k is None:
        k = 10
    assert y_true.shape == y_preds.shape, \
        "y_true (binary label vectors) and y_preds (predicted scores) must have the same shape."
    unique_y = np.unique(y_true)
    if len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels (binary "
                         "indicator arrays).")
    positive_label = unique_y[1]
    # Count positives per sample, not over the whole batch.
    n_positives = np.sum(y_true == positive_label, axis=1)
    # Per-row indices in descending score order; a bare [::-1] would reverse
    # the rows themselves, so reverse along axis 1 instead.
    order = np.argsort(y_preds, axis=1)[:, ::-1]
    # np.take would index the flattened array; take_along_axis keeps each
    # row's indices aligned with that row.
    top_k = np.take_along_axis(y_true, order[:, :k], axis=1)
    n_relevant = np.sum(top_k == positive_label, axis=1)
    # Divide by min(n_pos, k) so the best achievable score per sample is 1.0,
    # then average over samples.
    return float(np.mean(n_relevant / np.minimum(n_positives, k)))
    
    
In [69]:
    
y_trues = np.array([
    [1, 0, 0, 1],
    [0, 0, 1, 1]
])
y_preds = np.array([
    [0.25, 0.3, 0.2, 0.35],
    [0.2, 0.1, 0.4, 0.3]
])
    
In [71]:
    
score0 = ranking_precision_score(y_trues[0], y_preds[0], 2)
score1 = ranking_precision_score(y_trues[1], y_preds[1], 2)
score0, score1
    
    Out[71]:
    (0.5, 1.0)
In [64]:
    
precision_at_k(y_trues,y_preds,2)
    
    Out[64]:
    0.75
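
As a sanity check (a sketch reusing the functions defined above), the batched score should equal the mean of the per-row scores:

In [ ]:
    
# Mean of the per-row precision@2 values (0.5 and 1.0) should match the batched result, 0.75.
per_row = [ranking_precision_score(t, p, 2) for t, p in zip(y_trues, y_preds)]
np.mean(per_row) == precision_at_k(y_trues, y_preds, 2)
    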