In [1]:
import time
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.spatial.distance import cosine
from sklearn.metrics.pairwise import cosine_similarity, pairwise_distances
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error

%matplotlib inline

Loading the Book Ratings Dataset


In [2]:
ratings = pd.read_csv('../raw-data/BX-Book-Ratings.csv', encoding='iso-8859-1', sep = ';')
ratings.columns = ['user_id', 'isbn', 'book_rating']

In [3]:
print(ratings.dtypes)
print()
print(ratings.head())
print()
print("Data Points :", ratings.shape[0])


user_id         int64
isbn           object
book_rating     int64
dtype: object

   user_id        isbn  book_rating
0   276725  034545104X            0
1   276726  0155061224            5
2   276727  0446520802            0
3   276729  052165615X            3
4   276729  0521795028            6

Data Points : 1149780

Loading the Books Dataset


In [60]:
books = pd.read_csv('../raw-data/BX-Books.csv', sep=';', encoding = 'iso-8859-1', dtype =str)
del books['Image-URL-L']
del books['Image-URL-M']
del books['Image-URL-S']
del books['Book-Author']
del books['Publisher']

Some Books don't have unique ISBN, creating a 1:1 maping between books-title and ISBN


In [61]:
print('Number of Books == Number of ISBN ? ', books["Book-Title"].nunique() == books["ISBN"].nunique())
book_dict = books[["Book-Title","ISBN"]].set_index("Book-Title").to_dict()["ISBN"]
books['new_isbn'] = books["Book-Title"].apply(lambda x: book_dict[x])
print('Number of Books == Number of ISBN ? ', books["Book-Title"].nunique() == books["new_isbn"].nunique())


Number of Books == Number of ISBN ?  False
Number of Books == Number of ISBN ?  True

In [62]:
books['isbn'] = books['new_isbn']

del books['ISBN']
del books['new_isbn']

Data Preparation/ Cleaning

Removing ratings equal to zero, since Book Crossing Dataset has rating scale from 1-10. Taking Inner Join with books dataframe to maintain books whose details exist.


In [63]:
newdf = ratings[ratings.book_rating>0]
joined = books.merge(newdf, on ='isbn')
print(newdf.shape)


(433671, 3)

Sampling

Book Crossing Dataset is a very sparse Dataset with sparsity of more than 99.99%. In order to choose a small subset of dataset with constraints given in the dataset, we selected top 100 items which have been rated the most and took intersection with top 20000 users who have given ratings to more books. Further we remove all users who rated only one book to make the data more denser. This gave a dataset with 2517 users and 100 items with 8242 data points. In order to verify the scalability of our model we created different dataset with increasing book counts (150, 200, 300, 500)


In [58]:
datasets = []
for j in [100, 150, 200, 300, 500]:
    df = joined.groupby('isbn').count().sort_values('user_id', ascending =False)[0:j].index.values
    test = joined.groupby('user_id').count().sort_values('isbn', ascending = False)[:20000].index.values
    newdf = joined[joined.user_id.isin(test) & joined.isbn.isin(df)]
    data  = newdf[newdf['user_id'].isin(newdf['user_id'].value_counts()[newdf['user_id'].value_counts()>1].index)]
    print("users books")
    print(data.user_id.nunique(), data.isbn.nunique())
    print()
    print('Sparsity :', data.shape[0]/(data.user_id.nunique() * data.isbn.nunique()))
    print()
    print(data.shape)
    print()
    print(data.groupby('user_id').count().sort_values('isbn', ascending = False).mean())
    print()
    datasets.append(data)


users books
2517 100

Sparsity : 0.03274533174413985

(8242, 5)

Book-Title             3.274533
Year-Of-Publication    3.274533
isbn                   3.274533
book_rating            3.274533
dtype: float64

users books
3086 150

Sparsity : 0.023393821559732124

(10829, 5)

Book-Title             3.509073
Year-Of-Publication    3.509073
isbn                   3.509073
book_rating            3.509073
dtype: float64

users books
3543 200

Sparsity : 0.01872847869037539

(13271, 5)

Book-Title             3.745696
Year-Of-Publication    3.745696
isbn                   3.745696
book_rating            3.745696
dtype: float64

users books
4280 300

Sparsity : 0.013364485981308412

(17160, 5)

Book-Title             4.009346
Year-Of-Publication    4.009346
isbn                   4.009346
book_rating            4.009346
dtype: float64

users books
5282 500

Sparsity : 0.008777357061719046

(23181, 5)

Book-Title             4.388679
Year-Of-Publication    4.388679
isbn                   4.388679
book_rating            4.388679
dtype: float64

Taking Dataset with 100 items

Algo 1: Memory Based Algorithm : Item-Item CF Algorithm

Since average number of books rated by an user was around 3.3, we decided to use item-item CF as our memory based algorithm. Our implementation of item-item algorithm is below:


In [9]:
data = datasets[0]
rows = data.user_id.unique()
cols = data['Book-Title'].unique()
print(data.user_id.nunique(), data.isbn.nunique())
data = data[['user_id', 'Book-Title', 'book_rating']]


2517 100

In [10]:
print("Sparsity :", 100 - (data.shape[0]/(len(cols)*len(rows)) * 100))


Sparsity : 96.72546682558601

In [11]:
idict  = dict(zip(cols, range(len(cols))))
udict = dict(zip(rows, range(len(rows))))

data.user_id = [
    udict[i] for i in data.user_id
]
data['Book-Title'] = [
    idict[i] for i in data['Book-Title']
]

nmat = data.as_matrix()


/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/pandas/core/generic.py:2701: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  self[name] = value
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/ipykernel/__main__.py:8: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy

In [12]:
nmat


Out[12]:
array([[   0,    0,   10],
       [   1,    0,    5],
       [   2,    0,    8],
       ..., 
       [2081,   99,    8],
       [ 288,   99,    8],
       [2088,   99,    8]])

Function for Evaluation Metrics: MAE and RMSE


In [13]:
def rmse(ypred, ytrue):
    ypred = ypred[ytrue.nonzero()].flatten()
    ytrue = ytrue[ytrue.nonzero()].flatten()
    return np.sqrt(mean_squared_error(ypred, ytrue))

def mae(ypred, ytrue):
    ypred = ypred[ytrue.nonzero()].flatten()
    ytrue = ytrue[ytrue.nonzero()].flatten()
    return mean_absolute_error(ypred, ytrue)

Our Naive Baseline for any user i, item j prediction is to assign it with (sum of mean rating given by user i (umean[i]), mean rating received by item j (imean[j]) substracting average rating over entire dataset. (amean))

-------------- Naive Baseline ---------------


In [14]:
def predict_naive(user, item):
    prediction = imean1[item] + umean1[user] - amean1
    return prediction

In [15]:
x1, x2 = train_test_split(nmat, test_size = 0.2, random_state =42)
naive = np.zeros((len(rows),len(cols)))
for row in x1:
    naive[row[0], row[1]] = row[2]

predictions = []
targets = []

amean1 = np.mean(naive[naive!=0])
umean1 = sum(naive.T) / sum((naive!=0).T)
imean1 = sum(naive) / sum((naive!=0))

umean1 = np.where(np.isnan(umean1), amean1, umean1)
imean1 = np.where(np.isnan(imean1), amean1, imean1)


print('Naive---')
for row in x2:
    user, item, actual = row[0], row[1], row[2]
    predictions.append(predict_naive(user, item))
    targets.append(actual)

print('rmse %.4f' % rmse(np.array(predictions), np.array(targets)))
print('mae %.4f' % mae(np.array(predictions), np.array(targets)))
print()


Naive---
rmse 1.7181
mae 1.2156

Following are the functions to calculate pairwise similarity between two items : Cosine, Adjusted Cosine, Euclidean, Pearson Corelation.


In [16]:
def cos(mat, a, b):
    if a == b:
        return 1
    aval = mat.T[a].nonzero()
    bval = mat.T[b].nonzero()
    corated = np.intersect1d(aval, bval)
    if len(corated) == 0:
        return 0
    avec = np.take(mat.T[a], corated)
    bvec = np.take(mat.T[b], corated)
    val = 1 - cosine(avec, bvec)
    if np.isnan(val):
        return 0
    return val

In [17]:
def adjcos(mat, a, b, umean):
    if a == b:
        return 1
    aval = mat.T[a].nonzero()
    bval = mat.T[b].nonzero()
    corated = np.intersect1d(aval, bval)
    if len(corated) == 0:
        return 0
    avec = np.take(mat.T[a], corated)
    bvec = np.take(mat.T[b], corated)
    avec1 = avec - umean[corated]
    bvec1 = bvec - umean[corated]
    val = 1 - cosine(avec1, bvec1)
    if np.isnan(val):
        return 0
    return val

In [18]:
def pr(mat, a, b, imean):
    if a == b:
        return 1
    aval = mat.T[a].nonzero()
    bval = mat.T[b].nonzero()
    corated = np.intersect1d(aval, bval)
    if len(corated) < 2:
        return 0
    avec = np.take(mat.T[a], corated)
    bvec = np.take(mat.T[b], corated)
    avec1 = avec - imean[a]
    bvec1 = bvec - imean[b]
    val = 1 - cosine(avec1, bvec1)
    if np.isnan(val):
        return 0
    return val

In [19]:
def euc(mat, a, b):
    if a == b:
        return 1
    aval = mat.T[a].nonzero()
    bval = mat.T[b].nonzero()
    corated = np.intersect1d(aval, bval)
    if len(corated) == 0:
        return 0
    avec = np.take(mat.T[a], corated)
    bvec = np.take(mat.T[b], corated)
    dist = np.sqrt(np.sum(a-b)**2)
    val = 1/(1+dist)
    if np.isnan(val):
        return 0
    return val

Function item similar returns matrix of pairwise similarity between all items based on the option provided. Also return amean (global mean rating), umean (average rating of each user), imean (Average rating of each item)


In [20]:
def itemsimilar(mat, option):
    amean = np.mean(mat[mat!=0])
    umean = sum(mat.T) / sum((mat!=0).T)
    imean = sum(mat) / sum((mat!=0))
    
    umean = np.where(np.isnan(umean), amean, umean)
    imean = np.where(np.isnan(imean), amean, imean)
    
    n = mat.shape[1]
    sim_mat = np.zeros((n, n))
    
    if option == 'pr':
        #print("PR")
        for i in range(n):
            for j in range(n):
                sim_mat[i][j] = pr(mat, i, j, imean)
        sim_mat = (sim_mat + 1)/2
    elif option == 'cos':
        #print("COS")
        for i in range(n):
            for j in range(n):
                sim_mat[i][j] = cos(mat, i, j)
    elif option == 'adjcos':
        #print("ADJCOS")
        for i in range(n):
            for j in range(n):
                sim_mat[i][j] = adjcos(mat, i, j, umean)
        sim_mat = (sim_mat + 1)/2
    elif option == 'euc':
        #print("EUCLIDEAN")
        for i in range(n):
            for j in range(n):
                sim_mat[i][j] = euc(mat, i, j)
    else:
        #print("Hello")
        sim_mat = cosine_similarity(mat.T)
    
    return sim_mat, amean, umean, imean

Predict function is used to get recommended rating by user i for item j.


In [21]:
def predict(user, item, mat, item_similarity, amean, umean, imean,  k=20):
    nzero = mat[user].nonzero()[0]
    if len(nzero) == 0:
        return amean
    baseline = imean + umean[user] - amean
    choice = nzero[item_similarity[item, nzero].argsort()[::-1][:k]]
    prediction = ((mat[user, choice] - baseline[choice]).dot(item_similarity[item, choice])/ sum(item_similarity[item, choice])) + baseline[item]
        
    if np.isnan(prediction):
        prediction = amean
    if prediction > 10:
        prediction = 10
    if prediction < 1:
        prediction = 1
    return prediction

get_results function is our function to cross_val setup and changing the parameter of this function will help to tune hyperparameter k (nearest neighbours)


In [22]:
def get_results(X,  option, rows, cols, folds, k, timing = False):
    kf =  KFold(n_splits=folds, shuffle = True, random_state=42)
    count = 1
    rmse_list = []
    mae_list = []
    trmse_list = []
    tmae_list = []
    for train_index, test_index in kf.split(X):
        print("----------   Fold ", count, "---------------")
        train_data, test_data = X[train_index], X[test_index]
        
        full_mat = np.zeros((rows, cols))
        
        for row in train_data:
            full_mat[row[0], row[1]] = row[2]
        
        if timing:
            start = time.time()
            
        item_similarity, amean, umean, imean = itemsimilar(full_mat, option)
        
        if timing:
            end = time.time()
            train_time = end - start
            print("Training Time : ", train_time)
        
        preds = []
        real = []
        
       
        for row in train_data:
            user_id, isbn, rating = row[0], row[1], row[2]
            preds.append(predict(user_id, isbn, full_mat, item_similarity, amean, umean, imean, k))
            real.append(rating)
        
        
        err1 = rmse(np.array(preds), np.array(real))
        err2 = mae(np.array(preds), np.array(real))
        trmse_list.append(err1)
        tmae_list.append(err2)
        
        print('Train Errors')
        print('RMSE : %.4f' % err1)
        print('MAE : %.4f' % err2)
        
        preds = []
        real = []
        
        if timing:
            start = time.time()
        
        for row in test_data:
            user_id, isbn, rating = row[0], row[1], row[2]
            preds.append(predict(user_id, isbn, full_mat, item_similarity, amean, umean, imean, k))
            real.append(rating)
        
        if timing:
            end = time.time()
            test_time = end - start
            print("Prediction Time : ", test_time)
        
        err1 = rmse(np.array(preds), np.array(real))
        err2 = mae(np.array(preds), np.array(real))
        rmse_list.append(err1)
        mae_list.append(err2)
        
       
        
        print('Test Errors')
        print('RMSE : %.4f' % err1)
        print('MAE : %.4f' % err2)
        count+=1
        
        if timing:
            return train_time, test_time
    
    print("-------------------------------------")
    print("Training Avg Error:")
    print("AVG RMSE :", str(np.mean(trmse_list)))
    print("AVG MAE :", str(np.mean(tmae_list)))
    print()
    print("Testing Avg Error:")
    print("AVG RMSE :", str(np.mean(rmse_list)))
    print("AVG MAE :", str(np.mean(mae_list)))
    print(" ")
        
    return np.mean(mae_list), np.mean(rmse_list)

Grid Search for best K for item-item CF using all the similarity metric implemented.


In [23]:
sims = []
sims_rmse = []
for arg in ['euc','cos','','pr','adjcos']:
    each_sims = []
    each_sims_rmse = []
    for k in [2, 3, 4, 5, 10, 15, 20, 25]:
        print(arg, k)
        ans1, ans2  = get_results(nmat, arg, len(rows), len(cols), 5 ,k)
        each_sims.append(ans1)
        each_sims_rmse.append(ans2)
        
    print()
    print("Best K Value for ", arg)
    print()
    print("Min MAE")
    print(np.min(each_sims), np.argmin(each_sims))
    print("Min RMSE")
    print(np.min(each_sims_rmse), np.argmin(each_sims_rmse))
    print()
    sims.append(each_sims)
    sims_rmse.append(each_sims_rmse)


euc 2
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2425
MAE : 0.1169
Test Errors
RMSE : 1.5845
MAE : 0.9888
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2371
MAE : 0.1144
Test Errors
RMSE : 1.5260
MAE : 0.9736
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2383
MAE : 0.1157
Test Errors
RMSE : 1.5302
MAE : 0.9649
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2389
MAE : 0.1160
Test Errors
RMSE : 1.5309
MAE : 0.9700
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2377
MAE : 0.1150
Test Errors
RMSE : 1.6196
MAE : 0.9774
-------------------------------------
Training Avg Error:
AVG RMSE : 0.238903809744
AVG MAE : 0.115603824721

Testing Avg Error:
AVG RMSE : 1.55822204728
AVG MAE : 0.974936715721
 
euc 3
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2787
MAE : 0.1364
Test Errors
RMSE : 1.5687
MAE : 0.9829
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2697
MAE : 0.1330
Test Errors
RMSE : 1.5056
MAE : 0.9592
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2712
MAE : 0.1353
Test Errors
RMSE : 1.5137
MAE : 0.9579
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2699
MAE : 0.1348
Test Errors
RMSE : 1.5124
MAE : 0.9618
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2726
MAE : 0.1345
Test Errors
RMSE : 1.6097
MAE : 0.9747
-------------------------------------
Training Avg Error:
AVG RMSE : 0.272405209969
AVG MAE : 0.134798554992

Testing Avg Error:
AVG RMSE : 1.54201878964
AVG MAE : 0.96731918614
 
euc 4
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2914
MAE : 0.1430
Test Errors
RMSE : 1.5625
MAE : 0.9788
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2868
MAE : 0.1410
Test Errors
RMSE : 1.4997
MAE : 0.9563
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2855
MAE : 0.1429
Test Errors
RMSE : 1.5108
MAE : 0.9589
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2859
MAE : 0.1427
Test Errors
RMSE : 1.5071
MAE : 0.9594
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2898
MAE : 0.1422
Test Errors
RMSE : 1.6069
MAE : 0.9731
-------------------------------------
Training Avg Error:
AVG RMSE : 0.287894722992
AVG MAE : 0.142365102399

Testing Avg Error:
AVG RMSE : 1.53740603735
AVG MAE : 0.965311673664
 
euc 5
----------   Fold  1 ---------------
Train Errors
RMSE : 0.3009
MAE : 0.1471
Test Errors
RMSE : 1.5606
MAE : 0.9771
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2950
MAE : 0.1449
Test Errors
RMSE : 1.4980
MAE : 0.9545
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2938
MAE : 0.1464
Test Errors
RMSE : 1.5093
MAE : 0.9571
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2941
MAE : 0.1464
Test Errors
RMSE : 1.5062
MAE : 0.9581
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2980
MAE : 0.1453
Test Errors
RMSE : 1.6068
MAE : 0.9734
-------------------------------------
Training Avg Error:
AVG RMSE : 0.296340102405
AVG MAE : 0.146041424987

Testing Avg Error:
AVG RMSE : 1.53619565103
AVG MAE : 0.96403828336
 
euc 10
----------   Fold  1 ---------------
Train Errors
RMSE : 0.3160
MAE : 0.1517
Test Errors
RMSE : 1.5598
MAE : 0.9777
----------   Fold  2 ---------------
Train Errors
RMSE : 0.3098
MAE : 0.1498
Test Errors
RMSE : 1.4969
MAE : 0.9537
----------   Fold  3 ---------------
Train Errors
RMSE : 0.3083
MAE : 0.1515
Test Errors
RMSE : 1.5079
MAE : 0.9568
----------   Fold  4 ---------------
Train Errors
RMSE : 0.3092
MAE : 0.1513
Test Errors
RMSE : 1.5046
MAE : 0.9568
----------   Fold  5 ---------------
Train Errors
RMSE : 0.3138
MAE : 0.1504
Test Errors
RMSE : 1.6031
MAE : 0.9709
-------------------------------------
Training Avg Error:
AVG RMSE : 0.311414784942
AVG MAE : 0.150945763877

Testing Avg Error:
AVG RMSE : 1.53446973171
AVG MAE : 0.963200021069
 
euc 15
----------   Fold  1 ---------------
Train Errors
RMSE : 0.3220
MAE : 0.1531
Test Errors
RMSE : 1.5597
MAE : 0.9777
----------   Fold  2 ---------------
Train Errors
RMSE : 0.3145
MAE : 0.1511
Test Errors
RMSE : 1.4979
MAE : 0.9549
----------   Fold  3 ---------------
Train Errors
RMSE : 0.3120
MAE : 0.1524
Test Errors
RMSE : 1.5081
MAE : 0.9567
----------   Fold  4 ---------------
Train Errors
RMSE : 0.3137
MAE : 0.1525
Test Errors
RMSE : 1.5037
MAE : 0.9567
----------   Fold  5 ---------------
Train Errors
RMSE : 0.3198
MAE : 0.1517
Test Errors
RMSE : 1.6033
MAE : 0.9712
-------------------------------------
Training Avg Error:
AVG RMSE : 0.316395477984
AVG MAE : 0.152155653145

Testing Avg Error:
AVG RMSE : 1.53453826803
AVG MAE : 0.963446605385
 
euc 20
----------   Fold  1 ---------------
Train Errors
RMSE : 0.3246
MAE : 0.1536
Test Errors
RMSE : 1.5595
MAE : 0.9776
----------   Fold  2 ---------------
Train Errors
RMSE : 0.3171
MAE : 0.1516
Test Errors
RMSE : 1.4980
MAE : 0.9550
----------   Fold  3 ---------------
Train Errors
RMSE : 0.3140
MAE : 0.1528
Test Errors
RMSE : 1.5079
MAE : 0.9565
----------   Fold  4 ---------------
Train Errors
RMSE : 0.3165
MAE : 0.1530
Test Errors
RMSE : 1.5035
MAE : 0.9566
----------   Fold  5 ---------------
Train Errors
RMSE : 0.3219
MAE : 0.1521
Test Errors
RMSE : 1.6032
MAE : 0.9712
-------------------------------------
Training Avg Error:
AVG RMSE : 0.318793389418
AVG MAE : 0.152598783459

Testing Avg Error:
AVG RMSE : 1.53441108883
AVG MAE : 0.963370898085
 
euc 25
----------   Fold  1 ---------------
Train Errors
RMSE : 0.3263
MAE : 0.1539
Test Errors
RMSE : 1.5595
MAE : 0.9774
----------   Fold  2 ---------------
Train Errors
RMSE : 0.3186
MAE : 0.1518
Test Errors
RMSE : 1.4981
MAE : 0.9550
----------   Fold  3 ---------------
Train Errors
RMSE : 0.3153
MAE : 0.1530
Test Errors
RMSE : 1.5077
MAE : 0.9563
----------   Fold  4 ---------------
Train Errors
RMSE : 0.3181
MAE : 0.1532
Test Errors
RMSE : 1.5037
MAE : 0.9567
----------   Fold  5 ---------------
Train Errors
RMSE : 0.3235
MAE : 0.1523
Test Errors
RMSE : 1.6033
MAE : 0.9713
-------------------------------------
Training Avg Error:
AVG RMSE : 0.320334746553
AVG MAE : 0.15285671534

Testing Avg Error:
AVG RMSE : 1.53444246027
AVG MAE : 0.963354863854
 

Best K Value for  euc

Min MAE
0.963200021069 4
Min RMSE
1.53441108883 6

cos 2
----------   Fold  1 ---------------
Train Errors
RMSE : 0.8518
MAE : 0.5394
Test Errors
RMSE : 1.6265
MAE : 1.0794
----------   Fold  2 ---------------
Train Errors
RMSE : 0.8722
MAE : 0.5447
Test Errors
RMSE : 1.5799
MAE : 1.0751
----------   Fold  3 ---------------
Train Errors
RMSE : 0.8948
MAE : 0.5612
Test Errors
RMSE : 1.5706
MAE : 1.0602
----------   Fold  4 ---------------
Train Errors
RMSE : 0.8739
MAE : 0.5518
Test Errors
RMSE : 1.5473
MAE : 1.0501
----------   Fold  5 ---------------
Train Errors
RMSE : 0.8544
MAE : 0.5433
Test Errors
RMSE : 1.6965
MAE : 1.1014
-------------------------------------
Training Avg Error:
AVG RMSE : 0.869422400469
AVG MAE : 0.548066224104

Testing Avg Error:
AVG RMSE : 1.60416952823
AVG MAE : 1.07324103048
 
cos 3
----------   Fold  1 ---------------
Train Errors
RMSE : 0.9188
MAE : 0.5970
Test Errors
RMSE : 1.6121
MAE : 1.0769
----------   Fold  2 ---------------
Train Errors
RMSE : 0.9402
MAE : 0.5984
Test Errors
RMSE : 1.5479
MAE : 1.0510
----------   Fold  3 ---------------
Train Errors
RMSE : 0.9532
MAE : 0.6161
Test Errors
RMSE : 1.5526
MAE : 1.0487
----------   Fold  4 ---------------
Train Errors
RMSE : 0.9433
MAE : 0.6103
Test Errors
RMSE : 1.5398
MAE : 1.0505
----------   Fold  5 ---------------
Train Errors
RMSE : 0.9237
MAE : 0.5977
Test Errors
RMSE : 1.6740
MAE : 1.0946
-------------------------------------
Training Avg Error:
AVG RMSE : 0.935857496217
AVG MAE : 0.603889524602

Testing Avg Error:
AVG RMSE : 1.5852944281
AVG MAE : 1.06433901343
 
cos 4
----------   Fold  1 ---------------
Train Errors
RMSE : 0.9531
MAE : 0.6236
Test Errors
RMSE : 1.6044
MAE : 1.0766
----------   Fold  2 ---------------
Train Errors
RMSE : 0.9801
MAE : 0.6277
Test Errors
RMSE : 1.5408
MAE : 1.0463
----------   Fold  3 ---------------
Train Errors
RMSE : 0.9863
MAE : 0.6407
Test Errors
RMSE : 1.5470
MAE : 1.0480
----------   Fold  4 ---------------
Train Errors
RMSE : 0.9791
MAE : 0.6345
Test Errors
RMSE : 1.5306
MAE : 1.0466
----------   Fold  5 ---------------
Train Errors
RMSE : 0.9579
MAE : 0.6224
Test Errors
RMSE : 1.6715
MAE : 1.0938
-------------------------------------
Training Avg Error:
AVG RMSE : 0.971300476915
AVG MAE : 0.629757994204

Testing Avg Error:
AVG RMSE : 1.57886628547
AVG MAE : 1.06226326722
 
cos 5
----------   Fold  1 ---------------
Train Errors
RMSE : 0.9758
MAE : 0.6377
Test Errors
RMSE : 1.6012
MAE : 1.0757
----------   Fold  2 ---------------
Train Errors
RMSE : 0.9969
MAE : 0.6413
Test Errors
RMSE : 1.5326
MAE : 1.0425
----------   Fold  3 ---------------
Train Errors
RMSE : 1.0004
MAE : 0.6503
Test Errors
RMSE : 1.5452
MAE : 1.0486
----------   Fold  4 ---------------
Train Errors
RMSE : 0.9989
MAE : 0.6476
Test Errors
RMSE : 1.5283
MAE : 1.0458
----------   Fold  5 ---------------
Train Errors
RMSE : 0.9794
MAE : 0.6386
Test Errors
RMSE : 1.6674
MAE : 1.0917
-------------------------------------
Training Avg Error:
AVG RMSE : 0.99029970246
AVG MAE : 0.6431006575

Testing Avg Error:
AVG RMSE : 1.57494430957
AVG MAE : 1.06086334643
 
cos 10
----------   Fold  1 ---------------
Train Errors
RMSE : 0.9949
MAE : 0.6513
Test Errors
RMSE : 1.6036
MAE : 1.0798
----------   Fold  2 ---------------
Train Errors
RMSE : 1.0142
MAE : 0.6554
Test Errors
RMSE : 1.5304
MAE : 1.0416
----------   Fold  3 ---------------
Train Errors
RMSE : 1.0233
MAE : 0.6675
Test Errors
RMSE : 1.5439
MAE : 1.0490
----------   Fold  4 ---------------
Train Errors
RMSE : 1.0245
MAE : 0.6655
Test Errors
RMSE : 1.5246
MAE : 1.0415
----------   Fold  5 ---------------
Train Errors
RMSE : 0.9990
MAE : 0.6544
Test Errors
RMSE : 1.6592
MAE : 1.0842
-------------------------------------
Training Avg Error:
AVG RMSE : 1.0111592769
AVG MAE : 0.658845547064

Testing Avg Error:
AVG RMSE : 1.57232606819
AVG MAE : 1.05921676655
 
cos 15
----------   Fold  1 ---------------
Train Errors
RMSE : 0.9946
MAE : 0.6521
Test Errors
RMSE : 1.6031
MAE : 1.0787
----------   Fold  2 ---------------
Train Errors
RMSE : 1.0145
MAE : 0.6566
Test Errors
RMSE : 1.5298
MAE : 1.0413
----------   Fold  3 ---------------
Train Errors
RMSE : 1.0252
MAE : 0.6682
Test Errors
RMSE : 1.5436
MAE : 1.0491
----------   Fold  4 ---------------
Train Errors
RMSE : 1.0247
MAE : 0.6669
Test Errors
RMSE : 1.5228
MAE : 1.0401
----------   Fold  5 ---------------
Train Errors
RMSE : 0.9984
MAE : 0.6556
Test Errors
RMSE : 1.6573
MAE : 1.0823
-------------------------------------
Training Avg Error:
AVG RMSE : 1.01145772013
AVG MAE : 0.659865728615

Testing Avg Error:
AVG RMSE : 1.57132673565
AVG MAE : 1.05829920639
 
cos 20
----------   Fold  1 ---------------
Train Errors
RMSE : 0.9950
MAE : 0.6520
Test Errors
RMSE : 1.6034
MAE : 1.0784
----------   Fold  2 ---------------
Train Errors
RMSE : 1.0158
MAE : 0.6575
Test Errors
RMSE : 1.5287
MAE : 1.0411
----------   Fold  3 ---------------
Train Errors
RMSE : 1.0261
MAE : 0.6687
Test Errors
RMSE : 1.5440
MAE : 1.0490
----------   Fold  4 ---------------
Train Errors
RMSE : 1.0260
MAE : 0.6676
Test Errors
RMSE : 1.5230
MAE : 1.0407
----------   Fold  5 ---------------
Train Errors
RMSE : 1.0004
MAE : 0.6564
Test Errors
RMSE : 1.6575
MAE : 1.0820
-------------------------------------
Training Avg Error:
AVG RMSE : 1.0126620588
AVG MAE : 0.660428699154

Testing Avg Error:
AVG RMSE : 1.57130438492
AVG MAE : 1.05824640312
 
cos 25
----------   Fold  1 ---------------
Train Errors
RMSE : 0.9972
MAE : 0.6530
Test Errors
RMSE : 1.6029
MAE : 1.0778
----------   Fold  2 ---------------
Train Errors
RMSE : 1.0172
MAE : 0.6583
Test Errors
RMSE : 1.5298
MAE : 1.0418
----------   Fold  3 ---------------
Train Errors
RMSE : 1.0276
MAE : 0.6695
Test Errors
RMSE : 1.5438
MAE : 1.0486
----------   Fold  4 ---------------
Train Errors
RMSE : 1.0273
MAE : 0.6680
Test Errors
RMSE : 1.5242
MAE : 1.0413
----------   Fold  5 ---------------
Train Errors
RMSE : 1.0014
MAE : 0.6566
Test Errors
RMSE : 1.6570
MAE : 1.0814
-------------------------------------
Training Avg Error:
AVG RMSE : 1.01414253035
AVG MAE : 0.661079444107

Testing Avg Error:
AVG RMSE : 1.57153768119
AVG MAE : 1.05818786499
 

Best K Value for  cos

Min MAE
1.05818786499 7
Min RMSE
1.57130438492 6

 2
----------   Fold  1 ---------------
Train Errors
RMSE : 0.1480
MAE : 0.0846
Test Errors
RMSE : 1.5468
MAE : 0.9483
----------   Fold  2 ---------------
Train Errors
RMSE : 0.1378
MAE : 0.0804
Test Errors
RMSE : 1.4972
MAE : 0.9453
----------   Fold  3 ---------------
Train Errors
RMSE : 0.1409
MAE : 0.0839
Test Errors
RMSE : 1.5088
MAE : 0.9312
----------   Fold  4 ---------------
Train Errors
RMSE : 0.1444
MAE : 0.0817
Test Errors
RMSE : 1.5137
MAE : 0.9510
----------   Fold  5 ---------------
Train Errors
RMSE : 0.1436
MAE : 0.0832
Test Errors
RMSE : 1.5986
MAE : 0.9618
-------------------------------------
Training Avg Error:
AVG RMSE : 0.142934142968
AVG MAE : 0.0827527980094

Testing Avg Error:
AVG RMSE : 1.53301400401
AVG MAE : 0.947518242338
 
 3
----------   Fold  1 ---------------
Train Errors
RMSE : 0.1851
MAE : 0.1073
Test Errors
RMSE : 1.5312
MAE : 0.9446
----------   Fold  2 ---------------
Train Errors
RMSE : 0.1756
MAE : 0.1034
Test Errors
RMSE : 1.4805
MAE : 0.9347
----------   Fold  3 ---------------
Train Errors
RMSE : 0.1800
MAE : 0.1075
Test Errors
RMSE : 1.4915
MAE : 0.9295
----------   Fold  4 ---------------
Train Errors
RMSE : 0.1808
MAE : 0.1059
Test Errors
RMSE : 1.4855
MAE : 0.9397
----------   Fold  5 ---------------
Train Errors
RMSE : 0.1814
MAE : 0.1070
Test Errors
RMSE : 1.5812
MAE : 0.9525
-------------------------------------
Training Avg Error:
AVG RMSE : 0.180566116372
AVG MAE : 0.106210631987

Testing Avg Error:
AVG RMSE : 1.51396678536
AVG MAE : 0.94019574174
 
 4
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2057
MAE : 0.1188
Test Errors
RMSE : 1.5249
MAE : 0.9433
----------   Fold  2 ---------------
Train Errors
RMSE : 0.1969
MAE : 0.1150
Test Errors
RMSE : 1.4781
MAE : 0.9345
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2005
MAE : 0.1188
Test Errors
RMSE : 1.4883
MAE : 0.9285
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2011
MAE : 0.1168
Test Errors
RMSE : 1.4739
MAE : 0.9352
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2011
MAE : 0.1184
Test Errors
RMSE : 1.5822
MAE : 0.9555
-------------------------------------
Training Avg Error:
AVG RMSE : 0.201049615815
AVG MAE : 0.117555864854

Testing Avg Error:
AVG RMSE : 1.50948386821
AVG MAE : 0.939376110309
 
 5
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2194
MAE : 0.1251
Test Errors
RMSE : 1.5221
MAE : 0.9408
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2088
MAE : 0.1210
Test Errors
RMSE : 1.4740
MAE : 0.9326
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2118
MAE : 0.1244
Test Errors
RMSE : 1.4892
MAE : 0.9310
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2146
MAE : 0.1232
Test Errors
RMSE : 1.4720
MAE : 0.9341
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2139
MAE : 0.1247
Test Errors
RMSE : 1.5823
MAE : 0.9552
-------------------------------------
Training Avg Error:
AVG RMSE : 0.213699760821
AVG MAE : 0.123681743591

Testing Avg Error:
AVG RMSE : 1.50791831458
AVG MAE : 0.938718870488
 
 10
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2413
MAE : 0.1337
Test Errors
RMSE : 1.5213
MAE : 0.9432
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2303
MAE : 0.1298
Test Errors
RMSE : 1.4707
MAE : 0.9316
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2348
MAE : 0.1341
Test Errors
RMSE : 1.4873
MAE : 0.9307
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2387
MAE : 0.1327
Test Errors
RMSE : 1.4640
MAE : 0.9311
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2380
MAE : 0.1345
Test Errors
RMSE : 1.5807
MAE : 0.9542
-------------------------------------
Training Avg Error:
AVG RMSE : 0.236619823078
AVG MAE : 0.132944525391

Testing Avg Error:
AVG RMSE : 1.50479140282
AVG MAE : 0.938163692761
 
 15
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2484
MAE : 0.1356
Test Errors
RMSE : 1.5223
MAE : 0.9448
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2391
MAE : 0.1324
Test Errors
RMSE : 1.4713
MAE : 0.9330
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2410
MAE : 0.1361
Test Errors
RMSE : 1.4871
MAE : 0.9303
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2462
MAE : 0.1351
Test Errors
RMSE : 1.4641
MAE : 0.9316
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2463
MAE : 0.1368
Test Errors
RMSE : 1.5809
MAE : 0.9551
-------------------------------------
Training Avg Error:
AVG RMSE : 0.244190229729
AVG MAE : 0.135192597308

Testing Avg Error:
AVG RMSE : 1.50514504077
AVG MAE : 0.938976805258
 
 20
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2519
MAE : 0.1365
Test Errors
RMSE : 1.5219
MAE : 0.9448
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2432
MAE : 0.1334
Test Errors
RMSE : 1.4710
MAE : 0.9328
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2447
MAE : 0.1371
Test Errors
RMSE : 1.4866
MAE : 0.9304
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2498
MAE : 0.1361
Test Errors
RMSE : 1.4638
MAE : 0.9317
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2507
MAE : 0.1380
Test Errors
RMSE : 1.5810
MAE : 0.9553
-------------------------------------
Training Avg Error:
AVG RMSE : 0.248072712431
AVG MAE : 0.136214100425

Testing Avg Error:
AVG RMSE : 1.50485333424
AVG MAE : 0.938993996454
 
 25
----------   Fold  1 ---------------
Train Errors
RMSE : 0.2550
MAE : 0.1373
Test Errors
RMSE : 1.5217
MAE : 0.9450
----------   Fold  2 ---------------
Train Errors
RMSE : 0.2468
MAE : 0.1343
Test Errors
RMSE : 1.4710
MAE : 0.9327
----------   Fold  3 ---------------
Train Errors
RMSE : 0.2471
MAE : 0.1376
Test Errors
RMSE : 1.4866
MAE : 0.9304
----------   Fold  4 ---------------
Train Errors
RMSE : 0.2533
MAE : 0.1370
Test Errors
RMSE : 1.4640
MAE : 0.9321
----------   Fold  5 ---------------
Train Errors
RMSE : 0.2534
MAE : 0.1385
Test Errors
RMSE : 1.5810
MAE : 0.9555
-------------------------------------
Training Avg Error:
AVG RMSE : 0.251117823377
AVG MAE : 0.136932453008

Testing Avg Error:
AVG RMSE : 1.504868981
AVG MAE : 0.939133852583
 

Best K Value for  

Min MAE
0.938163692761 4
Min RMSE
1.50479140282 4

pr 2
----------   Fold  1 ---------------
Train Errors
RMSE : 0.5613
MAE : 0.3643
Test Errors
RMSE : 1.6341
MAE : 1.0430
----------   Fold  2 ---------------
Train Errors
RMSE : 0.5752
MAE : 0.3660
Test Errors
RMSE : 1.5130
MAE : 1.0073
----------   Fold  3 ---------------
Train Errors
RMSE : 0.5840
MAE : 0.3749
Test Errors
RMSE : 1.5105
MAE : 1.0061
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5749
MAE : 0.3689
Test Errors
RMSE : 1.5103
MAE : 0.9997
----------   Fold  5 ---------------
Train Errors
RMSE : 0.5683
MAE : 0.3691
Test Errors
RMSE : 1.6327
MAE : 1.0413
-------------------------------------
Training Avg Error:
AVG RMSE : 0.572734680325
AVG MAE : 0.368640804867

Testing Avg Error:
AVG RMSE : 1.56012062639
AVG MAE : 1.01948326547
 
pr 3
----------   Fold  1 ---------------
Train Errors
RMSE : 0.6455
MAE : 0.4207
Test Errors
RMSE : 1.6183
MAE : 1.0435
----------   Fold  2 ---------------
Train Errors
RMSE : 0.6624
MAE : 0.4243
Test Errors
RMSE : 1.4834
MAE : 0.9950
----------   Fold  3 ---------------
Train Errors
RMSE : 0.6555
MAE : 0.4233
Test Errors
RMSE : 1.5003
MAE : 1.0051
----------   Fold  4 ---------------
Train Errors
RMSE : 0.6547
MAE : 0.4218
Test Errors
RMSE : 1.4777
MAE : 0.9883
----------   Fold  5 ---------------
Train Errors
RMSE : 0.6445
MAE : 0.4206
Test Errors
RMSE : 1.6196
MAE : 1.0353
-------------------------------------
Training Avg Error:
AVG RMSE : 0.652513012524
AVG MAE : 0.422121052163

Testing Avg Error:
AVG RMSE : 1.53987980468
AVG MAE : 1.0134563123
 
pr 4
----------   Fold  1 ---------------
Train Errors
RMSE : 0.6818
MAE : 0.4430
Test Errors
RMSE : 1.6209
MAE : 1.0527
----------   Fold  2 ---------------
Train Errors
RMSE : 0.6943
MAE : 0.4449
Test Errors
RMSE : 1.4769
MAE : 0.9894
----------   Fold  3 ---------------
Train Errors
RMSE : 0.6923
MAE : 0.4488
Test Errors
RMSE : 1.4988
MAE : 1.0087
----------   Fold  4 ---------------
Train Errors
RMSE : 0.6912
MAE : 0.4445
Test Errors
RMSE : 1.4721
MAE : 0.9851
----------   Fold  5 ---------------
Train Errors
RMSE : 0.6815
MAE : 0.4444
Test Errors
RMSE : 1.6106
MAE : 1.0295
-------------------------------------
Training Avg Error:
AVG RMSE : 0.688220483435
AVG MAE : 0.445107467587

Testing Avg Error:
AVG RMSE : 1.53583064685
AVG MAE : 1.01309429669
 
pr 5
----------   Fold  1 ---------------
Train Errors
RMSE : 0.6971
MAE : 0.4538
Test Errors
RMSE : 1.6221
MAE : 1.0546
----------   Fold  2 ---------------
Train Errors
RMSE : 0.7114
MAE : 0.4555
Test Errors
RMSE : 1.4757
MAE : 0.9913
----------   Fold  3 ---------------
Train Errors
RMSE : 0.7072
MAE : 0.4580
Test Errors
RMSE : 1.4968
MAE : 1.0075
----------   Fold  4 ---------------
Train Errors
RMSE : 0.7072
MAE : 0.4550
Test Errors
RMSE : 1.4710
MAE : 0.9847
----------   Fold  5 ---------------
Train Errors
RMSE : 0.6976
MAE : 0.4534
Test Errors
RMSE : 1.6046
MAE : 1.0272
-------------------------------------
Training Avg Error:
AVG RMSE : 0.704082752291
AVG MAE : 0.455160574636

Testing Avg Error:
AVG RMSE : 1.53406498865
AVG MAE : 1.01307941426
 
pr 10
----------   Fold  1 ---------------
Train Errors
RMSE : 0.7213
MAE : 0.4677
Test Errors
RMSE : 1.6215
MAE : 1.0549
----------   Fold  2 ---------------
Train Errors
RMSE : 0.7302
MAE : 0.4682
Test Errors
RMSE : 1.4734
MAE : 0.9905
----------   Fold  3 ---------------
Train Errors
RMSE : 0.7310
MAE : 0.4728
Test Errors
RMSE : 1.4936
MAE : 1.0087
----------   Fold  4 ---------------
Train Errors
RMSE : 0.7315
MAE : 0.4700
Test Errors
RMSE : 1.4731
MAE : 0.9859
----------   Fold  5 ---------------
Train Errors
RMSE : 0.7179
MAE : 0.4668
Test Errors
RMSE : 1.5994
MAE : 1.0222
-------------------------------------
Training Avg Error:
AVG RMSE : 0.726346579781
AVG MAE : 0.469105139646

Testing Avg Error:
AVG RMSE : 1.53219299389
AVG MAE : 1.01244464736
 
pr 15
----------   Fold  1 ---------------
Train Errors
RMSE : 0.7263
MAE : 0.4702
Test Errors
RMSE : 1.6209
MAE : 1.0545
----------   Fold  2 ---------------
Train Errors
RMSE : 0.7343
MAE : 0.4711
Test Errors
RMSE : 1.4726
MAE : 0.9886
----------   Fold  3 ---------------
Train Errors
RMSE : 0.7365
MAE : 0.4763
Test Errors
RMSE : 1.4945
MAE : 1.0083
----------   Fold  4 ---------------
Train Errors
RMSE : 0.7363
MAE : 0.4732
Test Errors
RMSE : 1.4720
MAE : 0.9852
----------   Fold  5 ---------------
Train Errors
RMSE : 0.7231
MAE : 0.4705
Test Errors
RMSE : 1.5995
MAE : 1.0223
-------------------------------------
Training Avg Error:
AVG RMSE : 0.731283348338
AVG MAE : 0.472261329073

Testing Avg Error:
AVG RMSE : 1.53189555248
AVG MAE : 1.01178094228
 
pr 20
----------   Fold  1 ---------------
Train Errors
RMSE : 0.7294
MAE : 0.4717
Test Errors
RMSE : 1.6204
MAE : 1.0539
----------   Fold  2 ---------------
Train Errors
RMSE : 0.7375
MAE : 0.4730
Test Errors
RMSE : 1.4729
MAE : 0.9885
----------   Fold  3 ---------------
Train Errors
RMSE : 0.7394
MAE : 0.4775
Test Errors
RMSE : 1.4953
MAE : 1.0087
----------   Fold  4 ---------------
Train Errors
RMSE : 0.7397
MAE : 0.4754
Test Errors
RMSE : 1.4716
MAE : 0.9856
----------   Fold  5 ---------------
Train Errors
RMSE : 0.7261
MAE : 0.4719
Test Errors
RMSE : 1.5991
MAE : 1.0225
-------------------------------------
Training Avg Error:
AVG RMSE : 0.73439769318
AVG MAE : 0.473910002644

Testing Avg Error:
AVG RMSE : 1.53184524656
AVG MAE : 1.01183573297
 
pr 25
----------   Fold  1 ---------------
Train Errors
RMSE : 0.7322
MAE : 0.4731
Test Errors
RMSE : 1.6210
MAE : 1.0548
----------   Fold  2 ---------------
Train Errors
RMSE : 0.7401
MAE : 0.4739
Test Errors
RMSE : 1.4736
MAE : 0.9886
----------   Fold  3 ---------------
Train Errors
RMSE : 0.7417
MAE : 0.4786
Test Errors
RMSE : 1.4949
MAE : 1.0083
----------   Fold  4 ---------------
Train Errors
RMSE : 0.7421
MAE : 0.4767
Test Errors
RMSE : 1.4726
MAE : 0.9859
----------   Fold  5 ---------------
Train Errors
RMSE : 0.7282
MAE : 0.4728
Test Errors
RMSE : 1.5998
MAE : 1.0232
-------------------------------------
Training Avg Error:
AVG RMSE : 0.736827506135
AVG MAE : 0.475018517634

Testing Avg Error:
AVG RMSE : 1.53237048325
AVG MAE : 1.01218471198
 

Best K Value for  pr

Min MAE
1.01178094228 5
Min RMSE
1.53184524656 6

adjcos 2
----------   Fold  1 ---------------
Train Errors
RMSE : 0.4457
MAE : 0.2626
Test Errors
RMSE : 1.6611
MAE : 1.0442
----------   Fold  2 ---------------
Train Errors
RMSE : 0.4569
MAE : 0.2691
Test Errors
RMSE : 1.5164
MAE : 0.9840
----------   Fold  3 ---------------
Train Errors
RMSE : 0.4517
MAE : 0.2691
Test Errors
RMSE : 1.5531
MAE : 1.0098
----------   Fold  4 ---------------
Train Errors
RMSE : 0.4658
MAE : 0.2684
Test Errors
RMSE : 1.4714
MAE : 0.9584
----------   Fold  5 ---------------
Train Errors
RMSE : 0.4551
MAE : 0.2730
Test Errors
RMSE : 1.6159
MAE : 1.0067
-------------------------------------
Training Avg Error:
AVG RMSE : 0.455045259323
AVG MAE : 0.268435437296

Testing Avg Error:
AVG RMSE : 1.56356823577
AVG MAE : 1.00063065258
 
adjcos 3
----------   Fold  1 ---------------
Train Errors
RMSE : 0.4909
MAE : 0.2936
Test Errors
RMSE : 1.6389
MAE : 1.0304
----------   Fold  2 ---------------
Train Errors
RMSE : 0.4940
MAE : 0.2928
Test Errors
RMSE : 1.4953
MAE : 0.9749
----------   Fold  3 ---------------
Train Errors
RMSE : 0.4894
MAE : 0.2961
Test Errors
RMSE : 1.5323
MAE : 0.9989
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5055
MAE : 0.2953
Test Errors
RMSE : 1.4500
MAE : 0.9531
----------   Fold  5 ---------------
Train Errors
RMSE : 0.4949
MAE : 0.3000
Test Errors
RMSE : 1.5962
MAE : 0.9975
-------------------------------------
Training Avg Error:
AVG RMSE : 0.494932273028
AVG MAE : 0.295554997505

Testing Avg Error:
AVG RMSE : 1.54252842258
AVG MAE : 0.990956244864
 
adjcos 4
----------   Fold  1 ---------------
Train Errors
RMSE : 0.5087
MAE : 0.3034
Test Errors
RMSE : 1.6416
MAE : 1.0367
----------   Fold  2 ---------------
Train Errors
RMSE : 0.5149
MAE : 0.3047
Test Errors
RMSE : 1.4876
MAE : 0.9731
----------   Fold  3 ---------------
Train Errors
RMSE : 0.5100
MAE : 0.3068
Test Errors
RMSE : 1.5313
MAE : 1.0009
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5268
MAE : 0.3079
Test Errors
RMSE : 1.4431
MAE : 0.9504
----------   Fold  5 ---------------
Train Errors
RMSE : 0.5130
MAE : 0.3096
Test Errors
RMSE : 1.5919
MAE : 0.9931
-------------------------------------
Training Avg Error:
AVG RMSE : 0.514691273068
AVG MAE : 0.306465970354

Testing Avg Error:
AVG RMSE : 1.53909702646
AVG MAE : 0.990844103568
 
adjcos 5
----------   Fold  1 ---------------
Train Errors
RMSE : 0.5202
MAE : 0.3096
Test Errors
RMSE : 1.6411
MAE : 1.0363
----------   Fold  2 ---------------
Train Errors
RMSE : 0.5256
MAE : 0.3102
Test Errors
RMSE : 1.4866
MAE : 0.9734
----------   Fold  3 ---------------
Train Errors
RMSE : 0.5194
MAE : 0.3116
Test Errors
RMSE : 1.5281
MAE : 0.9992
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5379
MAE : 0.3148
Test Errors
RMSE : 1.4417
MAE : 0.9506
----------   Fold  5 ---------------
Train Errors
RMSE : 0.5226
MAE : 0.3161
Test Errors
RMSE : 1.5902
MAE : 0.9932
-------------------------------------
Training Avg Error:
AVG RMSE : 0.525132824237
AVG MAE : 0.312458003791

Testing Avg Error:
AVG RMSE : 1.53754087758
AVG MAE : 0.990545425511
 
adjcos 10
----------   Fold  1 ---------------
Train Errors
RMSE : 0.5301
MAE : 0.3139
Test Errors
RMSE : 1.6381
MAE : 1.0345
----------   Fold  2 ---------------
Train Errors
RMSE : 0.5333
MAE : 0.3155
Test Errors
RMSE : 1.4850
MAE : 0.9729
----------   Fold  3 ---------------
Train Errors
RMSE : 0.5321
MAE : 0.3174
Test Errors
RMSE : 1.5265
MAE : 0.9975
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5481
MAE : 0.3196
Test Errors
RMSE : 1.4405
MAE : 0.9494
----------   Fold  5 ---------------
Train Errors
RMSE : 0.5365
MAE : 0.3240
Test Errors
RMSE : 1.5860
MAE : 0.9903
-------------------------------------
Training Avg Error:
AVG RMSE : 0.536017370057
AVG MAE : 0.318078249953

Testing Avg Error:
AVG RMSE : 1.53522580324
AVG MAE : 0.988939567019
 
adjcos 15
----------   Fold  1 ---------------
Train Errors
RMSE : 0.5331
MAE : 0.3149
Test Errors
RMSE : 1.6373
MAE : 1.0331
----------   Fold  2 ---------------
Train Errors
RMSE : 0.5359
MAE : 0.3167
Test Errors
RMSE : 1.4850
MAE : 0.9726
----------   Fold  3 ---------------
Train Errors
RMSE : 0.5342
MAE : 0.3183
Test Errors
RMSE : 1.5252
MAE : 0.9962
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5476
MAE : 0.3194
Test Errors
RMSE : 1.4397
MAE : 0.9487
----------   Fold  5 ---------------
Train Errors
RMSE : 0.5391
MAE : 0.3247
Test Errors
RMSE : 1.5853
MAE : 0.9894
-------------------------------------
Training Avg Error:
AVG RMSE : 0.537950792482
AVG MAE : 0.318823505564

Testing Avg Error:
AVG RMSE : 1.53452592719
AVG MAE : 0.988008670984
 
adjcos 20
----------   Fold  1 ---------------
Train Errors
RMSE : 0.5340
MAE : 0.3152
Test Errors
RMSE : 1.6372
MAE : 1.0329
----------   Fold  2 ---------------
Train Errors
RMSE : 0.5359
MAE : 0.3165
Test Errors
RMSE : 1.4847
MAE : 0.9726
----------   Fold  3 ---------------
Train Errors
RMSE : 0.5348
MAE : 0.3185
Test Errors
RMSE : 1.5247
MAE : 0.9965
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5476
MAE : 0.3196
Test Errors
RMSE : 1.4390
MAE : 0.9480
----------   Fold  5 ---------------
Train Errors
RMSE : 0.5392
MAE : 0.3248
Test Errors
RMSE : 1.5850
MAE : 0.9894
-------------------------------------
Training Avg Error:
AVG RMSE : 0.538312810284
AVG MAE : 0.318905774643

Testing Avg Error:
AVG RMSE : 1.53411531134
AVG MAE : 0.987877882138
 
adjcos 25
----------   Fold  1 ---------------
Train Errors
RMSE : 0.5343
MAE : 0.3153
Test Errors
RMSE : 1.6370
MAE : 1.0327
----------   Fold  2 ---------------
Train Errors
RMSE : 0.5366
MAE : 0.3168
Test Errors
RMSE : 1.4847
MAE : 0.9726
----------   Fold  3 ---------------
Train Errors
RMSE : 0.5346
MAE : 0.3185
Test Errors
RMSE : 1.5242
MAE : 0.9965
----------   Fold  4 ---------------
Train Errors
RMSE : 0.5481
MAE : 0.3197
Test Errors
RMSE : 1.4391
MAE : 0.9483
----------   Fold  5 ---------------
Train Errors
RMSE : 0.5395
MAE : 0.3249
Test Errors
RMSE : 1.5850
MAE : 0.9893
-------------------------------------
Training Avg Error:
AVG RMSE : 0.538626661922
AVG MAE : 0.319038764909

Testing Avg Error:
AVG RMSE : 1.53399550691
AVG MAE : 0.987850580012
 

Best K Value for  adjcos

Min MAE
0.987850580012 7
Min RMSE
1.53399550691 7


In [24]:
cos_res = sims[1]
euc_res = sims[0]
pr_res = sims[3]
adjcos_res = sims[4]
k = [2, 3, 4, 5, 10, 15, 20, 25]

Plot of MAE


In [25]:
results_df1 = pd.DataFrame({'K': k, 'COS': cos_res, 'EUC': euc_res, 'Pearson': pr_res, 'Adjusted Cosine': adjcos_res})
plot1 = results_df1.plot(x='K', y=['COS', 'EUC', 'Pearson', 'Adjusted Cosine'], ylim=(0.95, 1.1), title = 'Item-Item CF: MAE for different similarity metrics at different Ks')
fig = plot1.get_figure()
fig.savefig('../figures/Kmae_item.png')


Plot of RMSE


In [26]:
cos_res = sims_rmse[1]
euc_res = sims_rmse[0]
pr_res = sims_rmse[3]
adjcos_res = sims_rmse[4]
k = [2, 3, 4, 5, 10, 15, 20, 25]
results_df1 = pd.DataFrame({'K': k, 'COS': cos_res, 'EUC': euc_res, 'Pearson': pr_res, 'Adjusted Cosine': adjcos_res})
plot1 = results_df1.plot(x='K', y=['COS', 'EUC', 'Pearson', 'Adjusted Cosine'], ylim=(1.5, 1.6), title = 'Item-Item CF: RMSE for different similarity metrics at different Ks')
fig = plot1.get_figure()
fig.savefig('../figures/Krmse_item.png')


We observe that there is no significant change in rmse and mae values beyond k =5, simple explaination of these can be that average books rated per user is around 3.3


In [53]:
import time
trtimer = []
tetimer = []
for data1 in datasets:
    rows1 = data1.user_id.unique()
    cols1 = data1['Book-Title'].unique()
    print(data1.user_id.nunique(), data1.isbn.nunique())
    data1 = data1[['user_id', 'Book-Title', 'book_rating']]
    
    idict  = dict(zip(cols1, range(len(cols1))))
    udict = dict(zip(rows1, range(len(rows1))))
    
    data1.user_id = [
    udict[i] for i in data1.user_id
    ]
    data1['Book-Title'] = [
    idict[i] for i in data1['Book-Title']
    ]
    
    nmat1 = data1.as_matrix()
    trt, tet = get_results(nmat1, 'euc', len(rows1), len(cols1), 5, 5, True)
    
    trtimer.append(trt)
    tetimer.append(tet)

    print()


2517 100
----------   Fold  1 ---------------
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/pandas/core/generic.py:2701: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  self[name] = value
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/ipykernel/__main__.py:17: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
Training Time :  1.2959749698638916
Train Errors
RMSE : 0.3009
MAE : 0.1471
Prediction Time :  0.03315901756286621
Test Errors
RMSE : 1.5606
MAE : 0.9771

3086 150
----------   Fold  1 ---------------
Training Time :  3.4505748748779297
Train Errors
RMSE : 0.2923
MAE : 0.1374
Prediction Time :  0.06331110000610352
Test Errors
RMSE : 1.5621
MAE : 0.9972

3543 200
----------   Fold  1 ---------------
Training Time :  6.2445690631866455
Train Errors
RMSE : 0.2718
MAE : 0.1257
Prediction Time :  0.05825400352478027
Test Errors
RMSE : 1.5301
MAE : 0.9583

4280 300
----------   Fold  1 ---------------
Training Time :  19.66141104698181
Train Errors
RMSE : 0.2572
MAE : 0.1115
Prediction Time :  0.07999801635742188
Test Errors
RMSE : 1.5282
MAE : 0.9387

5282 500
----------   Fold  1 ---------------
Training Time :  76.0420880317688
Train Errors
RMSE : 0.2256
MAE : 0.0944
Prediction Time :  0.11560916900634766
Test Errors
RMSE : 1.4864
MAE : 0.9209


In [55]:
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': trtimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 80), title = 'Item-Item CF: Time to train over dataset with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/traintime.png')



In [59]:
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': tetimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 1), title = 'Item-Item CF: Time to Predict over Test Set with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/testtime.png')


getmrec function is used to get top m recommendation for a user_id based on the similarity matrix (option), k neighbours.


In [30]:
full_mat = np.zeros((len(rows),len(cols)))
for row in nmat:
    full_mat[row[0], row[1]] = row[2]
item_similarity, amean, umean, imean = itemsimilar(full_mat, 'euc')

In [31]:
def getmrec(full_mat, user_id, item_similarity, k, m, idict,  cov = False):
    
    n = item_similarity.shape[0]
    nzero = full_mat[user_id].nonzero()[0]
    
    preds = {}
    for row in range(n):
        preds[row] = predict(user_id, row, full_mat, item_similarity, amean, umean, imean, k)
    
    flipped_dict = dict(zip(idict.values(), idict.keys()))
    
    if not cov:
        print("Books Read -----")
        for i in nzero:
            print(flipped_dict[i])
            del preds[i]
    
    
    res = sorted(preds.items(), key=lambda x: x[1], reverse = True)
    
    ans = [flipped_dict[i[0]] for i in res[:m]]
    return ans

In [32]:
for m in [5, 8, 10, 15]:   
    cov = []
    for i in range(len(rows)):
        cov.extend(getmrec(full_mat, i, item_similarity, 5, m, idict, True))
    print("Coverage with", m, "recs:", len(set(cov)), "%")


Coverage with 5 recs: 92 %
Coverage with 8 recs: 95 %
Coverage with 10 recs: 99 %
Coverage with 15 recs: 100 %

In [33]:
getmrec(full_mat, 313, item_similarity, 5, 10, idict)


Books Read -----
Purity in Death
Divine Secrets of the Ya-Ya Sisterhood : A Novel
Ender's Game (Ender Wiggins Saga (Paperback))
Middlesex: A Novel
Invisible Man
Out[33]:
['Pigs in Heaven',
 "A Thousand Acres (Ballantine Reader's Circle)",
 'Chicken Soup for the Soul (Chicken Soup for the Soul)',
 "Pretend You Don't See Her",
 'Seabiscuit',
 'Wild Animus',
 "Left Behind: A Novel of the Earth's Last Days (Left Behind #1)",
 "A Second Chicken Soup for the Woman's Soul (Chicken Soup for the Soul Series)",
 'About a Boy (Movie Tie-In)',
 "The Princess Bride: S Morgenstern's Classic Tale of True Love and High Adventure"]

In [ ]:


In [ ]:


In [ ]:

Algo 2: Model Based Algorithm : NMF

We used scikit-surprise to implement NMF and tune its hyperparameter and regularisation. We made an attempt at manually implement NMF but the evaluation metrics were not as comparable to regularised NMF using surprise-scikit. The code for manual NMF can be found in analyze/NMF.ipynb


In [34]:
from surprise import evaluate, Reader, Dataset, SVD, NMF, GridSearch, KNNWithMeans

In [35]:
reader = Reader(rating_scale=(1, 10))
data2 = Dataset.load_from_df(data[['user_id', 'Book-Title', 'book_rating']], reader)
data2.split(5)

In [36]:
param_grid = {'n_factors': [30, 40, 50, 60, 70], 'n_epochs': [40, 50, 60], 'reg_pu': [0.001, 0.1, 1],
              'reg_qi': [ 0.1, 1, 3, 5]}
grid_search = GridSearch(NMF, param_grid, measures=['RMSE', 'MAE'])
grid_search.evaluate(data2)
results_df = pd.DataFrame.from_dict(grid_search.cv_results)
print(results_df)


[{'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 0.1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 1}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 3}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 5}, {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 5}]
------------
Parameters combination 1 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.5926
Mean MAE : 1.0474
------------
------------
Parameters combination 2 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6965
Mean MAE : 1.1527
------------
------------
Parameters combination 3 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7863
Mean MAE : 1.2920
------------
------------
Parameters combination 4 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7938
Mean MAE : 1.3092
------------
------------
Parameters combination 5 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6283
Mean MAE : 1.1149
------------
------------
Parameters combination 6 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7758
Mean MAE : 1.2778
------------
------------
Parameters combination 7 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 2.1240
Mean MAE : 1.6382
------------
------------
Parameters combination 8 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6734
Mean MAE : 1.2061
------------
------------
Parameters combination 9 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7649
Mean MAE : 1.2629
------------
------------
Parameters combination 10 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 2.3140
Mean MAE : 1.8058
------------
------------
Parameters combination 11 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7665
Mean MAE : 1.3077
------------
------------
Parameters combination 12 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7488
Mean MAE : 1.2415
------------
------------
Parameters combination 13 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 2.4169
Mean MAE : 1.8884
------------
------------
Parameters combination 14 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 1.8672
Mean MAE : 1.4056
------------
------------
Parameters combination 15 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7453
Mean MAE : 1.2355
------------
------------
Parameters combination 16 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.5569
Mean MAE : 0.9894
------------
------------
Parameters combination 17 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.6611
Mean MAE : 1.1881
------------
------------
Parameters combination 18 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.8775
Mean MAE : 1.5097
------------
------------
Parameters combination 19 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.5702
Mean MAE : 1.0238
------------
------------
Parameters combination 20 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.6549
Mean MAE : 1.1765
------------
------------
Parameters combination 21 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.8782
Mean MAE : 1.5100
------------
------------
Parameters combination 22 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.6012
Mean MAE : 1.0872
------------
------------
Parameters combination 23 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.6486
Mean MAE : 1.1667
------------
------------
Parameters combination 24 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.8785
Mean MAE : 1.5103
------------
------------
Parameters combination 25 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.6514
Mean MAE : 1.1553
------------
------------
Parameters combination 26 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.6428
Mean MAE : 1.1556
------------
------------
Parameters combination 27 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.8789
Mean MAE : 1.5113
------------
------------
Parameters combination 28 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.7096
Mean MAE : 1.2256
------------
------------
Parameters combination 29 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.6404
Mean MAE : 1.1492
------------
------------
Parameters combination 30 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.8768
Mean MAE : 1.5087
------------
------------
Parameters combination 31 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 1.5516
Mean MAE : 0.9915
------------
------------
Parameters combination 32 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 1.7087
Mean MAE : 1.2807
------------
------------
Parameters combination 33 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 2.2669
Mean MAE : 1.9484
------------
------------
Parameters combination 34 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 1.5479
Mean MAE : 0.9928
------------
------------
Parameters combination 35 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 1.7107
Mean MAE : 1.2833
------------
------------
Parameters combination 36 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 2.2675
Mean MAE : 1.9490
------------
------------
Parameters combination 37 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 1.5424
Mean MAE : 0.9941
------------
------------
Parameters combination 38 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 1.7090
Mean MAE : 1.2819
------------
------------
Parameters combination 39 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 2.2672
Mean MAE : 1.9486
------------
------------
Parameters combination 40 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 1.5489
Mean MAE : 1.0059
------------
------------
Parameters combination 41 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 1.7076
Mean MAE : 1.2804
------------
------------
Parameters combination 42 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 2.2670
Mean MAE : 1.9485
------------
------------
Parameters combination 43 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 1.5516
Mean MAE : 1.0122
------------
------------
Parameters combination 44 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 1.7074
Mean MAE : 1.2804
------------
------------
Parameters combination 45 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 2.2672
Mean MAE : 1.9488
------------
------------
Parameters combination 46 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 1.5569
Mean MAE : 0.9984
------------
------------
Parameters combination 47 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 1.7504
Mean MAE : 1.3467
------------
------------
Parameters combination 48 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 2.6263
Mean MAE : 2.3195
------------
------------
Parameters combination 49 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 1.5486
Mean MAE : 0.9945
------------
------------
Parameters combination 50 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 1.7494
Mean MAE : 1.3469
------------
------------
Parameters combination 51 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 2.6264
Mean MAE : 2.3197
------------
------------
Parameters combination 52 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 1.5423
Mean MAE : 0.9905
------------
------------
Parameters combination 53 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 1.7503
Mean MAE : 1.3468
------------
------------
Parameters combination 54 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 2.6266
Mean MAE : 2.3198
------------
------------
Parameters combination 55 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 1.5361
Mean MAE : 0.9853
------------
------------
Parameters combination 56 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 1.7509
Mean MAE : 1.3476
------------
------------
Parameters combination 57 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 2.6264
Mean MAE : 2.3197
------------
------------
Parameters combination 58 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 1.5358
Mean MAE : 0.9828
------------
------------
Parameters combination 59 of 180
params:  {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 1.7508
Mean MAE : 1.3474
------------
------------
Parameters combination 60 of 180
params:  {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 2.6265
Mean MAE : 2.3197
------------
------------
Parameters combination 61 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6070
Mean MAE : 1.0467
------------
------------
Parameters combination 62 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7092
Mean MAE : 1.1610
------------
------------
Parameters combination 63 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7849
Mean MAE : 1.2911
------------
------------
Parameters combination 64 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7726
Mean MAE : 1.2885
------------
------------
Parameters combination 65 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6310
Mean MAE : 1.0972
------------
------------
Parameters combination 66 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7729
Mean MAE : 1.2807
------------
------------
Parameters combination 67 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 2.0672
Mean MAE : 1.5879
------------
------------
Parameters combination 68 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6380
Mean MAE : 1.1449
------------
------------
Parameters combination 69 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7690
Mean MAE : 1.2746
------------
------------
Parameters combination 70 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 2.2650
Mean MAE : 1.7651
------------
------------
Parameters combination 71 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6647
Mean MAE : 1.2015
------------
------------
Parameters combination 72 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7632
Mean MAE : 1.2696
------------
------------
Parameters combination 73 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 2.3662
Mean MAE : 1.8483
------------
------------
Parameters combination 74 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7182
Mean MAE : 1.2607
------------
------------
Parameters combination 75 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7638
Mean MAE : 1.2656
------------
------------
Parameters combination 76 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.5474
Mean MAE : 0.9855
------------
------------
Parameters combination 77 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.6735
Mean MAE : 1.1964
------------
------------
Parameters combination 78 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.8685
Mean MAE : 1.4993
------------
------------
Parameters combination 79 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.5534
Mean MAE : 1.0032
------------
------------
Parameters combination 80 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.6704
Mean MAE : 1.1926
------------
------------
Parameters combination 81 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.8692
Mean MAE : 1.5001
------------
------------
Parameters combination 82 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.5848
Mean MAE : 1.0529
------------
------------
Parameters combination 83 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.6682
Mean MAE : 1.1888
------------
------------
Parameters combination 84 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.8681
Mean MAE : 1.4988
------------
------------
Parameters combination 85 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.6137
Mean MAE : 1.1034
------------
------------
Parameters combination 86 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.6682
Mean MAE : 1.1856
------------
------------
Parameters combination 87 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.8683
Mean MAE : 1.4994
------------
------------
Parameters combination 88 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.6504
Mean MAE : 1.1533
------------
------------
Parameters combination 89 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.6677
Mean MAE : 1.1837
------------
------------
Parameters combination 90 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.8688
Mean MAE : 1.4998
------------
------------
Parameters combination 91 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 1.5529
Mean MAE : 0.9921
------------
------------
Parameters combination 92 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 1.7155
Mean MAE : 1.2868
------------
------------
Parameters combination 93 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 2.2635
Mean MAE : 1.9450
------------
------------
Parameters combination 94 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 1.5574
Mean MAE : 0.9928
------------
------------
Parameters combination 95 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 1.7173
Mean MAE : 1.2874
------------
------------
Parameters combination 96 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 2.2640
Mean MAE : 1.9455
------------
------------
Parameters combination 97 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 1.5413
Mean MAE : 0.9852
------------
------------
Parameters combination 98 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 1.7164
Mean MAE : 1.2869
------------
------------
Parameters combination 99 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 2.2640
Mean MAE : 1.9454
------------
------------
Parameters combination 100 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 1.5470
Mean MAE : 0.9933
------------
------------
Parameters combination 101 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 1.7161
Mean MAE : 1.2861
------------
------------
Parameters combination 102 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 2.2638
Mean MAE : 1.9453
------------
------------
Parameters combination 103 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 1.5404
Mean MAE : 0.9898
------------
------------
Parameters combination 104 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 1.7135
Mean MAE : 1.2840
------------
------------
Parameters combination 105 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 2.2638
Mean MAE : 1.9453
------------
------------
Parameters combination 106 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 1.5698
Mean MAE : 1.0111
------------
------------
Parameters combination 107 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 1.7505
Mean MAE : 1.3456
------------
------------
Parameters combination 108 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 2.6252
Mean MAE : 2.3185
------------
------------
Parameters combination 109 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 1.5507
Mean MAE : 0.9897
------------
------------
Parameters combination 110 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 1.7519
Mean MAE : 1.3464
------------
------------
Parameters combination 111 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 2.6253
Mean MAE : 2.3185
------------
------------
Parameters combination 112 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 1.5447
Mean MAE : 0.9830
------------
------------
Parameters combination 113 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 1.7521
Mean MAE : 1.3461
------------
------------
Parameters combination 114 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 2.6252
Mean MAE : 2.3184
------------
------------
Parameters combination 115 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 1.5356
Mean MAE : 0.9788
------------
------------
Parameters combination 116 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 1.7523
Mean MAE : 1.3475
------------
------------
Parameters combination 117 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 2.6252
Mean MAE : 2.3185
------------
------------
Parameters combination 118 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 1.5416
Mean MAE : 0.9838
------------
------------
Parameters combination 119 of 180
params:  {'n_epochs': 50, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 1.7518
Mean MAE : 1.3463
------------
------------
Parameters combination 120 of 180
params:  {'n_epochs': 50, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 2.6252
Mean MAE : 2.3184
------------
------------
Parameters combination 121 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6013
Mean MAE : 1.0465
------------
------------
Parameters combination 122 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7187
Mean MAE : 1.1659
------------
------------
Parameters combination 123 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7797
Mean MAE : 1.2845
------------
------------
Parameters combination 124 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7471
Mean MAE : 1.2656
------------
------------
Parameters combination 125 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6517
Mean MAE : 1.0983
------------
------------
Parameters combination 126 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7784
Mean MAE : 1.2858
------------
------------
Parameters combination 127 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 2.0291
Mean MAE : 1.5503
------------
------------
Parameters combination 128 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6289
Mean MAE : 1.1123
------------
------------
Parameters combination 129 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7694
Mean MAE : 1.2774
------------
------------
Parameters combination 130 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 2.2030
Mean MAE : 1.7132
------------
------------
Parameters combination 131 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6323
Mean MAE : 1.1440
------------
------------
Parameters combination 132 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7654
Mean MAE : 1.2728
------------
------------
Parameters combination 133 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 2.3225
Mean MAE : 1.8141
------------
------------
Parameters combination 134 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 1.6532
Mean MAE : 1.1843
------------
------------
Parameters combination 135 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 0.1}
------------
Mean RMSE: 1.7681
Mean MAE : 1.2760
------------
------------
Parameters combination 136 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.5607
Mean MAE : 0.9834
------------
------------
Parameters combination 137 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.6872
Mean MAE : 1.2074
------------
------------
Parameters combination 138 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 1}
------------
Mean RMSE: 1.8609
Mean MAE : 1.4916
------------
------------
Parameters combination 139 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.5585
Mean MAE : 0.9969
------------
------------
Parameters combination 140 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.6837
Mean MAE : 1.2041
------------
------------
Parameters combination 141 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 1}
------------
Mean RMSE: 1.8627
Mean MAE : 1.4930
------------
------------
Parameters combination 142 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.5608
Mean MAE : 1.0244
------------
------------
Parameters combination 143 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.6819
Mean MAE : 1.2010
------------
------------
Parameters combination 144 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 1}
------------
Mean RMSE: 1.8615
Mean MAE : 1.4921
------------
------------
Parameters combination 145 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.5911
Mean MAE : 1.0705
------------
------------
Parameters combination 146 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.6776
Mean MAE : 1.1992
------------
------------
Parameters combination 147 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 1}
------------
Mean RMSE: 1.8621
Mean MAE : 1.4926
------------
------------
Parameters combination 148 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.6109
Mean MAE : 1.1061
------------
------------
Parameters combination 149 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.6785
Mean MAE : 1.1984
------------
------------
Parameters combination 150 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 1}
------------
Mean RMSE: 1.8618
Mean MAE : 1.4923
------------
------------
Parameters combination 151 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 1.5603
Mean MAE : 0.9929
------------
------------
Parameters combination 152 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 1.7194
Mean MAE : 1.2873
------------
------------
Parameters combination 153 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 3}
------------
Mean RMSE: 2.2622
Mean MAE : 1.9435
------------
------------
Parameters combination 154 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 1.5460
Mean MAE : 0.9799
------------
------------
Parameters combination 155 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 1.7193
Mean MAE : 1.2873
------------
------------
Parameters combination 156 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 3}
------------
Mean RMSE: 2.2620
Mean MAE : 1.9434
------------
------------
Parameters combination 157 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 1.5454
Mean MAE : 0.9786
------------
------------
Parameters combination 158 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 1.7210
Mean MAE : 1.2892
------------
------------
Parameters combination 159 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 3}
------------
Mean RMSE: 2.2621
Mean MAE : 1.9435
------------
------------
Parameters combination 160 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 1.5451
Mean MAE : 0.9836
------------
------------
Parameters combination 161 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 1.7192
Mean MAE : 1.2878
------------
------------
Parameters combination 162 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 3}
------------
Mean RMSE: 2.2621
Mean MAE : 1.9435
------------
------------
Parameters combination 163 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 1.5397
Mean MAE : 0.9853
------------
------------
Parameters combination 164 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 1.7208
Mean MAE : 1.2892
------------
------------
Parameters combination 165 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 3}
------------
Mean RMSE: 2.2622
Mean MAE : 1.9435
------------
------------
Parameters combination 166 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 1.5665
Mean MAE : 1.0058
------------
------------
Parameters combination 167 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 1.7528
Mean MAE : 1.3459
------------
------------
Parameters combination 168 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30, 'reg_qi': 5}
------------
Mean RMSE: 2.6246
Mean MAE : 2.3178
------------
------------
Parameters combination 169 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 1.5529
Mean MAE : 0.9925
------------
------------
Parameters combination 170 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 1.7533
Mean MAE : 1.3464
------------
------------
Parameters combination 171 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40, 'reg_qi': 5}
------------
Mean RMSE: 2.6246
Mean MAE : 2.3179
------------
------------
Parameters combination 172 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 1.5470
Mean MAE : 0.9845
------------
------------
Parameters combination 173 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 1.7551
Mean MAE : 1.3475
------------
------------
Parameters combination 174 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50, 'reg_qi': 5}
------------
Mean RMSE: 2.6246
Mean MAE : 2.3178
------------
------------
Parameters combination 175 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 1.5466
Mean MAE : 0.9834
------------
------------
Parameters combination 176 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 1.7538
Mean MAE : 1.3475
------------
------------
Parameters combination 177 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60, 'reg_qi': 5}
------------
Mean RMSE: 2.6246
Mean MAE : 2.3178
------------
------------
Parameters combination 178 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 1.5411
Mean MAE : 0.9786
------------
------------
Parameters combination 179 of 180
params:  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 1.7555
Mean MAE : 1.3483
------------
------------
Parameters combination 180 of 180
params:  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70, 'reg_qi': 5}
------------
Mean RMSE: 2.6246
Mean MAE : 2.3178
------------
          MAE      RMSE  n_epochs  \
0    1.047391  1.592606        40   
1    1.152684  1.696457        40   
2    1.291998  1.786308        40   
3    1.309240  1.793775        40   
4    1.114917  1.628319        40   
5    1.277830  1.775838        40   
6    1.638210  2.124048        40   
7    1.206065  1.673400        40   
8    1.262851  1.764878        40   
9    1.805812  2.314023        40   
10   1.307679  1.766544        40   
11   1.241507  1.748786        40   
12   1.888426  2.416916        40   
13   1.405554  1.867178        40   
14   1.235505  1.745313        40   
15   0.989384  1.556870        40   
16   1.188116  1.661143        40   
17   1.509687  1.877478        40   
18   1.023788  1.570169        40   
19   1.176513  1.654946        40   
20   1.510002  1.878225        40   
21   1.087189  1.601189        40   
22   1.166657  1.648582        40   
23   1.510348  1.878546        40   
24   1.155288  1.651392        40   
25   1.155628  1.642761        40   
26   1.511300  1.878910        40   
27   1.225608  1.709598        40   
28   1.149233  1.640395        40   
29   1.508655  1.876776        40   
..        ...       ...       ...   
150  0.992860  1.560272        60   
151  1.287299  1.719395        60   
152  1.943538  2.262193        60   
153  0.979907  1.546042        60   
154  1.287319  1.719253        60   
155  1.943410  2.262037        60   
156  0.978590  1.545363        60   
157  1.289165  1.721004        60   
158  1.943517  2.262121        60   
159  0.983626  1.545122        60   
160  1.287775  1.719192        60   
161  1.943480  2.262142        60   
162  0.985345  1.539670        60   
163  1.289158  1.720800        60   
164  1.943512  2.262177        60   
165  1.005794  1.566511        60   
166  1.345919  1.752850        60   
167  2.317818  2.624605        60   
168  0.992473  1.552887        60   
169  1.346394  1.753300        60   
170  2.317863  2.624625        60   
171  0.984486  1.547026        60   
172  1.347493  1.755075        60   
173  2.317845  2.624629        60   
174  0.983421  1.546599        60   
175  1.347491  1.753816        60   
176  2.317836  2.624617        60   
177  0.978645  1.541066        60   
178  1.348310  1.755536        60   
179  2.317835  2.624618        60   

                                                params  reg_pu  \
0    {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
1    {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 3...   0.100   
2    {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30,...   1.000   
3    {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
4    {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 4...   0.100   
5    {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40,...   1.000   
6    {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
7    {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 5...   0.100   
8    {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50,...   1.000   
9    {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
10   {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 6...   0.100   
11   {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60,...   1.000   
12   {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
13   {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 7...   0.100   
14   {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70,...   1.000   
15   {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
16   {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 3...   0.100   
17   {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 30,...   1.000   
18   {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
19   {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 4...   0.100   
20   {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 40,...   1.000   
21   {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
22   {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 5...   0.100   
23   {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 50,...   1.000   
24   {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
25   {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 6...   0.100   
26   {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 60,...   1.000   
27   {'n_epochs': 40, 'reg_pu': 0.001, 'n_factors':...   0.001   
28   {'n_epochs': 40, 'reg_pu': 0.1, 'n_factors': 7...   0.100   
29   {'n_epochs': 40, 'reg_pu': 1, 'n_factors': 70,...   1.000   
..                                                 ...     ...   
150  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
151  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 3...   0.100   
152  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30,...   1.000   
153  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
154  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 4...   0.100   
155  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40,...   1.000   
156  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
157  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 5...   0.100   
158  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50,...   1.000   
159  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
160  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 6...   0.100   
161  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60,...   1.000   
162  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
163  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 7...   0.100   
164  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70,...   1.000   
165  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
166  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 3...   0.100   
167  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 30,...   1.000   
168  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
169  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 4...   0.100   
170  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 40,...   1.000   
171  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
172  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 5...   0.100   
173  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 50,...   1.000   
174  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
175  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 6...   0.100   
176  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 60,...   1.000   
177  {'n_epochs': 60, 'reg_pu': 0.001, 'n_factors':...   0.001   
178  {'n_epochs': 60, 'reg_pu': 0.1, 'n_factors': 7...   0.100   
179  {'n_epochs': 60, 'reg_pu': 1, 'n_factors': 70,...   1.000   

                                             scores  
0      {'MAE': 1.0473913673, 'RMSE': 1.59260581977}  
1     {'MAE': 1.15268362911, 'RMSE': 1.69645721798}  
2      {'MAE': 1.29199805905, 'RMSE': 1.7863084875}  
3     {'MAE': 1.30924044958, 'RMSE': 1.79377487827}  
4     {'MAE': 1.11491701517, 'RMSE': 1.62831872943}  
5     {'MAE': 1.27783041066, 'RMSE': 1.77583803001}  
6      {'MAE': 1.6382101101, 'RMSE': 2.12404838468}  
7       {'MAE': 1.2060651047, 'RMSE': 1.6734004025}  
8     {'MAE': 1.26285121432, 'RMSE': 1.76487759265}  
9     {'MAE': 1.80581207801, 'RMSE': 2.31402297084}  
10     {'MAE': 1.3076789622, 'RMSE': 1.76654388625}  
11     {'MAE': 1.2415068343, 'RMSE': 1.74878639472}  
12    {'MAE': 1.88842566882, 'RMSE': 2.41691575401}  
13     {'MAE': 1.40555374458, 'RMSE': 1.8671783624}  
14    {'MAE': 1.23550476745, 'RMSE': 1.74531269255}  
15   {'MAE': 0.989383801516, 'RMSE': 1.55687026078}  
16    {'MAE': 1.18811606174, 'RMSE': 1.66114280694}  
17      {'MAE': 1.509687487, 'RMSE': 1.87747824947}  
18    {'MAE': 1.02378760548, 'RMSE': 1.57016904189}  
19    {'MAE': 1.17651258283, 'RMSE': 1.65494602078}  
20    {'MAE': 1.51000204791, 'RMSE': 1.87822534518}  
21      {'MAE': 1.087188921, 'RMSE': 1.60118883889}  
22    {'MAE': 1.16665714725, 'RMSE': 1.64858230302}  
23     {'MAE': 1.51034768218, 'RMSE': 1.8785461523}  
24    {'MAE': 1.15528797452, 'RMSE': 1.65139159937}  
25    {'MAE': 1.15562828663, 'RMSE': 1.64276063062}  
26    {'MAE': 1.51130024532, 'RMSE': 1.87890953142}  
27    {'MAE': 1.22560822088, 'RMSE': 1.70959772396}  
28    {'MAE': 1.14923274353, 'RMSE': 1.64039491383}  
29    {'MAE': 1.50865511529, 'RMSE': 1.87677634948}  
..                                              ...  
150  {'MAE': 0.992860052224, 'RMSE': 1.56027242065}  
151   {'MAE': 1.28729904768, 'RMSE': 1.71939523812}  
152   {'MAE': 1.94353820367, 'RMSE': 2.26219259322}  
153  {'MAE': 0.979907492114, 'RMSE': 1.54604164037}  
154   {'MAE': 1.28731885645, 'RMSE': 1.71925294302}  
155    {'MAE': 1.9434100586, 'RMSE': 2.26203697691}  
156  {'MAE': 0.978589949289, 'RMSE': 1.54536301746}  
157   {'MAE': 1.28916513617, 'RMSE': 1.72100379248}  
158   {'MAE': 1.94351697566, 'RMSE': 2.26212142397}  
159  {'MAE': 0.983625610695, 'RMSE': 1.54512190738}  
160   {'MAE': 1.28777535161, 'RMSE': 1.71919234018}  
161   {'MAE': 1.94348025889, 'RMSE': 2.26214211464}  
162   {'MAE': 0.98534519492, 'RMSE': 1.53967024353}  
163   {'MAE': 1.28915773762, 'RMSE': 1.72079973031}  
164    {'MAE': 1.94351240229, 'RMSE': 2.2621767331}  
165   {'MAE': 1.00579370181, 'RMSE': 1.56651073558}  
166   {'MAE': 1.34591906431, 'RMSE': 1.75284992394}  
167    {'MAE': 2.3178180624, 'RMSE': 2.62460547389}  
168  {'MAE': 0.992472996721, 'RMSE': 1.55288741024}  
169   {'MAE': 1.34639445732, 'RMSE': 1.75329985241}  
170    {'MAE': 2.31786321953, 'RMSE': 2.6246254043}  
171  {'MAE': 0.984485775797, 'RMSE': 1.54702587003}  
172   {'MAE': 1.34749338318, 'RMSE': 1.75507521532}  
173   {'MAE': 2.31784508857, 'RMSE': 2.62462928828}  
174  {'MAE': 0.983421227294, 'RMSE': 1.54659939988}  
175   {'MAE': 1.34749114735, 'RMSE': 1.75381637663}  
176    {'MAE': 2.3178357373, 'RMSE': 2.62461725026}  
177  {'MAE': 0.978644825545, 'RMSE': 1.54106554677}  
178   {'MAE': 1.34830995604, 'RMSE': 1.75553614851}  
179    {'MAE': 2.3178354598, 'RMSE': 2.62461750469}  

[180 rows x 6 columns]

In [37]:
print(grid_search.best_score['RMSE'])
print(grid_search.best_params['RMSE'])
print(grid_search.best_score['MAE'])
print(grid_search.best_params['MAE'])


1.53559006122
{'n_epochs': 50, 'reg_pu': 0.001, 'n_factors': 60, 'reg_qi': 5}
0.978589949289
{'n_epochs': 60, 'reg_pu': 0.001, 'n_factors': 50, 'reg_qi': 3}

60 latent factor seem to be optimal,


In [67]:
maelist = []
rmselist = []
factors = [20, 30, 40 ,50 ,60, 70, 80]
for i in factors:
    algo = NMF(n_factors = i, reg_pu = 0.001, reg_qi = 3)
    perf = evaluate(algo, data2)
    maelist.append(np.mean(perf['mae']))
    rmselist.append(np.mean(perf['rmse']))


Evaluating RMSE, MAE of algorithm NMF.

------------
Fold 1
RMSE: 1.5361
MAE:  1.0125
------------
Fold 2
RMSE: 1.6069
MAE:  1.0474
------------
Fold 3
RMSE: 1.5928
MAE:  1.0471
------------
Fold 4
RMSE: 1.5861
MAE:  1.0453
------------
Fold 5
RMSE: 1.5574
MAE:  1.0176
------------
------------
Mean RMSE: 1.5759
Mean MAE : 1.0340
------------
------------
Evaluating RMSE, MAE of algorithm NMF.

------------
Fold 1
RMSE: 1.5333
MAE:  0.9861
------------
Fold 2
RMSE: 1.5837
MAE:  0.9955
------------
Fold 3
RMSE: 1.5855
MAE:  1.0074
------------
Fold 4
RMSE: 1.5966
MAE:  1.0258
------------
Fold 5
RMSE: 1.5514
MAE:  0.9984
------------
------------
Mean RMSE: 1.5701
Mean MAE : 1.0026
------------
------------
Evaluating RMSE, MAE of algorithm NMF.

------------
Fold 1
RMSE: 1.5015
MAE:  0.9667
------------
Fold 2
RMSE: 1.5848
MAE:  0.9986
------------
Fold 3
RMSE: 1.5501
MAE:  0.9791
------------
Fold 4
RMSE: 1.5714
MAE:  1.0055
------------
Fold 5
RMSE: 1.5558
MAE:  0.9890
------------
------------
Mean RMSE: 1.5527
Mean MAE : 0.9878
------------
------------
Evaluating RMSE, MAE of algorithm NMF.

------------
Fold 1
RMSE: 1.5033
MAE:  0.9678
------------
Fold 2
RMSE: 1.5779
MAE:  1.0031
------------
Fold 3
RMSE: 1.5622
MAE:  0.9963
------------
Fold 4
RMSE: 1.5551
MAE:  0.9975
------------
Fold 5
RMSE: 1.5262
MAE:  0.9755
------------
------------
Mean RMSE: 1.5450
Mean MAE : 0.9880
------------
------------
Evaluating RMSE, MAE of algorithm NMF.

------------
Fold 1
RMSE: 1.4870
MAE:  0.9728
------------
Fold 2
RMSE: 1.5774
MAE:  1.0000
------------
Fold 3
RMSE: 1.5373
MAE:  0.9885
------------
Fold 4
RMSE: 1.5493
MAE:  0.9972
------------
Fold 5
RMSE: 1.5294
MAE:  0.9755
------------
------------
Mean RMSE: 1.5361
Mean MAE : 0.9868
------------
------------
Evaluating RMSE, MAE of algorithm NMF.

------------
Fold 1
RMSE: 1.4983
MAE:  0.9781
------------
Fold 2
RMSE: 1.5719
MAE:  1.0034
------------
Fold 3
RMSE: 1.5546
MAE:  1.0040
------------
Fold 4
RMSE: 1.5759
MAE:  1.0174
------------
Fold 5
RMSE: 1.5361
MAE:  0.9873
------------
------------
Mean RMSE: 1.5473
Mean MAE : 0.9980
------------
------------
Evaluating RMSE, MAE of algorithm NMF.

------------
Fold 1
RMSE: 1.5025
MAE:  0.9839
------------
Fold 2
RMSE: 1.5689
MAE:  1.0039
------------
Fold 3
RMSE: 1.5469
MAE:  1.0028
------------
Fold 4
RMSE: 1.5626
MAE:  1.0186
------------
Fold 5
RMSE: 1.5246
MAE:  0.9828
------------
------------
Mean RMSE: 1.5411
Mean MAE : 0.9984
------------
------------

Plot of varying evaluation metrics vs number of latent factors for NMF


In [68]:
results_df = pd.DataFrame({'Factors': factors, 'MAE': maelist, 'RMSE': rmselist})
plot1 = results_df.plot(x='Factors', y=['MAE', 'RMSE'], ylim=(0.9, 1.7), title = 'NMF: evaluation metrics vs number of latent factors')
fig = plot1.get_figure()
fig.savefig('../figures/NMFfactor.png')



In [46]:
from collections import defaultdict
def get_top_n(predictions, n=10):

    top_n = defaultdict(list)
    for uid, iid, true_r, est, _ in predictions:
        top_n[uid].append((iid, est))

    # Then sort the predictions for each user and retrieve the k highest ones.
    for uid, user_ratings in top_n.items():
        user_ratings.sort(key=lambda x: x[1], reverse=True)
        top_n[uid] = user_ratings[:n]

    return top_n


trainset = data2.build_full_trainset()
algo = NMF(n_epochs = 60, n_factors = 50, reg_pu = 0.001, reg_qi = 3)
algo.train(trainset)

# Than predict ratings for all pairs (u, i) that are NOT in the training set.
testset = trainset.build_anti_testset()
predictions = algo.test(testset)

top_n = get_top_n(predictions, n=10)

# Print the recommended items for each user

RecBooks is used to recommend books to a user.


In [77]:
def recbooks(mat, user_id, idict, cov = False):
    full_mat = np.zeros((len(rows),len(cols)))
    for row in mat:
        full_mat[row[0], row[1]] = row[2]
    
    nzero = full_mat[user_id].nonzero()[0]
    
    flipped_dict = dict(zip(idict.values(), idict.keys()))
    ans = [flipped_dict[i[0]] for i in top_n[user_id]]
    if not cov:
        print("Books Read -----")
        for i in nzero:
            print(flipped_dict[i])
        print()
        print("Recs -----")
        for i in ans:
            print(i)
    return ans

recbooks(nmat, 1,idict)


Books Read -----
A Second Chicken Soup for the Woman's Soul (Chicken Soup for the Soul Series)
Seabiscuit
The God of Small Things
Plainsong (Vintage Contemporaries)
Manhattan Hunt Club
Divine Secrets of the Ya-Ya Sisterhood : A Novel
Fall On Your Knees (Oprah #45)

Recs -----
Chicken Soup for the Woman's Soul (Chicken Soup for the Soul Series (Paper))
Invisible Man
Vittorio the Vampire: New Tales of the Vampires
Bridget Jones : The Edge of Reason
Table For Two
The Four Agreements: A Practical Guide to Personal Freedom
Purity in Death
On the Road (Penguin 20th Century Classics)
Black and Blue
Roses Are Red (Alex Cross Novels)
Pretend You Don't See Her
Someone to Watch Over Me : A Novel
All That Remains (Kay Scarpetta Mysteries (Paperback))
Guilty as Sin
The Color of Water: A Black Man's Tribute to His White Mother
Out[77]:
["Chicken Soup for the Woman's Soul (Chicken Soup for the Soul Series (Paper))",
 'Invisible Man',
 'Vittorio the Vampire: New Tales of the Vampires',
 'Bridget Jones : The Edge of Reason',
 'Table For Two',
 'The Four Agreements: A Practical Guide to Personal Freedom',
 'Purity in Death',
 'On the Road (Penguin 20th Century Classics)',
 'Black and Blue',
 'Roses Are Red (Alex Cross Novels)',
 "Pretend You Don't See Her",
 'Someone to Watch Over Me : A Novel',
 'All That Remains (Kay Scarpetta Mysteries (Paperback))',
 'Guilty as Sin',
 "The Color of Water: A Black Man's Tribute to His White Mother"]

Coverage : Percentage of books coverage from all the books when recommending top-m books


In [48]:
for m in [5, 8, 10, 15]:   
    cov = []
    top_n = get_top_n(predictions, m)
    for i in range(len(rows)):
        cov.extend(recbooks(nmat, i,idict, True))
    print("Coverage with", m, "recs:", len(set(cov)), "%")


Coverage with 5 recs: 81 %
Coverage with 8 recs: 87 %
Coverage with 10 recs: 92 %
Coverage with 15 recs: 95 %

NMF Scaling with items


In [49]:
trtimer = []
tetimer = []
for data4 in datasets:
    rows4 = data4.user_id.unique()
    cols4 = data4['Book-Title'].unique()
    print(data4.user_id.nunique(), data4.isbn.nunique())
    data4 = data4[['user_id', 'Book-Title', 'book_rating']]
    
    idict  = dict(zip(cols4, range(len(cols4))))
    udict = dict(zip(rows4, range(len(rows4))))
    
    data4.user_id = [
    udict[i] for i in data4.user_id
    ]
    data4['Book-Title'] = [
    idict[i] for i in data4['Book-Title']
    ]
    
    start = time.time()
    
    reader = Reader(rating_scale=(1, 10))
    data4 = Dataset.load_from_df(data4[['user_id', 'Book-Title', 'book_rating']], reader)
    data4.split(5)
    
    trainset = data4.build_full_trainset()
    algo = NMF(n_epochs = 60, n_factors = 70, reg_pu = 0.001, reg_qi = 5)
    
    
    algo.train(trainset)
    end = time.time()
    
    trt = end - start
    print(trt)
    
    testset = trainset.build_testset()
    
    start = time.time()
    predictions = algo.test(testset)
    end = time.time()
    
    tet = end - start
    print(tet)
    
    trtimer.append(trt)
    tetimer.append(tet)

    print()


2517 100
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/pandas/core/generic.py:2701: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  self[name] = value
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/ipykernel/__main__.py:16: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
2.0220389366149902
0.12902402877807617

3086 150
2.6030781269073486
0.08180785179138184

3543 200
3.2038509845733643
0.10391497611999512

4280 300
4.060749053955078
0.12465620040893555

5282 500
5.435039043426514
0.16994094848632812


In [50]:
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': trtimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 25), title = 'NMF Scaling: Time to train the dataset with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/traintimeNMF.png')



In [52]:
results_df1 = pd.DataFrame({'Items': [100, 150, 200, 300, 500], 'Time': tetimer})
plot1 = results_df1.plot(x='Items', y='Time', ylim=(0, 1), title = 'NMF Scaling: Time to Predict over Test Set with increase in items')
fig = plot1.get_figure()
fig.savefig('../figures/testtimeNMF.png')



In [ ]:


In [ ]:

Comparing our implementation with Surprise


In [71]:
sim_options = {
    'name': 'MSD',
    'user_based' : False
}
algo = KNNWithMeans(sim_options = sim_options, k = 5, min_k =2)

In [72]:
perf = evaluate(algo, data2)


Evaluating RMSE, MAE of algorithm KNNWithMeans.

------------
Fold 1
Computing the msd similarity matrix...
Done computing similarity matrix.
RMSE: 1.5986
MAE:  1.1492
------------
Fold 2
Computing the msd similarity matrix...
Done computing similarity matrix.
RMSE: 1.5865
MAE:  1.1323
------------
Fold 3
Computing the msd similarity matrix...
Done computing similarity matrix.
RMSE: 1.6025
MAE:  1.1575
------------
Fold 4
Computing the msd similarity matrix...
Done computing similarity matrix.
RMSE: 1.5493
MAE:  1.1212
------------
Fold 5
Computing the msd similarity matrix...
Done computing similarity matrix.
RMSE: 1.5827
MAE:  1.1369
------------
------------
Mean RMSE: 1.5839
Mean MAE : 1.1394
------------
------------