Latent Factor Models for Collaborative Filtering

Load Pandas, we are going to need it for manipulating data


In [1]:
import pandas as pd
import numpy as np
from IPython.display import Image
np.set_printoptions(precision = 3)

Now load the data


In [87]:
user_features_df = pd.read_csv("user_features.csv")
item_features_df = pd.read_csv("item_features.csv")


user_features_df["key"] = 0
user_features_df["user_id"] = range(0,user_features_df.shape[0])
item_features_df["key"] = 0
item_features_df["item_id"] = range(0,item_features_df.shape[0])

merged_df = pd.merge(user_features_df, item_features_df,left_index=True,on="key")
merged_df[["item_id", "user_id"]]




merged_df["rating"] = map(lambda ids: user_ratings_df.values[ids[1]][ids[2]], 
                          merged[["user_id", "item_id"]].itertuples())

train = merged_df.dropna()

test = merged_df[merged.isnull().any(axis=1)]

print test.to_latex()


\begin{tabular}{lrrrrrrrr}
\toprule
{} &  Sex &   Over60 &  key &  user\_id &  Critic0 &   Critic1 &  item\_id &  rating \\
\midrule
2 &  1.0 &      0.0 &    0 &        0 &      0.6 &       0.4 &        2 &     NaN \\
2 &  0.0 &      1.0 &    0 &        1 &      0.6 &       0.4 &        2 &     NaN \\
1 &  0.0 &      0.0 &    0 &        2 &      0.9 &       0.3 &        1 &     NaN \\
0 &  1.0 &      0.0 &    0 &        3 &      0.3 &       0.9 &        0 &     NaN \\
1 &  1.0 &      0.0 &    0 &        3 &      0.9 &       0.3 &        1 &     NaN \\
0 &  0.0 &      1.0 &    0 &        4 &      0.3 &       0.9 &        0 &     NaN \\
3 &  0.0 &      0.0 &    0 &        5 &      0.2 &       0.1 &        3 &     NaN \\
4 &  0.0 &      0.0 &    0 &        5 &      0.7 &       0.8 &        4 &     NaN \\
2 &  0.0 &      0.0 &    0 &        6 &      0.6 &       0.4 &        2 &     NaN \\
2 &  0.0 &      1.0 &    0 &        8 &      0.6 &       0.4 &        2 &     NaN \\
1 &  1.0 &      0.0 &    0 &        9 &      0.9 &       0.3 &        1 &     NaN \\
\bottomrule
\end{tabular}


In [69]:
n_latent_features = 2

user_ratings = data.values
latent_user_preferences = np.random.random((user_ratings.shape[0], n_latent_features))
latent_item_features = np.random.random((user_ratings.shape[1],n_latent_features))

user_features = user_features_df.values
item_features = item_features_df.values

print item_features_df.to_latex()


user_features = np.concatenate([np.ones(shape = (user_features.shape[0],1)), user_features], axis = 1)
item_features = np.concatenate([np.ones(shape = (item_features.shape[0],1)), item_features], axis = 1)



user_features_weights = np.random.random((user_ratings.shape[0], user_features.shape[1] ))
item_features_weights = np.random.random((user_ratings.shape[1],item_features.shape[1] ))



# print user_features


\begin{tabular}{lrrrr}
\toprule
{} &  Critic0 &   Critic1 &  key &  item\_id \\
\midrule
0 &      0.3 &       0.9 &    0 &        0 \\
1 &      0.9 &       0.3 &    0 &        1 \\
2 &      0.6 &       0.4 &    0 &        2 \\
3 &      0.2 &       0.1 &    0 &        3 \\
4 &      0.7 &       0.8 &    0 &        4 \\
5 &      0.9 &       0.1 &    0 &        5 \\
\bottomrule
\end{tabular}


In [10]:
def predict_rating(user_id,item_id):
    """ Predict a rating given a user_id and an item_id.
    """
    user_preference = latent_user_preferences[user_id]
    item_preference = latent_item_features[item_id]
    
    user_score = user_features_weights[user_id].dot(user_features[user_id])
    item_score = item_features_weights[item_id].dot(item_features[item_id])
    #print user_preference.dot(item_preference), user_score, item_score
    return user_preference.dot(item_preference) + user_score + item_score

def train(user_id, item_id, rating,alpha = 0.001, 
                                   latent_feature_weight_decay = 0.1, 
                                   user_weight_decay = 0.01,
                                   item_weight_decay = 0.0001):
    
    #print item_id
    prediction_rating = predict_rating(user_id, item_id)
    err =  ( prediction_rating - rating );
    #print err
    user_pref_values = latent_user_preferences[user_id][:]
    latent_user_preferences[user_id] -= alpha * err *  ( latent_item_features[item_id] + latent_feature_weight_decay*latent_user_preferences[user_id])
    latent_item_features[item_id] -= alpha * err * ( user_pref_values + latent_feature_weight_decay*latent_item_features[item_id])
    
    user_features_weights[user_id] -=alpha * err *(  user_features[user_id] + user_weight_decay* user_features_weights[user_id])
    item_features_weights[item_id] -=alpha * err * ( item_features_weights[item_id] + item_weight_decay* item_features_weights[item_id])
    
    
    return err
    


def sgd(iterations = 30000):
    """ Iterate over all users and all items and train for 
        a certain number of iterations
    """
    for iteration in range(0,iterations):
        error = []
        for user_id in range(0,latent_user_preferences.shape[0]):
            for item_id in range(0,latent_item_features.shape[0]):
                rating = user_ratings[user_id][item_id]
                if(not np.isnan(rating)):
                    err = train(user_id,item_id,rating)
                    error.append(err)
    mse = (np.array(error) ** 2).mean()          
    print mse

In [11]:
for _ in range(0,10): 
    sgd()


0.273995183504
0.271054522336
0.269496669735
0.26854000934
0.267895517342
0.267433701847
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-11-b56334b95ae2> in <module>()
      1 for _ in range(0,10):
----> 2     sgd()

<ipython-input-10-c711abf4de8e> in sgd(iterations)
     41                 rating = user_ratings[user_id][item_id]
     42                 if(not np.isnan(rating)):
---> 43                     err = train(user_id,item_id,rating)
     44                     error.append(err)
     45     mse = (np.array(error) ** 2).mean()

<ipython-input-10-c711abf4de8e> in train(user_id, item_id, rating, alpha, latent_feature_weight_decay, user_weight_decay, item_weight_decay)
     20     #print err
     21     user_pref_values = latent_user_preferences[user_id][:]
---> 22     latent_user_preferences[user_id] -= alpha * err *  ( latent_item_features[item_id] + latent_feature_weight_decay*latent_user_preferences[user_id])
     23     latent_item_features[item_id] -= alpha * err * ( user_pref_values + latent_feature_weight_decay*latent_item_features[item_id])
     24 

KeyboardInterrupt: 

In [6]:
predictions = np.zeros(shape = (latent_user_preferences.shape[0], latent_item_features.shape[0]) )
#print latent_user_preferences
print user_features_weights
print item_features_weights
for user_id in range(0,latent_user_preferences.shape[0]):
            for item_id in range(0,latent_item_features.shape[0]):
                predictions[user_id,item_id] =  predict_rating(user_id,item_id)


[[-0.627 -0.511  0.762]
 [ 1.574  0.771  1.696]
 [ 1.507  0.536  0.049]
 [ 1.668  1.417  0.83 ]
 [ 0.455  0.575 -0.285]
 [ 3.277  0.431  0.516]
 [-1.045  0.348  0.151]
 [ 0.578  0.486  0.999]
 [ 1.412  0.137  1.74 ]
 [-0.656 -1.067  0.195]]
[[ 0.214  0.047  0.489]
 [ 0.027  0.07   0.054]
 [ 3.252  5.666  0.442]
 [ 1.318  3.561  2.598]
 [ 3.442  3.224  3.44 ]]

In [7]:
values = [zip(user_ratings[i], predictions[i]) for i in range(0,predictions.shape[0])]
comparison_data = pd.DataFrame(values)
comparison_data.columns = data.columns
comparison_data.applymap(lambda (x,y): "(%2.3f|%2.3f)"%(x,y))


Out[7]:
The Call of Cthulhu Frankenstein Dracula Neuromancer Space Odyssey
0 (8.000|8.008) (2.000|1.681) (nan|-0.816) (5.000|5.245) (4.000|4.073)
1 (3.000|2.965) (2.000|2.776) (nan|20.372) (7.000|6.429) (7.000|6.824)
2 (9.000|9.171) (nan|3.729) (7.000|7.051) (8.000|7.749) (5.000|5.027)
3 (nan|8.891) (nan|4.804) (7.000|7.001) (8.000|8.000) (9.000|9.001)
4 (nan|2.049) (1.000|0.606) (8.000|7.995) (3.000|3.315) (7.000|7.092)
5 (2.000|2.015) (3.000|2.965) (5.000|5.002) (nan|3.709) (nan|16.105)
6 (4.000|4.101) (2.000|0.584) (nan|-0.616) (2.000|2.990) (7.000|7.332)
7 (7.000|6.925) (1.000|2.890) (2.000|2.018) (7.000|5.596) (9.000|8.575)
8 (3.000|3.050) (3.000|2.529) (nan|26.458) (7.000|7.303) (3.000|3.111)
9 (4.000|3.744) (nan|-0.187) (5.000|4.925) (3.000|3.376) (3.000|2.959)

In [8]:
comparison_data


Out[8]:
The Call of Cthulhu Frankenstein Dracula Neuromancer Space Odyssey
0 (8.0, 8.00753213201) (2.0, 1.68132476702) (nan, -0.816458005082) (5.0, 5.24534140815) (4.0, 4.07284713322)
1 (3.0, 2.9651437535) (2.0, 2.77574260418) (nan, 20.371555042) (7.0, 6.42856542196) (7.0, 6.82402220025)
2 (9.0, 9.17066467322) (nan, 3.72857419036) (7.0, 7.05097693395) (8.0, 7.74902903692) (5.0, 5.02693050564)
3 (nan, 8.89059466545) (nan, 4.80400274168) (7.0, 7.00071427519) (8.0, 8.00026744083) (9.0, 9.00146942092)
4 (nan, 2.04931531603) (1.0, 0.605664829651) (8.0, 7.99524753077) (3.0, 3.31531949311) (7.0, 7.09230434669)
5 (2.0, 2.01498637399) (3.0, 2.96545438759) (5.0, 5.00171715631) (nan, 3.70906735537) (nan, 16.1046429873)
6 (4.0, 4.10071806365) (2.0, 0.584157794895) (nan, -0.615769387347) (2.0, 2.9904970354) (7.0, 7.3316935937)
7 (7.0, 6.92484500343) (1.0, 2.88968696942) (2.0, 2.01830509523) (7.0, 5.59586294301) (9.0, 8.57455552152)
8 (3.0, 3.05046895637) (3.0, 2.52948108941) (nan, 26.4577215043) (7.0, 7.30250562055) (3.0, 3.11091030954)
9 (4.0, 3.7436458588) (nan, -0.186539411277) (5.0, 4.92523954912) (3.0, 3.37640884275) (3.0, 2.95913673392)

In [9]:
d = comparison_data.to_latex()
text_file = open("comparison.txt", "w")
text_file.write(d)
text_file.close()

In [ ]:


In [ ]:


In [ ]:


In [ ]: