In [70]:
# A dictionary of movie critics and their ratings of a small
# set of movies
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
In [39]:
critics['Lisa Rose']['Lady in the Water']
Out[39]:
In [40]:
critics['Toby']['Snakes on a Plane']=4.5
In [41]:
critics['Toby']
Out[41]:
In [48]:
# Euclidean distance
import numpy as np
np.sqrt(np.power(5-4, 2) + np.power(4-1, 2))
Out[48]:
This formula calculates the distance, which will be smaller for people who are more similar. However, you need a function that gives higher values for people who are similar. This can be done by adding 1 to the distance (so you never get a division-by-zero error) and inverting it:
In [49]:
1.0 /(1 + np.sqrt(np.power(5-4, 2) + np.power(4-1, 2)) )
Out[49]:
In [53]:
# Returns a distance-based similarity score for person1 and person2
def sim_distance(prefs, person1, person2):
    # Get the list of shared items
    si = {}
    for item in prefs[person1]:
        if item in prefs[person2]:
            si[item] = 1
    # If they have no ratings in common, return 0
    if len(si) == 0: return 0
    # Add up the squares of all the differences
    sum_of_squares = np.sum([np.power(prefs[person1][item] - prefs[person2][item], 2)
                             for item in prefs[person1] if item in prefs[person2]])
    return 1 / (1 + np.sqrt(sum_of_squares))
In [54]:
sim_distance(critics, 'Lisa Rose','Gene Seymour')
Out[54]:
In [59]:
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs, p1, p2):
    # Get the list of mutually rated items
    si = {}
    for item in prefs[p1]:
        if item in prefs[p2]: si[item] = 1
    # Find the number of shared elements
    n = len(si)
    # If they have no ratings in common, return 0
    if n == 0: return 0
    # Add up all the preferences
    sum1 = np.sum([prefs[p1][it] for it in si])
    sum2 = np.sum([prefs[p2][it] for it in si])
    # Sum up the squares
    sum1Sq = np.sum([np.power(prefs[p1][it], 2) for it in si])
    sum2Sq = np.sum([np.power(prefs[p2][it], 2) for it in si])
    # Sum up the products
    pSum = np.sum([prefs[p1][it] * prefs[p2][it] for it in si])
    # Calculate the Pearson score
    num = pSum - (sum1 * sum2 / n)
    den = np.sqrt((sum1Sq - np.power(sum1, 2) / n) * (sum2Sq - np.power(sum2, 2) / n))
    if den == 0: return 0
    return num / den
In [60]:
sim_pearson(critics, 'Lisa Rose','Gene Seymour')
Out[60]:
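As a quick sanity check (a sketch, assuming the critics dictionary and the NumPy import from the cells above), the same value should come out of np.corrcoef applied to the two critics' ratings of the movies they both rated:
In [ ]:
# Hypothetical check: the Pearson score above should match NumPy's correlation
shared = [m for m in critics['Lisa Rose'] if m in critics['Gene Seymour']]
x = [critics['Lisa Rose'][m] for m in shared]
y = [critics['Gene Seymour'][m] for m in shared]
np.corrcoef(x, y)[0, 1]  # expected to agree with sim_pearson(critics, 'Lisa Rose', 'Gene Seymour')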
In [75]:
# Returns the best matches for person from the prefs dictionary.
# Number of results and similarity function are optional params.
def topMatches(prefs, person, n=5, similarity=sim_pearson):
    scores = [(similarity(prefs, person, other), other)
              for other in prefs if other != person]
    # Sort the list so the highest scores appear at the top
    scores.sort()
    scores.reverse()
    return scores[0:n]
In [77]:
topMatches(critics,'Toby',n=3) # topN
Out[77]:
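Since the similarity function is an optional parameter, the same query can also be run with the Euclidean-distance score defined earlier (a minimal sketch using the sim_distance function from above):
In [ ]:
# Same top-N query, but scored with the distance-based similarity
topMatches(critics, 'Toby', n=3, similarity=sim_distance)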
In [66]:
# Gets recommendations for a person by using a weighted average
# of every other user's rankings
def getRecommendations(prefs, person, similarity=sim_pearson):
    totals = {}
    simSums = {}
    for other in prefs:
        # Don't compare me to myself
        if other == person: continue
        sim = similarity(prefs, person, other)
        # Ignore scores of zero or lower
        if sim <= 0: continue
        for item in prefs[other]:
            # Only score movies I haven't seen yet
            if item not in prefs[person] or prefs[person][item] == 0:
                # Similarity * score
                totals.setdefault(item, 0)
                totals[item] += prefs[other][item] * sim
                # Sum of similarities
                simSums.setdefault(item, 0)
                simSums[item] += sim
    # Create the normalized list
    rankings = [(total / simSums[item], item) for item, total in totals.items()]
    # Return the sorted list
    rankings.sort()
    rankings.reverse()
    return rankings
In [78]:
# Now you can find out what movies I should watch next:
getRecommendations(critics,'Toby')
Out[78]:
In [79]:
# You’ll find that the results are only affected very slightly by the choice of similarity metric.
getRecommendations(critics,'Toby',similarity=sim_distance)
Out[79]:
In [85]:
# To compare items instead of people, you just need to swap the people and the items.
def transformPrefs(prefs):
    result = {}
    for person in prefs:
        for item in prefs[person]:
            result.setdefault(item, {})
            # Flip item and person
            result[item][person] = prefs[person][item]
    return result

movies = transformPrefs(critics)
In [83]:
topMatches(movies,'Superman Returns')
Out[83]:
In [86]:
def calculateSimilarItems(prefs, n=10):
    # Create a dictionary of items showing which other items they
    # are most similar to.
    result = {}
    # Invert the preference matrix to be item-centric
    itemPrefs = transformPrefs(prefs)
    c = 0
    for item in itemPrefs:
        # Status updates for large datasets
        c += 1
        if c % 100 == 0:
            print "%d / %d" % (c, len(itemPrefs))
        # Find the most similar items to this one
        scores = topMatches(itemPrefs, item, n=n, similarity=sim_distance)
        result[item] = scores
    return result

itemsim = calculateSimilarItems(critics)
itemsim
Out[86]:
In [88]:
def getRecommendedItems(prefs, itemMatch, user):
    userRatings = prefs[user]
    scores = {}
    totalSim = {}
    # Loop over items rated by this user
    for (item, rating) in userRatings.items():
        # Loop over items similar to this one
        for (similarity, item2) in itemMatch[item]:
            # Ignore if this user has already rated this item
            if item2 in userRatings: continue
            # Weighted sum of rating times similarity
            scores.setdefault(item2, 0)
            scores[item2] += similarity * rating
            # Sum of all the similarities
            totalSim.setdefault(item2, 0)
            totalSim[item2] += similarity
    # Divide each total score by total weighting to get an average
    rankings = [(score / totalSim[item], item) for item, score in scores.items()]
    # Return the rankings from highest to lowest
    rankings.sort()
    rankings.reverse()
    return rankings

getRecommendedItems(critics, itemsim, 'Toby')
Out[88]:
In [81]:
getRecommendations(movies,'Just My Luck')
Out[81]:
In [84]:
getRecommendations(movies, 'You, Me and Dupree')
Out[84]:
In [11]:
# https://github.com/ParticleWave/RecommendationSystemStudy/blob/d1960056b96cfaad62afbfe39225ff680240d37e/PersonalRank.py
import os
import random

class Graph:
    def __init__(self):
        self.G = dict()

    def addEdge(self, p, q):
        if p not in self.G: self.G[p] = dict()
        if q not in self.G: self.G[q] = dict()
        self.G[p][q] = 1
        self.G[q][p] = 1

    def getGraphMatrix(self):
        return self.G
In [12]:
graph = Graph()
graph.addEdge('A', 'a')
graph.addEdge('A', 'c')
graph.addEdge('B', 'a')
graph.addEdge('B', 'b')
graph.addEdge('B', 'c')
graph.addEdge('B', 'd')
graph.addEdge('C', 'c')
graph.addEdge('C', 'd')
G = graph.getGraphMatrix()
print(G.keys())
In [10]:
G
Out[10]:
In [13]:
def PersonalRank(G, alpha, root, max_step):
    # G is the bipartite graph of users' ratings on items
    # alpha is the probability of continuing the random walk
    # root is the user we study
    # max_step is the number of iteration steps
    rank = dict()
    rank = {x: 0.0 for x in G.keys()}
    rank[root] = 1.0
    for k in range(max_step):
        tmp = {x: 0.0 for x in G.keys()}
        for i, ri in G.items():
            for j, wij in ri.items():
                if j not in tmp: tmp[j] = 0.0
                tmp[j] += alpha * rank[i] / (len(ri) * 1.0)
                if j == root: tmp[j] += 1.0 - alpha
        rank = tmp
        print(k, rank)
    return rank
In [9]:
print(PersonalRank(G, 0.8, 'A', 20))
# print(PersonalRank(G, 0.8, 'B', 20))
# print(PersonalRank(G, 0.8, 'C', 20))
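To turn the PersonalRank scores into recommendations for the root user, one can sort the item nodes by score and drop the items the user is already connected to. A minimal sketch, assuming (as in the toy graph above) that lowercase keys are items and uppercase keys are users:
In [ ]:
# A sketch: rank item nodes for user 'A', excluding items 'A' has already rated
rank = PersonalRank(G, 0.8, 'A', 20)
candidates = [(score, node) for node, score in rank.items()
              if node.islower() and node not in G['A']]
sorted(candidates, reverse=True)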
MovieLens is a real-world movie-rating dataset collected by the GroupLens research group at the University of Minnesota.
http://grouplens.org/datasets/movielens/1m/
These files contain 1,000,209 anonymous ratings of approximately 3,900 movies made by 6,040 MovieLens users who joined MovieLens in 2000.
In [91]:
def loadMovieLens(path='/Users/chengjun/bigdata/ml-1m/'):
    # Get movie titles
    movies = {}
    for line in open(path + 'movies.dat'):
        (id, title) = line.split('::')[0:2]
        movies[id] = title
    # Load data
    prefs = {}
    for line in open(path + 'ratings.dat'):
        (user, movieid, rating, ts) = line.split('::')
        prefs.setdefault(user, {})
        prefs[user][movies[movieid]] = float(rating)
    return prefs
In [92]:
prefs=loadMovieLens()
prefs['87']
Out[92]:
In [93]:
getRecommendations(prefs,'87')[0:30]
Out[93]:
In [94]:
itemsim=calculateSimilarItems(prefs,n=50)
In [95]:
getRecommendedItems(prefs,itemsim,'87')[0:30]
Out[95]:
In this notebook we will import GraphLab Create and use it to build recommendation models.
In [2]:
# set product key using GraphLab Create API
#import graphlab
#graphlab.product_key.set_product_key('4972-65DF-8E02-816C-AB15-021C-EC1B-0367')
In [16]:
%matplotlib inline
import graphlab
# set canvas to show sframes and sgraphs in ipython notebook
graphlab.canvas.set_target('ipynb')
import matplotlib.pyplot as plt
In [17]:
sf = graphlab.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
sf
Out[17]:
In [18]:
m = graphlab.recommender.create(sf, target='rating')
recs = m.recommend()
print recs
In [19]:
m['coefficients']
Out[19]:
After importing GraphLab Create, we can download data directly from S3. We have placed a preprocessed version of the Million Song Dataset on S3. This data set was used for a Kaggle challenge and includes data from The Echo Nest, SecondHandSongs, musiXmatch, and Last.fm. The file includes data for a subset of 10,000 songs. In the cells below, however, we read a local course-ratings file with the same user/item/rating structure.
In [7]:
#train_file = 'http://s3.amazonaws.com/dato-datasets/millionsong/10000.txt'
train_file = '/Users/chengjun/GitHub/cjc2016/data/ratings.dat'
sf = graphlab.SFrame.read_csv(train_file, header=False, delimiter='|', verbose=False)
sf.rename({'X1':'user_id', 'X2':'course_id', 'X3':'rating'}).show()
In order to evaluate the performance of our model, we randomly split the observations in our data set into two partitions: we will use train_set when creating our model and test_set for evaluating its performance.
In [8]:
(train_set, test_set) = sf.random_split(0.8, seed=1)
Create a model that makes recommendations using item popularity. When no target column is provided, the popularity is determined by the number of observations involving each item. When a target is provided, popularity is computed using the item's mean target value. When the target column contains ratings, for example, the model computes the mean rating for each item and uses this to rank items for recommendations.
One typically wants to start with a simple recommendation system that can serve as a baseline and verify that the rest of the pipeline works as expected. The recommender package has several models available for this purpose. For example, we can create a model that recommends items based on their overall popularity across all users.
In [10]:
import graphlab as gl
popularity_model = gl.popularity_recommender.create(train_set, 'user_id', 'course_id', target = 'rating')
If your data is implicit, i.e., you only observe interactions between users and items without a rating, then use ItemSimilarityModel with Jaccard similarity (a sketch follows the examples below).
If your data is explicit, i.e., the observations include an actual rating given by the user, then you have a wide array of options. ItemSimilarityModel with cosine or Pearson similarity can incorporate ratings. In addition, MatrixFactorizationModel, FactorizationModel, as well as LinearRegressionModel all support rating prediction.
itemsim_cosine_model = graphlab.recommender.create(data, target='rating', method='item_similarity', similarity_type='cosine')
factorization_machine_model = graphlab.recommender.create(data, target='rating', method='factorization_model')
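For the implicit case, a minimal sketch (assuming an SFrame named data that contains only user_id and item_id columns, with no rating column):
item_sim_jaccard_model = graphlab.item_similarity_recommender.create(data, user_id='user_id', item_id='item_id', similarity_type='jaccard')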
In the following code block, we compute all the item-item similarities and create an object that can be used for recommendations.
In [30]:
item_sim_model = gl.item_similarity_recommender.create(train_set, 'user_id', 'course_id', target = 'rating',
similarity_type='cosine')
Create a FactorizationRecommender that learns latent factors for each user and item and uses them to make rating predictions. This includes both standard matrix factorization as well as factorization machine models (in the situation where side data is available for users and/or items).
In [35]:
factorization_machine_model = gl.recommender.factorization_recommender.create(train_set, 'user_id', 'course_id',
target='rating')
It's straightforward to use GraphLab to compare models on a small subset of users in the test_set. The precision-recall plot that is computed shows the benefits of using the similarity-based model instead of the baseline popularity_model: better curves tend toward the upper-right corner of the plot.
The following command finds the top-ranked items for all users in the first 500 rows of test_set. The observations in train_set are not included in the predicted items.
In [36]:
result = gl.recommender.util.compare_models(test_set, [popularity_model, item_sim_model, factorization_machine_model],
user_sample=.1, skip_set=train_set)
Now let's ask the item similarity model for course recommendations for several users. We first create a list of users; a subset of their observed ratings, user_data, is built a little further below.
In [37]:
K = 10
users = gl.SArray(sf['user_id'].unique().head(100))
Next we use the recommend() function to query the model we created for recommendations. The returned object has four columns: user_id, course_id, the score that the algorithm gave this user for this course, and the course's rank (an integer from 0 to K-1). To see this we can grab the top few rows of recs:
In [15]:
recs = item_sim_model.recommend(users=users, k=K)
recs.head()
Out[15]:
To learn what courses these ids pertain to, we can merge in metadata about each course.
In [22]:
# Get the meta data of the courses
courses = gl.SFrame.read_csv('/Users/chengjun/GitHub/cjc2016/data/cursos.dat', header=False, delimiter='|', verbose=False)
courses.rename({'X1':'course_id', 'X2':'title', 'X3':'avg_rating',
'X4':'workload', 'X5':'university', 'X6':'difficulty', 'X7':'provider'}).show()
courses = courses[['course_id', 'title', 'provider']]
results = recs.join(courses, on='course_id', how='inner')
# Populate observed user-course data with course info
userset = frozenset(users)
ix = sf['user_id'].apply(lambda x: x in userset, int)
user_data = sf[ix]
user_data = user_data.join(courses, on='course_id')[['user_id', 'title', 'provider']]
In [23]:
# Print out some recommendations
for i in range(5):
    user = list(users)[i]
    print "User: " + str(i + 1)
    user_obs = user_data[user_data['user_id'] == user].head(K)
    del user_obs['user_id']
    user_recs = results[results['user_id'] == str(user)][['title', 'provider']]
    print "We were told that the user liked these courses: "
    print user_obs.head(K)
    print "We recommend these other courses:"
    print user_recs.head(K)
    print ""
In [ ]: