Assignment 7

1. Practice implementing UserCF and ItemCF in Python

In [1]:
# A dictionary of movie critics and their ratings of a small
# set of movies
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
      'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
      'The Night Listener': 3.0},
     'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
      'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
      'You, Me and Dupree': 3.5},
     'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
      'Superman Returns': 3.5, 'The Night Listener': 4.0},
     'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
      'The Night Listener': 4.5, 'Superman Returns': 4.0,
      'You, Me and Dupree': 2.5},
     'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
      'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
      'You, Me and Dupree': 2.0},
     'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
      'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
     'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
In [2]:
critics['Lisa Rose']['Lady in the Water']
Out[2]:
2.5
In [3]:
critics['Toby']
Out[3]:
{'Snakes on a Plane': 4.5, 'Superman Returns': 4.0, 'You, Me and Dupree': 1.0}

Finding similar users

In [4]:
# Similarity metric 1: Euclidean distance
import numpy as np
np.sqrt(np.power(5-4, 2) + np.power(4-1, 2)) # (5-4) and (4-1) are the rating differences along the two axes; np.power(x, 2) squares them
Out[4]:
3.1622776601683795
In [5]:
1.0 /(1 + np.sqrt(np.power(5-4, 2) + np.power(4-1, 2)) )
Out[5]:
0.2402530733520421
In [6]:
# Returns a distance-based similarity score for person1 and person2, using the logic above to measure how similar two people are
def sim_distance(prefs,person1,person2):
    # Get the list of shared_items, i.e. the items both people have rated
    si={}
    for item in prefs[person1]:
        if item in prefs[person2]:
            si[item]=1
    # if they have no ratings in common, return 0
    if len(si)==0: return 0
    # Add up the squares of all the differences
    sum_of_squares=np.sum([np.power(prefs[person1][item]-prefs[person2][item],2) for item in si.keys()])
    
    return 1/(1+np.sqrt(sum_of_squares) )
In [7]:
sim_distance(critics, 'Lisa Rose','Toby')
Out[7]:
0.3483314773547883
In [8]:
# Similarity metric 2: Pearson correlation coefficient
# Returns the Pearson correlation coefficient for p1 and p2
def sim_pearson(prefs,p1,p2):
    # Get the list of mutually rated items  
    si={}
    for item in prefs[p1]:
        if item in prefs[p2]: si[item]=1
    # Find the number of elements
    n=len(si)
    # if there are no ratings in common, return 0
    if n==0: return 0
    # Add up all the preferences
    sum1=np.sum([prefs[p1][it] for it in si])
    sum2=np.sum([prefs[p2][it] for it in si])
    # Sum up the squares
    sum1Sq=np.sum([np.power(prefs[p1][it],2) for it in si])
    sum2Sq=np.sum([np.power(prefs[p2][it],2) for it in si])
    # Sum up the products
    pSum=np.sum([prefs[p1][it]*prefs[p2][it] for it in si])
    # Calculate Pearson score
    num=pSum-(sum1*sum2/n)
    den=np.sqrt((sum1Sq-np.power(sum1,2)/n)*(sum2Sq-np.power(sum2,2)/n))
    if den==0: return 0
    return num/den
In [9]:
sim_pearson(critics, 'Lisa Rose','Toby')
Out[9]:
0.9912407071619299
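
As a quick cross-check (added here, not part of the original notebook, and assuming SciPy is installed), the same correlation can be reproduced with scipy.stats.pearsonr on the movies that Lisa Rose and Toby have both rated:

# Added sanity check: compare sim_pearson with SciPy's implementation on the shared items.
from scipy.stats import pearsonr

shared = [m for m in critics['Lisa Rose'] if m in critics['Toby']]
r, _ = pearsonr([critics['Lisa Rose'][m] for m in shared],
                [critics['Toby'][m] for m in shared])
print(r)  # should agree with the 0.9912... reported above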
In [10]:
# Returns the best matches for person from the prefs dictionary.
# Number of results and similarity function are optional params.
def topMatches(prefs,person,n=5,similarity=sim_pearson):
    scores=[(similarity(prefs,person,other),other)
        for other in prefs if other!=person]
    # Sort the list so the highest scores appear at the top 
    scores.sort( )
    scores.reverse( )
    return scores[0:n]
In [11]:
topMatches(critics,'Toby',n=3) # top-N most similar users to Toby
Out[11]:
[(0.9912407071619299, 'Lisa Rose'),
 (0.9244734516419049, 'Mick LaSalle'),
 (0.8934051474415647, 'Claudia Puig')]
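
For comparison (an added illustration, output not shown), the same top-N query can be run with the Euclidean-based metric instead of Pearson:

# Added illustration: rank Toby's neighbours by the distance-based similarity instead.
topMatches(critics, 'Toby', n=3, similarity=sim_distance)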

Recommending Items

In [12]:
# Gets recommendations for a person by using a weighted average
# of every other user's rankings
def getRecommendations(prefs,person,similarity=sim_pearson):
    totals={}
    simSums={}
    for other in prefs:    
        # don't compare me to myself
        if other==person: continue
        sim=similarity(prefs,person,other)
        # ignore scores of zero or lower
        if sim<=0: continue
        for item in prefs[other]:   
            # only score movies I haven't seen yet
            if item not in prefs[person] or prefs[person][item]==0:     
                # Similarity * Score
                totals.setdefault(item,0)
                totals[item]+=prefs[other][item]*sim
                # Sum of similarities
                simSums.setdefault(item,0)
                simSums[item]+=sim
    # Create the normalized list
    rankings=[(total/simSums[item],item) for item,total in totals.items()]
    # Return the sorted list
    rankings.sort()
    rankings.reverse()
    return rankings
In [13]:
# Now you can find out what movies I should watch next:
getRecommendations(critics,'Toby')
Out[13]:
[(3.3477895267131017, 'The Night Listener'),
 (2.8325499182641614, 'Lady in the Water'),
 (2.530980703765565, 'Just My Luck')]
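
To see where, say, the 3.35 for 'The Night Listener' comes from, the weighted average can be unpacked by hand. The snippet below is an added illustration (not in the original notebook): it prints each positive-similarity neighbour's similarity to Toby and their rating of the movie, then divides the similarity-weighted sum of ratings by the sum of similarities.

# Added illustration: per-neighbour contributions behind getRecommendations.
movie = 'The Night Listener'
num, den = 0.0, 0.0
for other in critics:
    if other == 'Toby':
        continue
    sim = sim_pearson(critics, 'Toby', other)
    if sim <= 0 or movie not in critics[other]:
        continue
    num += sim * critics[other][movie]
    den += sim
    print(other, round(sim, 2), critics[other][movie])
print(num / den)  # matches the score reported above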
In [14]:
# You’ll find that the results are only affected very slightly by the choice of similarity metric.
getRecommendations(critics,'Toby',similarity=sim_distance)
Out[14]:
[(3.457128694491423, 'The Night Listener'),
 (2.778584003814924, 'Lady in the Water'),
 (2.422482042361917, 'Just My Luck')]

Item-based filtering

In [15]:
# For item-based filtering you just need to swap the people and the items.
def transformPrefs(prefs):
    result={}
    for person in prefs:
        for item in prefs[person]:
            result.setdefault(item,{})
            # Flip item and person
            result[item][person]=prefs[person][item]
    return result

movies = transformPrefs(critics)
In [16]:
movies
Out[16]:
{'Just My Luck': {'Claudia Puig': 3.0,
  'Gene Seymour': 1.5,
  'Lisa Rose': 3.0,
  'Mick LaSalle': 2.0},
 'Lady in the Water': {'Gene Seymour': 3.0,
  'Jack Matthews': 3.0,
  'Lisa Rose': 2.5,
  'Michael Phillips': 2.5,
  'Mick LaSalle': 3.0},
 'Snakes on a Plane': {'Claudia Puig': 3.5,
  'Gene Seymour': 3.5,
  'Jack Matthews': 4.0,
  'Lisa Rose': 3.5,
  'Michael Phillips': 3.0,
  'Mick LaSalle': 4.0,
  'Toby': 4.5},
 'Superman Returns': {'Claudia Puig': 4.0,
  'Gene Seymour': 5.0,
  'Jack Matthews': 5.0,
  'Lisa Rose': 3.5,
  'Michael Phillips': 3.5,
  'Mick LaSalle': 3.0,
  'Toby': 4.0},
 'The Night Listener': {'Claudia Puig': 4.5,
  'Gene Seymour': 3.0,
  'Jack Matthews': 3.0,
  'Lisa Rose': 3.0,
  'Michael Phillips': 4.0,
  'Mick LaSalle': 3.0},
 'You, Me and Dupree': {'Claudia Puig': 2.5,
  'Gene Seymour': 3.5,
  'Jack Matthews': 3.5,
  'Lisa Rose': 2.5,
  'Mick LaSalle': 2.0,
  'Toby': 1.0}}
In [17]:
topMatches(movies,'Superman Returns')  
Out[17]:
[(0.6579516949597695, 'You, Me and Dupree'),
 (0.4879500364742689, 'Lady in the Water'),
 (0.11180339887498941, 'Snakes on a Plane'),
 (-0.1798471947990544, 'The Night Listener'),
 (-0.42289003161103106, 'Just My Luck')]
In [18]:
def calculateSimilarItems(prefs,n=10):
    # Create a dictionary of items showing which other items they
    # are most similar to.
    result={}
    # Invert the preference matrix to be item-centric
    itemPrefs=transformPrefs(prefs)
    c=0
    for item in itemPrefs:
        # Status updates for large datasets
        c+=1
        if c%100==0: 
            print ("%d / %d" % (c,len(itemPrefs)))
        # Find the most similar items to this one
        scores=topMatches(itemPrefs,item,n=n,similarity=sim_distance)
        result[item]=scores
    return result

itemsim=calculateSimilarItems(critics) 
itemsim
Out[18]:
{'Just My Luck': [(0.3483314773547883, 'Lady in the Water'),
  (0.32037724101704074, 'You, Me and Dupree'),
  (0.2989350844248255, 'The Night Listener'),
  (0.2553967929896867, 'Snakes on a Plane'),
  (0.20799159651347807, 'Superman Returns')],
 'Lady in the Water': [(0.4494897427831781, 'You, Me and Dupree'),
  (0.38742588672279304, 'The Night Listener'),
  (0.3483314773547883, 'Snakes on a Plane'),
  (0.3483314773547883, 'Just My Luck'),
  (0.2402530733520421, 'Superman Returns')],
 'Snakes on a Plane': [(0.3483314773547883, 'Lady in the Water'),
  (0.32037724101704074, 'The Night Listener'),
  (0.3090169943749474, 'Superman Returns'),
  (0.2553967929896867, 'Just My Luck'),
  (0.1886378647726465, 'You, Me and Dupree')],
 'Superman Returns': [(0.3090169943749474, 'Snakes on a Plane'),
  (0.252650308587072, 'The Night Listener'),
  (0.2402530733520421, 'Lady in the Water'),
  (0.20799159651347807, 'Just My Luck'),
  (0.1918253663634734, 'You, Me and Dupree')],
 'The Night Listener': [(0.38742588672279304, 'Lady in the Water'),
  (0.32037724101704074, 'Snakes on a Plane'),
  (0.2989350844248255, 'Just My Luck'),
  (0.29429805508554946, 'You, Me and Dupree'),
  (0.252650308587072, 'Superman Returns')],
 'You, Me and Dupree': [(0.4494897427831781, 'Lady in the Water'),
  (0.32037724101704074, 'Just My Luck'),
  (0.29429805508554946, 'The Night Listener'),
  (0.1918253663634734, 'Superman Returns'),
  (0.1886378647726465, 'Snakes on a Plane')]}
In [19]:
def getRecommendedItems(prefs,itemMatch,user):    
    userRatings=prefs[user]
    scores={}
    totalSim={}
    # Loop over items rated by this user
    for (item,rating) in userRatings.items( ):
        # Loop over items similar to this one
        for (similarity,item2) in itemMatch[item]:
            # Ignore if this user has already rated this item
            if item2 in userRatings: continue                   
            # Weighted sum of rating times similarity
            scores.setdefault(item2,0)
            scores[item2]+=similarity*rating
            # Sum of all the similarities
            totalSim.setdefault(item2,0)
            totalSim[item2]+=similarity
    # Divide each total score by total weighting to get an average
    rankings=[(score/totalSim[item],item) for item,score in scores.items( )]
    # Return the rankings from highest to lowest
    rankings.sort( )
    rankings.reverse( )
    return rankings

getRecommendedItems(critics,itemsim,'Toby')
Out[19]:
[(3.1667425234070894, 'The Night Listener'),
 (2.9366294028444346, 'Just My Luck'),
 (2.868767392626467, 'Lady in the Water')]
In [20]:
getRecommendations(movies,'Just My Luck')
Out[20]:
[(4.0, 'Michael Phillips'), (3.0, 'Jack Matthews')]
In [21]:
getRecommendations(movies, 'You, Me and Dupree')
Out[21]:
[(3.1637361366111816, 'Michael Phillips')]

2. Build a recommender system for music data with GraphLab

In [2]:
import graphlab as gl
# set canvas to show sframes and sgraphs in ipython notebook
gl.canvas.set_target('ipynb')
import matplotlib.pyplot as plt
%matplotlib inline
In [4]:
#train_file = 'http://s3.amazonaws.com/dato-datasets/millionsong/10000.txt'
train_file = 'F:/Anaconda/cjc/millionsong/song_usage_10000.txt'
sf = gl.SFrame.read_csv(train_file, header=False, delimiter='\t', verbose=False)
sf.rename({'X1':'user_id', 'X2':'music_id', 'X3':'rating'}).show()
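
As a small added sanity check on the parse (standard SFrame/SArray calls, not part of the original run):

# Added sanity check: row count and mean rating of the loaded data.
print(sf.num_rows())
print(sf['rating'].mean())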
In [5]:
(train_set, test_set) = sf.random_split(0.8, seed=1)
In [6]:
popularity_model = gl.popularity_recommender.create(train_set, 'user_id', 'music_id', target = 'rating')
Recsys training: model = popularity
Preparing data set.
    Data has 1599753 observations with 76085 users and 10000 items.
    Data prepared in: 2.04653s
1599753 observations to process; with 10000 unique items.
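
Even before the formal comparison below, the popularity baseline can already be queried; this added call uses the standard recommend() method (output omitted):

# Added illustration: top-5 globally popular songs suggested for a handful of users.
popularity_model.recommend(users=gl.SArray(sf['user_id'].unique().head(5)), k=5)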
In [17]:
item_sim_model = gl.item_similarity_recommender.create(train_set, 'user_id', 'music_id', target = 'rating', 
                                                       similarity_type='cosine')
Recsys training: model = item_similarity
Preparing data set.
    Data has 1899873 observations with 76291 users and 10000 items.
    Data prepared in: 2.06946s
Training model from provided data.
Gathering per-item and per-user statistics.
+--------------------------------+------------+
| Elapsed Time (Item Statistics) | % Complete |
+--------------------------------+------------+
| 998us                          | 1.25       |
| 47.873ms                       | 100        |
+--------------------------------+------------+
Setting up lookup tables.
Processing data in one pass using dense lookup tables.
+-------------------------------------+------------------+-----------------+
| Elapsed Time (Constructing Lookups) | Total % Complete | Items Processed |
+-------------------------------------+------------------+-----------------+
| 290.225ms                           | 0                | 0               |
| 1.29s                               | 73.25            | 7334            |
| 3.09s                               | 100              | 10000           |
+-------------------------------------+------------------+-----------------+
Finalizing lookup tables.
Generating candidate set for working with new users.
Finished training in 3.20343s
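
Besides per-user recommendations, the item-similarity model also exposes the learned item neighbourhoods through its get_similar_items() method; the call below is an added illustration (output omitted):

# Added illustration: the 10 most cosine-similar songs for a few sample items.
item_sim_model.get_similar_items(items=sf['music_id'].unique().head(3), k=10)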
In [18]:
factorization_machine_model = gl.recommender.factorization_recommender.create(train_set, 'user_id', 'music_id',
                                                                              target='rating')
Recsys training: model = factorization_recommender
Preparing data set.
    Data has 1899873 observations with 76291 users and 10000 items.
    Data prepared in: 1.81016s
Training factorization_recommender for recommendations.
+--------------------------------+--------------------------------------------------+----------+
| Parameter                      | Description                                      | Value    |
+--------------------------------+--------------------------------------------------+----------+
| num_factors                    | Factor Dimension                                 | 8        |
| regularization                 | L2 Regularization on Factors                     | 1e-008   |
| solver                         | Solver used for training                         | sgd      |
| linear_regularization          | L2 Regularization on Linear Coefficients         | 1e-010   |
| max_iterations                 | Maximum Number of Iterations                     | 50       |
+--------------------------------+--------------------------------------------------+----------+
  Optimizing model using SGD; tuning step size.
  Using 237484 / 1899873 points for tuning the step size.
+---------+-------------------+------------------------------------------+
| Attempt | Initial Step Size | Estimated Objective Value                |
+---------+-------------------+------------------------------------------+
| 0       | 25                | No Decrease (237.317 >= 43.9434)         |
| 1       | 6.25              | No Decrease (229.517 >= 43.9434)         |
| 2       | 1.5625            | No Decrease (195.704 >= 43.9434)         |
| 3       | 0.390625          | No Decrease (96.2422 >= 43.9434)         |
| 4       | 0.0976562         | 18.4495                                  |
| 5       | 0.0488281         | 11.599                                   |
| 6       | 0.0244141         | 22.825                                   |
| 7       | 0.012207          | 33.5884                                  |
+---------+-------------------+------------------------------------------+
| Final   | 0.0488281         | 11.599                                   |
+---------+-------------------+------------------------------------------+
Starting Optimization.
+---------+--------------+-------------------+-----------------------+-------------+
| Iter.   | Elapsed Time | Approx. Objective | Approx. Training RMSE | Step Size   |
+---------+--------------+-------------------+-----------------------+-------------+
| Initial | 0us          | 43.2236           | 6.57446               |             |
+---------+--------------+-------------------+-----------------------+-------------+
| 1       | 504.65ms     | 43.0618           | 6.56171               | 0.0488281   |
| 2       | 828.782ms    | 40.4118           | 6.35665               | 0.0290334   |
| 3       | 1.20s        | 37.958            | 6.16065               | 0.0214205   |
| 4       | 1.66s        | 35.6245           | 5.96824               | 0.0172633   |
| 5       | 1.96s        | 33.5182           | 5.78907               | 0.014603    |
| 6       | 2.28s        | 31.3282           | 5.59669               | 0.0127367   |
| 10      | 3.31s        | 26.0345           | 5.10178               | 0.008683    |
| 11      | 3.57s        | 24.7764           | 4.97693               | 0.00808399  |
| 15      | 4.62s        | 21.8381           | 4.67235               | 0.00640622  |
| 20      | 6.11s        | 19.2638           | 4.38816               | 0.00516295  |
| 25      | 7.47s        | 17.3424           | 4.16342               | 0.00436732  |
| 30      | 8.73s        | 16.133            | 4.01553               | 0.00380916  |
| 35      | 9.84s        | 14.9881           | 3.8703                | 0.00339327  |
| 40      | 10.96s       | 14.1486           | 3.76025               | 0.00306991  |
| 45      | 12.20s       | 13.4125           | 3.66102               | 0.00281035  |
| 50      | 13.34s       | 12.8959           | 3.58974               | 0.00259682  |
+---------+--------------+-------------------+-----------------------+-------------+
Optimization Complete: Maximum number of passes through the data reached.
Computing final objective value and training RMSE.
       Final objective value: 11.5708
       Final training RMSE: 3.40017
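
The fitted factorization model can also score individual (user, song) pairs directly; this added call uses the model's predict() method (output omitted):

# Added illustration: predicted ratings for the held-out interactions.
factorization_machine_model.predict(test_set).head()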
In [19]:
result = gl.recommender.util.compare_models(test_set, 
                                            [popularity_model, item_sim_model, factorization_machine_model],
                                            user_sample=.5, skip_set=train_set)
compare_models: using 21888 users to estimate model performance
PROGRESS: Evaluate model M0
recommendations finished on 1000/21888 queries. users per second: 9653.91
recommendations finished on 2000/21888 queries. users per second: 6643.28
recommendations finished on 3000/21888 queries. users per second: 6374.8
recommendations finished on 4000/21888 queries. users per second: 6554.92
recommendations finished on 5000/21888 queries. users per second: 6486.77
recommendations finished on 6000/21888 queries. users per second: 6504.73
recommendations finished on 7000/21888 queries. users per second: 6603.61
recommendations finished on 8000/21888 queries. users per second: 6570.28
recommendations finished on 9000/21888 queries. users per second: 6516.23
recommendations finished on 10000/21888 queries. users per second: 6387.03
recommendations finished on 11000/21888 queries. users per second: 6263.64
recommendations finished on 12000/21888 queries. users per second: 6208.95
recommendations finished on 13000/21888 queries. users per second: 6204.49
recommendations finished on 14000/21888 queries. users per second: 6206.15
recommendations finished on 15000/21888 queries. users per second: 6189.7
recommendations finished on 16000/21888 queries. users per second: 6128.21
recommendations finished on 17000/21888 queries. users per second: 6040.51
recommendations finished on 18000/21888 queries. users per second: 5954.79
recommendations finished on 19000/21888 queries. users per second: 5909.32
recommendations finished on 20000/21888 queries. users per second: 5786.01
recommendations finished on 21000/21888 queries. users per second: 5753.27
Precision and recall summary statistics by cutoff
+--------+-------------------+-------------------+
| cutoff |   mean_precision  |    mean_recall    |
+--------+-------------------+-------------------+
|   1    | 0.000137061403509 | 4.18798732943e-05 |
|   2    | 0.000137061403509 | 0.000119548001949 |
|   3    | 9.13742690058e-05 | 0.000119548001949 |
|   4    | 0.000102796052632 |  0.00015533625731 |
|   5    |  0.00015533625731 | 0.000345699317739 |
|   6    | 0.000137061403509 | 0.000360928362573 |
|   7    |  0.00015011487051 | 0.000536062378168 |
|   8    | 0.000148483187135 | 0.000593171296296 |
|   9    | 0.000177672189734 | 0.000811001026873 |
|   10   | 0.000173611111111 | 0.000917604340713 |
+--------+-------------------+-------------------+
[10 rows x 3 columns]

('\nOverall RMSE: ', 6.2663755058529915)

Per User RMSE (best)
+-------------------------------+-------+------+
|            user_id            | count | rmse |
+-------------------------------+-------+------+
| 5860ee9518b29f9c18429c6c19... |   1   | 0.0  |
+-------------------------------+-------+------+
[1 rows x 3 columns]


Per User RMSE (worst)
+-------------------------------+-------+---------------+
|            user_id            | count |      rmse     |
+-------------------------------+-------+---------------+
| ac41abca59d8cab8ec94e0f912... |   1   | 444.131147541 |
+-------------------------------+-------+---------------+
[1 rows x 3 columns]


Per Item RMSE (best)
+--------------------+-------+------+
|      music_id      | count | rmse |
+--------------------+-------+------+
| SOJJGPT12A6701C91D |   1   | 0.0  |
+--------------------+-------+------+
[1 rows x 3 columns]


Per Item RMSE (worst)
+--------------------+-------+---------------+
|      music_id      | count |      rmse     |
+--------------------+-------+---------------+
| SOXQIUR12A8AE4654A |   3   | 256.533641075 |
+--------------------+-------+---------------+
[1 rows x 3 columns]

PROGRESS: Evaluate model M1
recommendations finished on 1000/21888 queries. users per second: 7957.67
recommendations finished on 2000/21888 queries. users per second: 7712.84
recommendations finished on 3000/21888 queries. users per second: 7792.75
recommendations finished on 4000/21888 queries. users per second: 7553.1
recommendations finished on 5000/21888 queries. users per second: 7427.2
recommendations finished on 6000/21888 queries. users per second: 6820.91
recommendations finished on 7000/21888 queries. users per second: 6547.31
recommendations finished on 8000/21888 queries. users per second: 6366.18
recommendations finished on 9000/21888 queries. users per second: 6159.78
recommendations finished on 10000/21888 queries. users per second: 5957.66
recommendations finished on 11000/21888 queries. users per second: 5792.77
recommendations finished on 12000/21888 queries. users per second: 5702.41
recommendations finished on 13000/21888 queries. users per second: 5558.54
recommendations finished on 14000/21888 queries. users per second: 5421.96
recommendations finished on 15000/21888 queries. users per second: 5293.95
recommendations finished on 16000/21888 queries. users per second: 5084.88
recommendations finished on 17000/21888 queries. users per second: 5091.24
recommendations finished on 18000/21888 queries. users per second: 5134.61
recommendations finished on 19000/21888 queries. users per second: 5100.62
recommendations finished on 20000/21888 queries. users per second: 5057.63
recommendations finished on 21000/21888 queries. users per second: 5019.35
Precision and recall summary statistics by cutoff
+--------+-----------------+-----------------+
| cutoff |  mean_precision |   mean_recall   |
+--------+-----------------+-----------------+
|   1    |  0.061860380117 | 0.0347308076677 |
|   2    | 0.0556240862573 | 0.0588643296013 |
|   3    |  0.049631457115 | 0.0770049432722 |
|   4    | 0.0452531067251 |  0.092618410169 |
|   5    | 0.0416027046784 |  0.10499794746  |
|   6    | 0.0388340643275 |  0.116469883263 |
|   7    | 0.0363473788638 |  0.125971869498 |
|   8    | 0.0342025310673 |  0.134823693763 |
|   9    | 0.0325038580247 |  0.143184116038 |
|   10   | 0.0309713084795 |  0.151272266522 |
+--------+-----------------+-----------------+
[10 rows x 3 columns]

('\nOverall RMSE: ', 6.9668466085852705)

Per User RMSE (best)
+-------------------------------+-------+-----------------+
|            user_id            | count |       rmse      |
+-------------------------------+-------+-----------------+
| e20521f2adc479ff9bf63dca52... |   1   | 0.0054383618491 |
+-------------------------------+-------+-----------------+
[1 rows x 3 columns]


Per User RMSE (worst)
+-------------------------------+-------+---------------+
|            user_id            | count |      rmse     |
+-------------------------------+-------+---------------+
| ac41abca59d8cab8ec94e0f912... |   1   | 454.990590693 |
+-------------------------------+-------+---------------+
[1 rows x 3 columns]


Per Item RMSE (best)
+--------------------+-------+---------------+
|      music_id      | count |      rmse     |
+--------------------+-------+---------------+
| SOBCRUE12A81C224B5 |   1   | 0.20188156267 |
+--------------------+-------+---------------+
[1 rows x 3 columns]


Per Item RMSE (worst)
+--------------------+-------+---------------+
|      music_id      | count |      rmse     |
+--------------------+-------+---------------+
| SOXQIUR12A8AE4654A |   3   | 262.692105049 |
+--------------------+-------+---------------+
[1 rows x 3 columns]

PROGRESS: Evaluate model M2
recommendations finished on 1000/21888 queries. users per second: 7061
recommendations finished on 2000/21888 queries. users per second: 6797.77
recommendations finished on 3000/21888 queries. users per second: 6596.52
recommendations finished on 4000/21888 queries. users per second: 6316.05
recommendations finished on 5000/21888 queries. users per second: 6402.76
recommendations finished on 6000/21888 queries. users per second: 6393.24
recommendations finished on 7000/21888 queries. users per second: 6369.06
recommendations finished on 8000/21888 queries. users per second: 6386.46
recommendations finished on 9000/21888 queries. users per second: 6372.91
recommendations finished on 10000/21888 queries. users per second: 6310.1
recommendations finished on 11000/21888 queries. users per second: 6178.94
recommendations finished on 12000/21888 queries. users per second: 6001.04
recommendations finished on 13000/21888 queries. users per second: 5868.87
recommendations finished on 14000/21888 queries. users per second: 5722.56
recommendations finished on 15000/21888 queries. users per second: 5738.31
recommendations finished on 16000/21888 queries. users per second: 5701.06
recommendations finished on 17000/21888 queries. users per second: 5700.83
recommendations finished on 18000/21888 queries. users per second: 5592.86
recommendations finished on 19000/21888 queries. users per second: 5480.67
recommendations finished on 20000/21888 queries. users per second: 5364.77
recommendations finished on 21000/21888 queries. users per second: 5317.21
Precision and recall summary statistics by cutoff
+--------+-------------------+-------------------+
| cutoff |   mean_precision  |    mean_recall    |
+--------+-------------------+-------------------+
|   1    | 0.000137061403509 | 5.33016569201e-05 |
|   2    | 0.000114217836257 | 0.000104699683236 |
|   3    | 0.000152290448343 | 0.000176744779952 |
|   4    |  0.00015990497076 | 0.000241468220498 |
|   5    | 0.000173611111111 | 0.000347056264682 |
|   6    | 0.000197977582846 | 0.000489828560004 |
|   7    | 0.000189275271512 | 0.000626889963513 |
|   8    | 0.000182748538012 | 0.000699227926476 |
|   9    | 0.000172595841455 | 0.000760144105813 |
|   10   | 0.000191885964912 | 0.000979584600747 |
+--------+-------------------+-------------------+
[10 rows x 3 columns]

('\nOverall RMSE: ', 7.877362702670098)

Per User RMSE (best)
+-------------------------------+-------+------------------+
|            user_id            | count |       rmse       |
+-------------------------------+-------+------------------+
| 3649d30507613e0d72c1066e00... |   1   | 0.00015681895492 |
+-------------------------------+-------+------------------+
[1 rows x 3 columns]


Per User RMSE (worst)
+-------------------------------+-------+---------------+
|            user_id            | count |      rmse     |
+-------------------------------+-------+---------------+
| ac41abca59d8cab8ec94e0f912... |   1   | 430.148133547 |
+-------------------------------+-------+---------------+
[1 rows x 3 columns]


Per Item RMSE (best)
+--------------------+-------+------------------+
|      music_id      | count |       rmse       |
+--------------------+-------+------------------+
| SOJUYUH12A8C1422D3 |   1   | 0.00176996278641 |
+--------------------+-------+------------------+
[1 rows x 3 columns]


Per Item RMSE (worst)
+--------------------+-------+---------------+
|      music_id      | count |      rmse     |
+--------------------+-------+---------------+
| SOXQIUR12A8AE4654A |   3   | 248.408390604 |
+--------------------+-------+---------------+
[1 rows x 3 columns]

In [20]:
K = 10
users = gl.SArray(sf['user_id'].unique().head(100))
In [21]:
recs = item_sim_model.recommend(users=users, k=K)
recs.head()
Out[21]:
+----------------------------------------------+--------------------+----------------+------+
|                   user_id                    |      music_id      |     score      | rank |
+----------------------------------------------+--------------------+----------------+------+
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOHLDSF12A58A772A8 | 0.228466063738 |  1   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOOQJRN12A6310EDC0 | 0.148439466953 |  2   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SODEZUP12A6310D7E8 | 0.143794847859 |  3   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOYQQAC12A6D4FD59E | 0.139993498723 |  4   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SODWUBY12A6D4F8E8A | 0.135766728057 |  5   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOTEZXJ12A8C1365AA | 0.131541397836 |  6   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOZOIUU12A67ADFA39 | 0.126514189773 |  7   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOXKFRI12A8C137A5F | 0.124773820241 |  8   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOAEURY12A8C13CA33 | 0.1181436744   |  9   |
| c66c10a9567f0d82ff31441a9fd5063e5cd9dfe8 ... | SOIQXJH12A8AE46CF6 | 0.116050759951 |  10  |
+----------------------------------------------+--------------------+----------------+------+
[10 rows x 4 columns]
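
Finally, a trained model can be persisted and reloaded in a later session; the snippet below is an added sketch using GraphLab's save()/load_model() API, with a hypothetical path:

# Added sketch: persist and reload the item-similarity model ('item_sim_model' is a hypothetical path).
item_sim_model.save('item_sim_model')
reloaded = gl.load_model('item_sim_model')
reloaded.recommend(users=users, k=K)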