# Copyright (c) Recommenders contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
from functools import wraps
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
r2_score,
explained_variance_score,
roc_auc_score,
log_loss,
)
from recommenders.utils.constants import (
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_PREDICTION_COL,
DEFAULT_RELEVANCE_COL,
DEFAULT_SIMILARITY_COL,
DEFAULT_ITEM_FEATURES_COL,
DEFAULT_ITEM_SIM_MEASURE,
DEFAULT_K,
DEFAULT_THRESHOLD,
)
from recommenders.datasets.pandas_df_utils import (
has_columns,
has_same_base_dtype,
lru_cache_df,
)
class ColumnMismatchError(Exception):
"""Exception raised when there is a mismatch in columns.
This exception is raised when an operation involving columns
encounters a mismatch or inconsistency.
Attributes:
message (str): Explanation of the error.
"""
pass
class ColumnTypeMismatchError(Exception):
"""Exception raised when there is a mismatch in column types.
This exception is raised when an operation involving column types
encounters a mismatch or inconsistency.
Attributes:
message (str): Explanation of the error.
"""
pass
def _check_column_dtypes(func):
"""Checks columns of DataFrame inputs
This includes the checks on:
* whether the input columns exist in the input DataFrames
* whether the data types of col_user as well as col_item are matched in the two input DataFrames.
Args:
func (function): function that will be wrapped
Returns:
function: Wrapper function for checking dtypes.
"""
@wraps(func)
def check_column_dtypes_wrapper(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_prediction=DEFAULT_PREDICTION_COL,
*args,
**kwargs,
):
"""Check columns of DataFrame inputs
Args:
rating_true (pandas.DataFrame): True data
rating_pred (pandas.DataFrame): Predicted data
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
"""
# Some ranking metrics don't have the rating column, so we don't need to check.
expected_true_columns = {col_user, col_item}
if "col_rating" in kwargs:
expected_true_columns.add(kwargs["col_rating"])
if not has_columns(rating_true, expected_true_columns):
raise ColumnMismatchError("Missing columns in true rating DataFrame")
if not has_columns(rating_pred, {col_user, col_item, col_prediction}):
raise ColumnMismatchError("Missing columns in predicted rating DataFrame")
if not has_same_base_dtype(
rating_true, rating_pred, columns=[col_user, col_item]
):
raise ColumnTypeMismatchError(
"Columns in provided DataFrames are not the same datatype"
)
return func(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
*args,
**kwargs,
)
return check_column_dtypes_wrapper
@_check_column_dtypes
@lru_cache_df(maxsize=1)
def merge_rating_true_pred(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
):
"""Join truth and prediction data frames on userID and itemID and return the true
and predicted rated with the correct index.
Args:
rating_true (pandas.DataFrame): True data
rating_pred (pandas.DataFrame): Predicted data
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
Returns:
        pandas.Series: Series with the true ratings
        pandas.Series: Series with the predicted ratings
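
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame({"userID": [1, 2], "itemID": [1, 1], "rating": [5, 3]})
        >>> pred = pd.DataFrame({"userID": [1, 2], "itemID": [1, 1], "prediction": [4.5, 2.5]})
        >>> y_true, y_pred = merge_rating_true_pred(true, pred)  # aligned Series [5, 3] and [4.5, 2.5]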
"""
# pd.merge will apply suffixes to columns which have the same name across both dataframes
suffixes = ["_true", "_pred"]
rating_true_pred = pd.merge(
rating_true, rating_pred, on=[col_user, col_item], suffixes=suffixes
)
if col_rating in rating_pred.columns:
col_rating = col_rating + suffixes[0]
if col_prediction in rating_true.columns:
col_prediction = col_prediction + suffixes[1]
return rating_true_pred[col_rating], rating_true_pred[col_prediction]
def rmse(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
):
"""Calculate Root Mean Squared Error
Args:
rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs
rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
Returns:
float: Root mean squared error
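
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame(
        ...     {"userID": [1, 1, 2], "itemID": [1, 2, 1], "rating": [5, 4, 3]}
        ... )
        >>> pred = pd.DataFrame(
        ...     {"userID": [1, 1, 2], "itemID": [1, 2, 1], "prediction": [4.5, 4.0, 2.5]}
        ... )
        >>> score = rmse(true, pred)  # sqrt((0.25 + 0 + 0.25) / 3) ~= 0.408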
"""
y_true, y_pred = merge_rating_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_rating=col_rating,
col_prediction=col_prediction,
)
return np.sqrt(mean_squared_error(y_true, y_pred))
def mae(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
):
"""Calculate Mean Absolute Error.
Args:
rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs
rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
Returns:
float: Mean Absolute Error.
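
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame({"userID": [1, 1, 2], "itemID": [1, 2, 1], "rating": [5, 4, 3]})
        >>> pred = pd.DataFrame({"userID": [1, 1, 2], "itemID": [1, 2, 1], "prediction": [4.5, 4.0, 2.5]})
        >>> score = mae(true, pred)  # (0.5 + 0 + 0.5) / 3 ~= 0.333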
"""
y_true, y_pred = merge_rating_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_rating=col_rating,
col_prediction=col_prediction,
)
return mean_absolute_error(y_true, y_pred)
def rsquared(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
):
"""Calculate R squared
Args:
rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs
rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
Returns:
        float: R squared (max=1; can be negative for poorly fitted models).
"""
y_true, y_pred = merge_rating_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_rating=col_rating,
col_prediction=col_prediction,
)
return r2_score(y_true, y_pred)
def exp_var(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
):
"""Calculate explained variance.
Args:
rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs
rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
Returns:
        float: Explained variance (max=1; can be negative for poorly fitted models).
"""
y_true, y_pred = merge_rating_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_rating=col_rating,
col_prediction=col_prediction,
)
return explained_variance_score(y_true, y_pred)
def auc(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
):
"""Calculate the Area-Under-Curve metric for implicit feedback typed
recommender, where rating is binary and prediction is float number ranging
from 0 to 1.
https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve
Note:
The evaluation does not require a leave-one-out scenario.
This metric does not calculate group-based AUC which considers the AUC scores
averaged across users. It is also not limited to k. Instead, it calculates the
        scores on the entire set of prediction results, regardless of the users.
Args:
rating_true (pandas.DataFrame): True data
rating_pred (pandas.DataFrame): Predicted data
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
Returns:
float: auc_score (min=0, max=1)
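
    Example:
        A minimal sketch with binary toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame({"userID": [1, 1, 2], "itemID": [1, 2, 1], "rating": [1, 0, 1]})
        >>> pred = pd.DataFrame({"userID": [1, 1, 2], "itemID": [1, 2, 1], "prediction": [0.9, 0.3, 0.6]})
        >>> score = auc(true, pred)  # both positives outrank the negative -> 1.0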
"""
y_true, y_pred = merge_rating_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_rating=col_rating,
col_prediction=col_prediction,
)
return roc_auc_score(y_true, y_pred)
def logloss(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
):
"""Calculate the logloss metric for implicit feedback typed
recommender, where rating is binary and prediction is float number ranging
from 0 to 1.
https://en.wikipedia.org/wiki/Loss_functions_for_classification#Cross_entropy_loss_(Log_Loss)
Args:
rating_true (pandas.DataFrame): True data
rating_pred (pandas.DataFrame): Predicted data
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
Returns:
        float: log_loss_score (min=0, max=inf)
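
    Example:
        A minimal sketch with binary toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame({"userID": [1, 1, 2], "itemID": [1, 2, 1], "rating": [1, 0, 1]})
        >>> pred = pd.DataFrame({"userID": [1, 1, 2], "itemID": [1, 2, 1], "prediction": [0.9, 0.3, 0.6]})
        >>> score = logloss(true, pred)  # -(ln(0.9) + ln(0.7) + ln(0.6)) / 3 ~= 0.324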
"""
y_true, y_pred = merge_rating_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_rating=col_rating,
col_prediction=col_prediction,
)
return log_loss(y_true, y_pred)
@_check_column_dtypes
@lru_cache_df(maxsize=1)
def merge_ranking_true_pred(
rating_true,
rating_pred,
col_user,
col_item,
col_prediction,
relevancy_method,
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
**_,
):
"""Filter truth and prediction data frames on common users
Args:
rating_true (pandas.DataFrame): True DataFrame
rating_pred (pandas.DataFrame): Predicted DataFrame
col_user (str): column name for user
col_item (str): column name for item
col_prediction (str): column name for prediction
relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
top k items are directly provided, so there is no need to compute the relevancy operation.
k (int): number of top k items per user (optional)
threshold (float): threshold of top items per user (optional)
Returns:
        pandas.DataFrame, pandas.DataFrame, int:
        DataFrame of recommendation hits, sorted by `col_user` and `rank`;
        DataFrame of hit counts vs. actual relevant items per user;
        number of unique user ids.
"""
# Make sure the prediction and true data frames have the same set of users
common_users = set(rating_true[col_user]).intersection(set(rating_pred[col_user]))
rating_true_common = rating_true[rating_true[col_user].isin(common_users)]
rating_pred_common = rating_pred[rating_pred[col_user].isin(common_users)]
n_users = len(common_users)
# Return hit items in prediction data frame with ranking information. This is used for calculating NDCG and MAP.
# Use first to generate unique ranking values for each item. This is to align with the implementation in
# Spark evaluation metrics, where index of each recommended items (the indices are unique to items) is used
# to calculate penalized precision of the ordered items.
if relevancy_method == "top_k":
top_k = k
elif relevancy_method == "by_threshold":
top_k = threshold
elif relevancy_method is None:
top_k = None
else:
raise NotImplementedError("Invalid relevancy_method")
df_hit = get_top_k_items(
dataframe=rating_pred_common,
col_user=col_user,
col_rating=col_prediction,
k=top_k,
)
df_hit = pd.merge(df_hit, rating_true_common, on=[col_user, col_item])[
[col_user, col_item, "rank"]
]
    # Count the number of hits vs. actual relevant items per user.
    # NOTE: the dict-renaming form `.agg({"hit": "count"})` used previously was
    # removed in pandas 1.0, so the counts are computed with `size()` instead.
    df_hit_count = pd.merge(
        df_hit.groupby(col_user).size().reset_index(name="hit"),
        rating_true_common.groupby(col_user).size().reset_index(name="actual"),
        on=col_user,
    )
return df_hit, df_hit_count, n_users
def precision_at_k(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
**_,
):
"""Precision at K.
Note:
We use the same formula to calculate precision@k as that in Spark.
More details can be found at
http://spark.apache.org/docs/2.1.1/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RankingMetrics.precisionAt
In particular, the maximum achievable precision may be < 1, if the number of items for a
user in rating_pred is less than k.
Args:
rating_true (pandas.DataFrame): True DataFrame
rating_pred (pandas.DataFrame): Predicted DataFrame
col_user (str): column name for user
col_item (str): column name for item
col_prediction (str): column name for prediction
relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
top k items are directly provided, so there is no need to compute the relevancy operation.
k (int): number of top k items per user
threshold (float): threshold of top items per user (optional)
Returns:
float: precision at k (min=0, max=1)
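
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 2, 3, 1, 4],
        ...      "rating": [5, 4, 3, 5, 4]}
        ... )
        >>> pred = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 4, 3, 1, 2],
        ...      "prediction": [5.0, 4.5, 4.2, 5.0, 4.0]}
        ... )
        >>> score = precision_at_k(true, pred, k=2)  # each user hits 1 of 2 -> 0.5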
"""
df_hit, df_hit_count, n_users = merge_ranking_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
relevancy_method=relevancy_method,
k=k,
threshold=threshold,
)
if df_hit.shape[0] == 0:
return 0.0
return (df_hit_count["hit"] / k).sum() / n_users
def recall_at_k(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
**_,
):
"""Recall at K.
Args:
rating_true (pandas.DataFrame): True DataFrame
rating_pred (pandas.DataFrame): Predicted DataFrame
col_user (str): column name for user
col_item (str): column name for item
col_prediction (str): column name for prediction
relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
top k items are directly provided, so there is no need to compute the relevancy operation.
k (int): number of top k items per user
threshold (float): threshold of top items per user (optional)
Returns:
float: recall at k (min=0, max=1). The maximum value is 1 even when fewer than
k items exist for a user in rating_true.
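
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 2, 3, 1, 4],
        ...      "rating": [5, 4, 3, 5, 4]}
        ... )
        >>> pred = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 4, 3, 1, 2],
        ...      "prediction": [5.0, 4.5, 4.2, 5.0, 4.0]}
        ... )
        >>> score = recall_at_k(true, pred, k=2)  # (1/3 + 1/2) / 2 ~= 0.417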
"""
df_hit, df_hit_count, n_users = merge_ranking_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
relevancy_method=relevancy_method,
k=k,
threshold=threshold,
)
if df_hit.shape[0] == 0:
return 0.0
return (df_hit_count["hit"] / df_hit_count["actual"]).sum() / n_users
def r_precision_at_k(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
**_,
):
"""R-precision at K.
    R-precision can be defined as the precision@R for each user, where R is the
    number of relevant items for the query. It is also equivalent to the recall
    at the R-th position.
    Note:
        Since R can be large, k caps the maximum possible R considered. If every
        user has more than k true items, then r-precision@k is equal to
        precision@k, so you might need to raise k to get meaningful results.
Args:
rating_true (pandas.DataFrame): True DataFrame
rating_pred (pandas.DataFrame): Predicted DataFrame
col_user (str): column name for user
col_item (str): column name for item
col_prediction (str): column name for prediction
relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
top k items are directly provided, so there is no need to compute the relevancy operation.
k (int): number of top k items per user
threshold (float): threshold of top items per user (optional)
Returns:
        float: R-precision at k (min=0, max=1). The maximum value is 1 even when
        fewer than k items exist for a user in rating_true.
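
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 2, 3, 1, 4],
        ...      "rating": [5, 4, 3, 5, 4]}
        ... )
        >>> pred = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 4, 3, 1, 2],
        ...      "prediction": [5.0, 4.5, 4.2, 5.0, 4.0]}
        ... )
        >>> score = r_precision_at_k(true, pred, k=2)  # (1/3 + 1/2) / 2 ~= 0.417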
"""
df_hit, df_hit_count, n_users = merge_ranking_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
relevancy_method=relevancy_method,
k=k,
threshold=threshold,
)
if df_hit.shape[0] == 0:
return 0.0
    df_merged = df_hit.merge(df_hit_count[[col_user, "actual"]])
    df_merged = df_merged[df_merged["rank"] <= df_merged["actual"]]
    return (
        df_merged.groupby(col_user).size()
        / df_hit_count.set_index(col_user)["actual"]
    ).mean()
def ndcg_at_k(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
score_type="binary",
discfun_type="loge",
**_,
):
"""Normalized Discounted Cumulative Gain (nDCG).
Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain
Args:
rating_true (pandas.DataFrame): True DataFrame
rating_pred (pandas.DataFrame): Predicted DataFrame
col_user (str): column name for user
col_item (str): column name for item
col_rating (str): column name for rating
col_prediction (str): column name for prediction
relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
top k items are directly provided, so there is no need to compute the relevancy operation.
k (int): number of top k items per user
threshold (float): threshold of top items per user (optional)
score_type (str): type of relevance scores ['binary', 'raw', 'exp']. With the default option 'binary', the
relevance score is reduced to either 1 (hit) or 0 (miss). Option 'raw' uses the raw relevance score.
Option 'exp' uses (2 ** RAW_RELEVANCE - 1) as the relevance score
discfun_type (str): type of discount function ['loge', 'log2'] used to calculate DCG.
Returns:
float: nDCG at k (min=0, max=1).
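
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 2, 3, 1, 4],
        ...      "rating": [5, 4, 3, 5, 4]}
        ... )
        >>> pred = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 4, 3, 1, 2],
        ...      "prediction": [5.0, 4.5, 4.2, 5.0, 4.0]}
        ... )
        >>> score = ndcg_at_k(true, pred, k=2, score_type="binary")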
"""
df_hit, _, _ = merge_ranking_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
relevancy_method=relevancy_method,
k=k,
threshold=threshold,
)
if df_hit.shape[0] == 0:
return 0.0
df_dcg = df_hit.merge(rating_pred, on=[col_user, col_item]).merge(
rating_true, on=[col_user, col_item], how="outer", suffixes=("_left", None)
)
if score_type == "binary":
df_dcg["rel"] = 1
elif score_type == "raw":
df_dcg["rel"] = df_dcg[col_rating]
elif score_type == "exp":
df_dcg["rel"] = 2 ** df_dcg[col_rating] - 1
else:
raise ValueError("score_type must be one of 'binary', 'raw', 'exp'")
if discfun_type == "loge":
discfun = np.log
elif discfun_type == "log2":
discfun = np.log2
else:
raise ValueError("discfun_type must be one of 'loge', 'log2'")
# Calculate the actual discounted gain for each record
df_dcg["dcg"] = df_dcg["rel"] / discfun(1 + df_dcg["rank"])
# Calculate the ideal discounted gain for each record
df_idcg = df_dcg.sort_values([col_user, col_rating], ascending=False)
df_idcg["irank"] = df_idcg.groupby(col_user, as_index=False, sort=False)[
col_rating
].rank("first", ascending=False)
df_idcg["idcg"] = df_idcg["rel"] / discfun(1 + df_idcg["irank"])
# Calculate the actual DCG for each user
df_user = df_dcg.groupby(col_user, as_index=False, sort=False).agg({"dcg": "sum"})
# Calculate the ideal DCG for each user
df_user = df_user.merge(
df_idcg.groupby(col_user, as_index=False, sort=False)
.head(k)
.groupby(col_user, as_index=False, sort=False)
.agg({"idcg": "sum"}),
on=col_user,
)
# DCG over IDCG is the normalized DCG
df_user["ndcg"] = df_user["dcg"] / df_user["idcg"]
return df_user["ndcg"].mean()
@lru_cache_df(maxsize=1)
def _get_reciprocal_rank(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
):
df_hit, df_hit_count, n_users = merge_ranking_true_pred(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
relevancy_method=relevancy_method,
k=k,
threshold=threshold,
)
if df_hit.shape[0] == 0:
return None, n_users
# calculate reciprocal rank of items for each user and sum them up
df_hit_sorted = df_hit.copy()
df_hit_sorted["rr"] = (
df_hit_sorted.groupby(col_user).cumcount() + 1
) / df_hit_sorted["rank"]
df_hit_sorted = df_hit_sorted.groupby(col_user).agg({"rr": "sum"}).reset_index()
return pd.merge(df_hit_sorted, df_hit_count, on=col_user), n_users
def map(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
**_,
):
"""Mean Average Precision for top k prediction items
The implementation of MAP is referenced from Spark MLlib evaluation metrics.
https://spark.apache.org/docs/2.3.0/mllib-evaluation-metrics.html#ranking-systems
A good reference can be found at:
http://web.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
Note:
The MAP is meant to calculate Avg. Precision for the relevant items, so it is normalized by the number of
relevant items in the ground truth data, instead of k.
Args:
rating_true (pandas.DataFrame): True DataFrame
rating_pred (pandas.DataFrame): Predicted DataFrame
col_user (str): column name for user
col_item (str): column name for item
col_prediction (str): column name for prediction
relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
top k items are directly provided, so there is no need to compute the relevancy operation.
k (int): number of top k items per user
threshold (float): threshold of top items per user (optional)
Returns:
float: MAP (min=0, max=1)
"""
df_merge, n_users = _get_reciprocal_rank(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
relevancy_method=relevancy_method,
k=k,
threshold=threshold,
)
if df_merge is None:
return 0.0
else:
return (df_merge["rr"] / df_merge["actual"]).sum() / n_users
def map_at_k(
rating_true,
rating_pred,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_prediction=DEFAULT_PREDICTION_COL,
relevancy_method="top_k",
k=DEFAULT_K,
threshold=DEFAULT_THRESHOLD,
**_,
):
"""Mean Average Precision at k
The implementation of MAP@k is referenced from Spark MLlib evaluation metrics.
https://github.com/apache/spark/blob/b938ff9f520fd4e4997938284ffa0aba9ea271fc/mllib/src/main/scala/org/apache/spark/mllib/evaluation/RankingMetrics.scala#L99
Args:
rating_true (pandas.DataFrame): True DataFrame
rating_pred (pandas.DataFrame): Predicted DataFrame
col_user (str): column name for user
col_item (str): column name for item
col_prediction (str): column name for prediction
relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
top k items are directly provided, so there is no need to compute the relevancy operation.
k (int): number of top k items per user
threshold (float): threshold of top items per user (optional)
Returns:
float: MAP@k (min=0, max=1)
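
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating", "prediction"):

        >>> import pandas as pd
        >>> true = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 2, 3, 1, 4],
        ...      "rating": [5, 4, 3, 5, 4]}
        ... )
        >>> pred = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2, 2], "itemID": [1, 4, 3, 1, 2],
        ...      "prediction": [5.0, 4.5, 4.2, 5.0, 4.0]}
        ... )
        >>> score = map_at_k(true, pred, k=2)  # each user's single hit at rank 1 -> 0.5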
"""
df_merge, n_users = _get_reciprocal_rank(
rating_true=rating_true,
rating_pred=rating_pred,
col_user=col_user,
col_item=col_item,
col_prediction=col_prediction,
relevancy_method=relevancy_method,
k=k,
threshold=threshold,
)
if df_merge is None:
return 0.0
else:
return (
df_merge["rr"] / df_merge["actual"].apply(lambda x: min(x, k))
).sum() / n_users
def get_top_k_items(
dataframe, col_user=DEFAULT_USER_COL, col_rating=DEFAULT_RATING_COL, k=DEFAULT_K
):
"""Get the input customer-item-rating tuple in the format of Pandas
DataFrame, output a Pandas DataFrame in the dense format of top k items
for each user.
Note:
If it is implicit rating, just append a column of constants to be
ratings.
Args:
dataframe (pandas.DataFrame): DataFrame of rating data (in the format
customerID-itemID-rating)
col_user (str): column name for user
col_rating (str): column name for rating
        k (int or None): number of items for each user; None means the input has
            already been filtered to the top k items per user and sorted by rating,
            so there is no need to do it again.
Returns:
pandas.DataFrame: DataFrame of top k items for each user, sorted by `col_user` and `rank`
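
    Example:
        A minimal sketch with toy data, assuming the default column names
        ("userID", "itemID", "rating"):

        >>> import pandas as pd
        >>> df = pd.DataFrame(
        ...     {"userID": [1, 1, 1, 2], "itemID": [1, 2, 3, 1], "rating": [3, 5, 4, 5]}
        ... )
        >>> top_k = get_top_k_items(df, k=2)  # rows (1, 2), (1, 3), (2, 1) with ranks 1, 2, 1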
"""
# Sort dataframe by col_user and (top k) col_rating
if k is None:
top_k_items = dataframe
else:
top_k_items = (
dataframe.sort_values([col_user, col_rating], ascending=[True, False])
.groupby(col_user, as_index=False)
.head(k)
.reset_index(drop=True)
)
# Add ranks
top_k_items["rank"] = top_k_items.groupby(col_user, sort=False).cumcount() + 1
return top_k_items
"""Function name and function mapper.
Useful when we have to serialize evaluation metric names
and call the functions based on deserialized names"""
metrics = {
rmse.__name__: rmse,
mae.__name__: mae,
rsquared.__name__: rsquared,
exp_var.__name__: exp_var,
precision_at_k.__name__: precision_at_k,
recall_at_k.__name__: recall_at_k,
r_precision_at_k.__name__: r_precision_at_k,
ndcg_at_k.__name__: ndcg_at_k,
map_at_k.__name__: map_at_k,
map.__name__: map,
}
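
# Example of name-based dispatch through the mapper above (an illustrative
# sketch; `rating_true` and `rating_pred` stand for toy frames like those in
# the docstring examples):
#
#   score = metrics["ndcg_at_k"](rating_true, rating_pred, k=10)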
# diversity metrics
def _check_column_dtypes_diversity_serendipity(func):
"""Checks columns of DataFrame inputs
This includes the checks on:
* whether the input columns exist in the input DataFrames
* whether the data types of col_user as well as col_item are matched in the two input DataFrames.
* whether reco_df contains any user_item pairs that are already shown in train_df
* check relevance column in reco_df
* check column names in item_feature_df
Args:
func (function): function that will be wrapped
Returns:
function: Wrapper function for checking dtypes.
"""
@wraps(func)
def check_column_dtypes_diversity_serendipity_wrapper(
train_df,
reco_df,
item_feature_df=None,
item_sim_measure=DEFAULT_ITEM_SIM_MEASURE,
col_item_features=DEFAULT_ITEM_FEATURES_COL,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_sim=DEFAULT_SIMILARITY_COL,
col_relevance=None,
*args,
**kwargs,
):
"""Check columns of DataFrame inputs
Args:
train_df (pandas.DataFrame): Data set with historical data for users and items they
have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows.
reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item,
col_relevance (optional). Assumed to not contain any duplicate user-item pairs.
item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'.
It contains two columns: col_item and features (a feature vector).
            item_sim_measure (str): (Optional) The item similarity measure to use.
                Available measures include item_cooccurrence_count (default) and
                item_feature_vector.
col_item_features (str): item feature column name.
col_user (str): User id column name.
col_item (str): Item id column name.
col_sim (str): This column indicates the column name for item similarity.
col_relevance (str): This column indicates whether the recommended item is actually
relevant to the user or not.
"""
if not has_columns(train_df, [col_user, col_item]):
raise ValueError("Missing columns in train_df DataFrame")
if not has_columns(reco_df, [col_user, col_item]):
raise ValueError("Missing columns in reco_df DataFrame")
if not has_same_base_dtype(train_df, reco_df, columns=[col_user, col_item]):
raise ValueError("Columns in provided DataFrames are not the same datatype")
        if col_relevance is None:
            col_relevance = DEFAULT_RELEVANCE_COL
            # relevance term, default is 1 (relevant) for all;
            # copy to avoid mutating a slice of the caller's DataFrame
            reco_df = reco_df[[col_user, col_item]].copy()
            reco_df[col_relevance] = 1.0
        else:
            reco_df = reco_df[[col_user, col_item, col_relevance]].astype(
                {col_relevance: np.float16}
            )
if item_sim_measure == "item_feature_vector":
required_columns = [col_item, col_item_features]
if item_feature_df is not None:
if not has_columns(item_feature_df, required_columns):
raise ValueError("Missing columns in item_feature_df DataFrame")
else:
raise Exception(
"item_feature_df not specified! item_feature_df must be provided "
"if choosing to use item_feature_vector to calculate item similarity. "
"item_feature_df should have columns: " + str(required_columns)
)
# check if reco_df contains any user_item pairs that are already shown in train_df
count_intersection = pd.merge(
train_df, reco_df, how="inner", on=[col_user, col_item]
).shape[0]
if count_intersection != 0:
raise Exception(
"reco_df should not contain any user_item pairs that are already shown in train_df"
)
return func(
train_df=train_df,
reco_df=reco_df,
item_feature_df=item_feature_df,
item_sim_measure=item_sim_measure,
col_user=col_user,
col_item=col_item,
col_sim=col_sim,
col_relevance=col_relevance,
*args,
**kwargs,
)
return check_column_dtypes_diversity_serendipity_wrapper
def _check_column_dtypes_novelty_coverage(func):
"""Checks columns of DataFrame inputs
This includes the checks on:
* whether the input columns exist in the input DataFrames