/
fpmc.py
140 lines (110 loc) · 4.81 KB
/
fpmc.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
# -*- coding: utf-8 -*-
# @Time : 2020/8/28 14:32
# @Author : Yujie Lu
# @Email : yujielu1998@gmail.com
# UPDATE
# @Time : 2020/10/2
# @Author : Yujie Lu
# @Email : yujielu1998@gmail.com
r"""
FPMC
################################################
Reference:
Steffen Rendle et al. "Factorizing Personalized Markov Chains for Next-Basket Recommendation." in WWW 2010.
"""
import torch
from torch import nn
from torch.nn.init import xavier_normal_
from recbole.model.abstract_recommender import SequentialRecommender
from recbole.model.loss import BPRLoss
from recbole.utils import InputType
class FPMC(SequentialRecommender):
    r"""The FPMC model is mainly used in the recommendation system to predict the possibility of
    unknown items arousing user interest, and to discharge the item recommendation list.

    FPMC factorizes the personalized transition cube into a Matrix Factorization (MF)
    term (user <-> next item) plus a Factorized Markov Chain (FMC) term
    (last clicked item <-> next item); the final score is the sum of both.

    Note:
        In order that the generation method we used is common to other sequential models,
        We set the size of the basket mentioned in the paper equal to 1.
        For comparison with other models, the loss function used is BPR.
    """

    input_type = InputType.PAIRWISE

    def __init__(self, config, dataset):
        super(FPMC, self).__init__(config, dataset)

        # load parameters info
        self.embedding_size = config["embedding_size"]
        self.loss_type = config["loss_type"]

        # load dataset info
        self.n_users = dataset.user_num

        # define layers and loss
        # user embedding matrix (V^{U,I} in the paper): user factors for the MF term
        self.UI_emb = nn.Embedding(self.n_users, self.embedding_size)
        # label embedding matrix (V^{I,U}): next-item factors paired with UI_emb
        self.IU_emb = nn.Embedding(self.n_items, self.embedding_size)
        # last click item embedding matrix (V^{L,I}): previous-item factors for the
        # FMC term; padding_idx=0 keeps the padding item's embedding at zero
        self.LI_emb = nn.Embedding(self.n_items, self.embedding_size, padding_idx=0)
        # label embedding matrix (V^{I,L}): next-item factors paired with LI_emb
        self.IL_emb = nn.Embedding(self.n_items, self.embedding_size)

        if self.loss_type == "BPR":
            self.loss_fct = BPRLoss()
        else:
            raise NotImplementedError("Make sure 'loss_type' in ['BPR']!")

        # parameters initialization
        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Xavier-normal initialization for every embedding table.
        if isinstance(module, nn.Embedding):
            xavier_normal_(module.weight.data)

    def forward(self, user, item_seq, item_seq_len, next_item):
        """Score ``next_item`` for each (user, sequence) pair.

        Args:
            user: user ids, shape ``[B]``.
            item_seq: padded item-id sequences, shape ``[B, L]``.
            item_seq_len: true lengths of each sequence, shape ``[B]``.
            next_item: candidate next-item ids, shape ``[B]``.

        Returns:
            torch.Tensor: scores of shape ``[B]``.
        """
        # gather the last clicked item of each sequence (basket size is 1, see class note)
        item_last_click_index = item_seq_len - 1
        item_last_click = torch.gather(
            item_seq, dim=1, index=item_last_click_index.unsqueeze(1)
        )
        item_seq_emb = self.LI_emb(item_last_click)  # [b,1,emb]

        user_emb = self.UI_emb(user)
        user_emb = torch.unsqueeze(user_emb, dim=1)  # [b,1,emb]

        iu_emb = self.IU_emb(next_item)
        iu_emb = torch.unsqueeze(iu_emb, dim=1)  # [b,n,emb] in here n = 1

        il_emb = self.IL_emb(next_item)
        il_emb = torch.unsqueeze(il_emb, dim=1)  # [b,n,emb] in here n = 1

        # This is the core part of the FPMC model,can be expressed by a combination of a MF and a FMC model
        # MF: user-factor / item-factor inner product
        mf = torch.matmul(user_emb, iu_emb.permute(0, 2, 1))
        mf = torch.squeeze(mf, dim=1)  # [B,1]
        # FMC: last-item-factor / item-factor inner product
        fmc = torch.matmul(il_emb, item_seq_emb.permute(0, 2, 1))
        fmc = torch.squeeze(fmc, dim=1)  # [B,1]

        score = mf + fmc
        # Squeeze only the trailing singleton dim: a bare squeeze() would also
        # collapse the batch dim when B == 1, yielding a 0-d tensor.
        score = score.squeeze(-1)  # [B]
        return score

    def calculate_loss(self, interaction):
        """Compute BPR loss from paired positive/negative next items."""
        user = interaction[self.USER_ID]
        item_seq = interaction[self.ITEM_SEQ]
        item_seq_len = interaction[self.ITEM_SEQ_LEN]
        pos_items = interaction[self.POS_ITEM_ID]
        neg_items = interaction[self.NEG_ITEM_ID]
        pos_score = self.forward(user, item_seq, item_seq_len, pos_items)
        neg_score = self.forward(user, item_seq, item_seq_len, neg_items)
        loss = self.loss_fct(pos_score, neg_score)
        return loss

    def predict(self, interaction):
        """Score the single candidate item attached to each interaction."""
        user = interaction[self.USER_ID]
        item_seq = interaction[self.ITEM_SEQ]
        item_seq_len = interaction[self.ITEM_SEQ_LEN]
        test_item = interaction[self.ITEM_ID]
        score = self.forward(user, item_seq, item_seq_len, test_item)  # [B]
        return score

    def full_sort_predict(self, interaction):
        """Score every item in the catalog for each interaction.

        Returns:
            torch.Tensor: scores of shape ``[B, n_items]``.
        """
        user = interaction[self.USER_ID]
        item_seq = interaction[self.ITEM_SEQ]
        item_seq_len = interaction[self.ITEM_SEQ_LEN]

        # MF term against all items at once: [B,emb] x [emb,n_items] -> [B,n_items]
        user_emb = self.UI_emb(user)
        all_iu_emb = self.IU_emb.weight
        mf = torch.matmul(user_emb, all_iu_emb.transpose(0, 1))

        # FMC term: embed each user's last clicked item, score against all items
        all_il_emb = self.IL_emb.weight
        item_last_click_index = item_seq_len - 1
        item_last_click = torch.gather(
            item_seq, dim=1, index=item_last_click_index.unsqueeze(1)
        )
        item_seq_emb = self.LI_emb(item_last_click)  # [b,1,emb]
        fmc = torch.matmul(item_seq_emb, all_il_emb.transpose(0, 1))
        fmc = torch.squeeze(fmc, dim=1)  # [B,n_items]

        score = mf + fmc
        return score