afm.py
import torch
import torch.nn.functional as F

from models.layers import Embedding, Linear, get_triu_indices


class AttentionalFactorizationMachine(torch.nn.Module):
    def __init__(self, nemb, nattn, dropout):
        super().__init__()
        self.attn_w = torch.nn.Linear(nemb, nattn)
        self.attn_h = torch.nn.Linear(nattn, 1)
        self.attn_p = torch.nn.Linear(nemb, 1)
        self.dropout = torch.nn.Dropout(p=dropout)

    def forward(self, x):
        """
        :param x: embedded feature vectors, FloatTensor B*F*E
        :return: attentional pairwise-interaction score of size B
        """
        nfield = x.size(1)
        # indices of all field pairs (i, j) with i < j
        vi_indices, vj_indices = get_triu_indices(nfield)
        vi, vj = x[:, vi_indices], x[:, vj_indices]                 # B*(F*(F-1)/2)*E
        hadamard_prod = vi * vj                                     # element-wise pairwise interactions
        attn_weights = F.relu(self.attn_w(hadamard_prod))           # B*(F*(F-1)/2)*nattn
        attn_weights = F.softmax(self.attn_h(attn_weights), dim=1)  # B*(F*(F-1)/2)*1
        attn_weights = self.dropout(attn_weights)
        afm = torch.sum(attn_weights * hadamard_prod, dim=1)        # attention-weighted sum pooling, B*E
        afm = self.dropout(afm)
        return self.attn_p(afm).squeeze(1)                          # B
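
# Note: get_triu_indices is imported from models.layers and not shown here. It
# is assumed to return the strict upper-triangle index pair, i.e. every field
# pair (i, j) with i < j. A minimal sketch of an equivalent helper under that
# assumption:
#
#   def get_triu_indices(nfield):
#       idx = torch.triu_indices(nfield, nfield, offset=1)  # 2 x F*(F-1)/2
#       return idx[0], idx[1]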


class AFMModel(torch.nn.Module):
    """
    Model:  Attentional Factorization Machine
    Ref:    J Xiao, et al. Attentional Factorization Machines:
            Learning the Weight of Feature Interactions via Attention Networks, 2017.
    """

    def __init__(self, nfeat, nemb, nattn, dropout):
        super().__init__()
        self.embedding = Embedding(nfeat, nemb)
        self.linear = Linear(nfeat)
        self.afm = AttentionalFactorizationMachine(nemb, nattn, dropout)

    def forward(self, x):
        """
        :param x: {'id': LongTensor B*F, 'value': FloatTensor B*F}
        :return: y of size B, the raw score (use as-is for regression,
                 apply sigmoid for classification)
        """
        # first-order linear term plus second-order attentional interaction term
        y = self.linear(x) + self.afm(self.embedding(x))
        return y
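

if __name__ == '__main__':
    # Minimal smoke test: a sketch only. The hyperparameter values below, and
    # the assumption that Embedding/Linear from models.layers accept the
    # {'id', 'value'} dict (as the forward docstring suggests), are not part
    # of this file.
    B, nfield, nfeat, nemb, nattn = 4, 10, 1000, 16, 8
    model = AFMModel(nfeat=nfeat, nemb=nemb, nattn=nattn, dropout=0.1)
    x = {
        'id': torch.randint(0, nfeat, (B, nfield)),   # feature ids,    B*F
        'value': torch.ones(B, nfield),               # feature values, B*F
    }
    y = model(x)
    print(y.shape)  # expected: torch.Size([4])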