graph_transformer_layer.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
import numpy as np
"""
Graph Transformer Layer
"""
"""
Util functions
"""
def src_dot_dst(src_field, dst_field, out_field):
    def func(edges):
        return {out_field: (edges.src[src_field] * edges.dst[dst_field]).sum(-1, keepdim=True)}
    return func


def scaled_exp(field, scale_constant):
    def func(edges):
        # clamp for softmax numerical stability
        return {field: torch.exp((edges.data[field] / scale_constant).clamp(-5, 5))}
    return func
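# A minimal sketch of what the two helpers above compute for a single edge:
# given the source node's K projection k and the destination node's Q
# projection q, the edge carries the un-normalized attention weight
# exp(clamp(<k, q> / scale, -5, 5)). The function name, tensor values and
# dimension below are illustrative only, not part of the original module.
def _demo_edge_score(d=8):
    k = torch.randn(d)   # stands in for edges.src['K_h'] on one edge
    q = torch.randn(d)   # stands in for edges.dst['Q_h'] on one edge
    score = torch.exp(((k * q).sum() / np.sqrt(d)).clamp(-5, 5))
    return score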
"""
Single Attention Head
"""
class MultiHeadAttentionLayer(nn.Module):
    def __init__(self, in_dim, out_dim, num_heads, use_bias):
        super().__init__()

        self.out_dim = out_dim
        self.num_heads = num_heads

        if use_bias:
            self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=True)
            self.K = nn.Linear(in_dim, out_dim * num_heads, bias=True)
            self.V = nn.Linear(in_dim, out_dim * num_heads, bias=True)
        else:
            self.Q = nn.Linear(in_dim, out_dim * num_heads, bias=False)
            self.K = nn.Linear(in_dim, out_dim * num_heads, bias=False)
            self.V = nn.Linear(in_dim, out_dim * num_heads, bias=False)

        # TODO: handle weighted edges if needed

    def propagate_attention(self, g):
        # Compute attention score
        g.apply_edges(src_dot_dst('K_h', 'Q_h', 'score'))
        g.apply_edges(scaled_exp('score', np.sqrt(self.out_dim)))

        # Send weighted values to target nodes
        eids = g.edges()
        g.send_and_recv(eids, fn.src_mul_edge('V_h', 'score', 'V_h'), fn.sum('V_h', 'wV'))
        g.send_and_recv(eids, fn.copy_edge('score', 'score'), fn.sum('score', 'z'))

    def forward(self, g, h):
        Q_h = self.Q(h)
        K_h = self.K(h)
        V_h = self.V(h)

        # Reshaping into [num_nodes, num_heads, feat_dim] to
        # get projections for multi-head attention
        g.ndata['Q_h'] = Q_h.view(-1, self.num_heads, self.out_dim)
        g.ndata['K_h'] = K_h.view(-1, self.num_heads, self.out_dim)
        g.ndata['V_h'] = V_h.view(-1, self.num_heads, self.out_dim)

        self.propagate_attention(g)

        # wV is the score-weighted sum of neighbor values; z is the sum of
        # scores, so wV / z is the softmax-normalized attention output
        head_out = g.ndata['wV'] / g.ndata['z']

        return head_out
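# A minimal usage sketch of MultiHeadAttentionLayer, not part of the original
# module. It assumes a DGL version compatible with the message-passing calls
# above (e.g. fn.src_mul_edge / fn.copy_edge); the toy graph and feature sizes
# are arbitrary. Note that out_dim here is the per-head dimension, and every
# node needs at least one incoming edge so that the normalizer z is non-zero.
def _demo_multi_head_attention():
    g = dgl.graph(([0, 1, 2, 2], [1, 2, 0, 1]))          # toy graph with 3 nodes
    h = torch.randn(g.number_of_nodes(), 16)             # input node features
    attn = MultiHeadAttentionLayer(in_dim=16, out_dim=4, num_heads=2, use_bias=False)
    out = attn(g, h)                                      # [num_nodes, num_heads, out_dim] = [3, 2, 4]
    return out.shape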
class GraphTransformerLayer(nn.Module):
    """
        Param:
            in_dim (int): input node feature dimension
            out_dim (int): output node feature dimension; must be divisible by
                           num_heads, and must equal in_dim when residual=True
            num_heads (int): number of attention heads
            dropout (float): dropout rate applied after attention and inside the FFN
            layer_norm (bool): apply LayerNorm after the attention and FFN blocks
            batch_norm (bool): apply BatchNorm1d after the attention and FFN blocks
            residual (bool): add residual connections around the attention and FFN blocks
            use_bias (bool): use bias in the Q/K/V projections
    """
    def __init__(self, in_dim, out_dim, num_heads, dropout=0.0, layer_norm=False, batch_norm=True, residual=True, use_bias=False):
        super().__init__()

        self.in_channels = in_dim
        self.out_channels = out_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.residual = residual
        self.layer_norm = layer_norm
        self.batch_norm = batch_norm

        self.attention = MultiHeadAttentionLayer(in_dim, out_dim//num_heads, num_heads, use_bias)

        self.O = nn.Linear(out_dim, out_dim)

        if self.layer_norm:
            self.layer_norm1 = nn.LayerNorm(out_dim)

        if self.batch_norm:
            self.batch_norm1 = nn.BatchNorm1d(out_dim)

        # FFN
        self.FFN_layer1 = nn.Linear(out_dim, out_dim*2)
        self.FFN_layer2 = nn.Linear(out_dim*2, out_dim)

        if self.layer_norm:
            self.layer_norm2 = nn.LayerNorm(out_dim)

        if self.batch_norm:
            self.batch_norm2 = nn.BatchNorm1d(out_dim)
    def forward(self, g, h):
        h_in1 = h  # for first residual connection

        # multi-head attention out
        attn_out = self.attention(g, h)
        h = attn_out.view(-1, self.out_channels)

        h = F.dropout(h, self.dropout, training=self.training)

        h = self.O(h)

        if self.residual:
            h = h_in1 + h  # residual connection

        if self.layer_norm:
            h = self.layer_norm1(h)

        if self.batch_norm:
            h = self.batch_norm1(h)

        h_in2 = h  # for second residual connection

        # FFN
        h = self.FFN_layer1(h)
        h = F.relu(h)
        h = F.dropout(h, self.dropout, training=self.training)
        h = self.FFN_layer2(h)

        if self.residual:
            h = h_in2 + h  # residual connection

        if self.layer_norm:
            h = self.layer_norm2(h)

        if self.batch_norm:
            h = self.batch_norm2(h)

        return h

    def __repr__(self):
        return '{}(in_channels={}, out_channels={}, heads={}, residual={})'.format(self.__class__.__name__,
                                                                                   self.in_channels,
                                                                                   self.out_channels,
                                                                                   self.num_heads,
                                                                                   self.residual)
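# A minimal end-to-end sketch of GraphTransformerLayer, not part of the
# original module; the toy graph and feature sizes are arbitrary. Since
# residual=True, in_dim and out_dim are kept equal, and out_dim is divisible
# by num_heads as required by the attention sub-layer.
def _demo_graph_transformer_layer():
    g = dgl.graph(([0, 1, 2, 3, 3], [1, 2, 3, 0, 1]))    # toy graph with 4 nodes
    h = torch.randn(g.number_of_nodes(), 32)
    layer = GraphTransformerLayer(in_dim=32, out_dim=32, num_heads=4,
                                  dropout=0.1, batch_norm=True, residual=True)
    h_out = layer(g, h)                                   # [num_nodes, out_dim] = [4, 32]
    return h_out.shape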