Transformer.py
import torch.nn as nn

from FeedForward import FeedForward
from MultiHeadSelfAttention import MultiHeadSelfAttention


class Transformer(nn.Module):
    """A single post-LayerNorm Transformer encoder block: multi-head
    self-attention followed by a position-wise feed-forward network,
    each wrapped in a residual connection and a LayerNorm."""

    def __init__(self,
                 hidden_size,
                 attention_head_num,
                 attention_head_size,
                 intermediate_size,
                 dropout_prob=0.1):
        super(Transformer, self).__init__()
        # Multi-head self-attention sub-layer and its LayerNorm.
        self.multi_attention = MultiHeadSelfAttention(
            attention_head_num=attention_head_num,
            attention_head_size=attention_head_size)
        self.attention_layernorm = nn.LayerNorm(hidden_size)
        # Position-wise feed-forward sub-layer and its LayerNorm.
        self.feedforward = FeedForward(
            hidden_size,
            intermediate_size,
            dropout_prob)
        self.feedforward_layernorm = nn.LayerNorm(hidden_size)

    def forward(self, x, attention_mask):
        # Attention sub-layer: residual connection, then LayerNorm.
        attention_x = self.multi_attention(x, attention_mask)
        attention_x = x + attention_x
        attention_x = self.attention_layernorm(attention_x)
        # Feed-forward sub-layer: residual connection, then LayerNorm.
        feedforward_x = self.feedforward(attention_x)
        feedforward_x = attention_x + feedforward_x
        feedforward_x = self.feedforward_layernorm(feedforward_x)
        return feedforward_x
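
A minimal usage sketch follows. It assumes hidden_size equals attention_head_num * attention_head_size (so the attention output matches the residual stream) and that MultiHeadSelfAttention accepts a mask broadcastable over heads and query positions; neither detail is visible from this file, so treat the sizes and mask shape below as placeholders.

import torch

from Transformer import Transformer

# Hypothetical sizes: 12 heads of width 64 give a 768-dim hidden state.
block = Transformer(hidden_size=768,
                    attention_head_num=12,
                    attention_head_size=64,
                    intermediate_size=3072)

x = torch.randn(2, 16, 768)  # (batch, sequence length, hidden size)
# Assumed mask layout: (batch, 1, 1, sequence length), broadcastable
# across heads and query positions; adjust to match MultiHeadSelfAttention.
attention_mask = torch.ones(2, 1, 1, 16)

out = block(x, attention_mask)
print(out.shape)  # expected: torch.Size([2, 16, 768])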