AngryBERT.py
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from transformers import BertTokenizer, BertModel

from ..base_model import BaseModel
from ..register_model import RegisterModel


class FFN(nn.Module):
    """Two-layer feed-forward head: Linear -> dropout -> ReLU -> Linear."""

    def __init__(self, in_feat, out_feat, dropout):
        super(FFN, self).__init__()
        self.in2hid = nn.Linear(in_feat, in_feat)
        self.hid2out = nn.Linear(in_feat, out_feat)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, input):
        hid = self.activation(self.dropout(self.in2hid(input)))
        return self.hid2out(hid)


@RegisterModel("AngryBERT")
class AngryBERT(BaseModel):
    """
    AngryBERT: https://arxiv.org/pdf/2103.11800.pdf

    Fuses a pooled BERT representation with a BiLSTM encoding of the same
    text through a gate layer, then classifies with a small FFN head.
    TODO: the paper's secondary tasks - the framework doesn't support
    multi-task learning yet.
    """

    def __init__(
        self,
        bilstm_n_layers,
        bilstm_hidden_dim,
        ffn_dim,
        out_feat,
        embeddings,
        dropout,
        device,
    ):
        super(AngryBERT, self).__init__()
        self.embeddings = embeddings  # embedding table for the BiLSTM branch
        self.bilstm = nn.LSTM(
            input_size=self.embeddings.weight.shape[-1],
            hidden_size=bilstm_hidden_dim,
            num_layers=bilstm_n_layers,
            bias=True,
            batch_first=True,
            dropout=dropout,
            bidirectional=True,
        )
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        self.bert = BertModel.from_pretrained("bert-base-uncased")
        # The gate is a learned linear transformation over the concatenated
        # BERT (768-dim) and BiLSTM (2 * hidden_dim, bidirectional) features.
        self.gate = nn.Linear(768 + bilstm_hidden_dim * 2, ffn_dim)
        self.ffn = FFN(ffn_dim, out_feat, dropout)
        self.dropout = nn.Dropout(dropout)
        self.activation = nn.ReLU()
        self.device = device

    def forward(self, data):
        # BiLSTM branch: embed the pre-tokenized input and pack away padding.
        emb = self.embeddings(data["tokens"])
        self.bilstm.flatten_parameters()
        packed = pack_padded_sequence(
            emb, data["mask"].sum(dim=-1).cpu(), batch_first=True, enforce_sorted=False
        )
        bilstmout, _ = self.bilstm(packed)
        bilstmout, _ = pad_packed_sequence(bilstmout, batch_first=True)
        # Keep the first timestep only; its backward half has read the whole
        # sequence, so it acts as a sequence summary.
        bilstmout = self.dropout(bilstmout[:, 0, :])
        # BERT branch: re-tokenize the raw text. truncation=True guards
        # against inputs longer than BERT's 512-token limit.
        inputs = self.tokenizer(
            data["text"], padding=True, truncation=True, return_tensors="pt"
        ).to(self.device)
        bertout = self.dropout(self.bert(**inputs).pooler_output)
        # Gate: fuse both branches, then classify with the FFN head.
        gatein = torch.cat((bilstmout, bertout), dim=-1)
        chosen = self.activation(self.dropout(self.gate(gatein)))
        return self.ffn(chosen)

    @staticmethod
    def add_required_arguments(parser):
        group = parser.add_argument_group()
        group.add_argument(
            "--angrybert-bilstm-hidden-size",
            type=int,
            default=64,
            help="BiLSTM hidden size",
        )
        group.add_argument(
            "--angrybert-bilstm-n-layers",
            type=int,
            default=2,
            help="Number of layers in the BiLSTM",
        )
        # NOTE: argparse's type=bool treats any non-empty string (even
        # "False") as True; the flag is also currently unused, since
        # __init__ hardcodes bidirectional=True.
        group.add_argument(
            "--angrybert-bilstm-bidirectional",
            type=bool,
            default=True,
            help="Train a bidirectional LSTM (True) or a unidirectional LSTM (False)",
        )
        group.add_argument(
            "--angrybert-ffn-in-dim",
            type=int,
            default=512,
            help="Input size of the FFN layer, which is also the output size of the gate",
        )

    @staticmethod
    def make_model(args):
        return AngryBERT(
            args.angrybert_bilstm_n_layers,
            args.angrybert_bilstm_hidden_size,
            args.angrybert_ffn_in_dim,
            args.out_feat,
            args.embeddings,
            args.dropout,
            args.device,
        )

    @staticmethod
    def data_requirements():
        # Padded token ids and mask feed the BiLSTM; raw text feeds BERT.
        return ["tokens", "mask", "text"]
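
For reference, below is a minimal sketch of how the class might be exercised outside the framework. The embedding table, vocabulary size, batch contents, and out_feat=2 are assumptions for illustration (the real values come from the framework's argument parser and data pipeline, per make_model and data_requirements above), and it assumes BaseModel subclasses nn.Module.

import torch
import torch.nn as nn

# Hypothetical stand-ins for what the framework would normally provide.
vocab_size, emb_dim = 5000, 100          # assumed vocabulary / embedding size
embeddings = nn.Embedding(vocab_size, emb_dim)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = AngryBERT(
    bilstm_n_layers=2,
    bilstm_hidden_dim=64,
    ffn_dim=512,
    out_feat=2,                          # e.g. binary hate / non-hate
    embeddings=embeddings,
    dropout=0.1,
    device=device,
).to(device)

# A dummy batch matching data_requirements(): padded token ids, a 0/1
# padding mask, and the raw text that the BERT branch re-tokenizes.
batch = {
    "tokens": torch.randint(0, vocab_size, (2, 12)).to(device),
    "mask": torch.ones(2, 12, dtype=torch.long, device=device),
    "text": ["first example sentence", "second example sentence"],
}
logits = model(batch)                    # shape: (2, out_feat)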