train_bert_dthc.py
# -*- coding: utf-8 -*-
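"""Fine-tune a BERT-based stance classifier (overview inferred from the code below).

The script reads the configured dataset (dt_hc by default) with BertDatasetReader,
batches it with BertBucketIterator, trains the selected model (ICGCNBert by default)
with AdamW and cross-entropy loss, and reports test accuracy plus macro/micro F1 over
the AGAINST (0) and FAVOR (2) classes. The run is repeated several times and the best
scores per repeat are written to the log/ directory.
"""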
import os
import math
import argparse
import random
import numpy
import torch
import torch.nn as nn
from bucket_iterator import BertBucketIterator
from sklearn import metrics
from data_utils import BertDatasetReader, MyBertTokenizer
from models import ICGCN, LSTM, ICGCNBert
from transformers import BertConfig
from transformers.optimization import AdamW
class Instructor:
    def __init__(self, opt):
        self.opt = opt
        stance_dataset = BertDatasetReader(dataset=opt.dataset, tokenizer=opt.tokenizer)
        self.train_data_loader = BertBucketIterator(data=stance_dataset.train_data, batch_size=opt.batch_size, shuffle=True)
        self.test_data_loader = BertBucketIterator(data=stance_dataset.test_data, batch_size=opt.eval_batch_size, shuffle=False)
        self.model = opt.model_class.from_pretrained(opt.pretrained_path, config=opt.config).to(opt.device)
        self._print_args()
        self.global_f1 = 0.
        if torch.cuda.is_available():
            print('cuda memory allocated:', torch.cuda.memory_allocated(device=opt.device.index))
    def _print_args(self):
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in self.model.parameters():
            n_params = torch.prod(torch.tensor(p.shape)).item()
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        print('n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
        print('> training arguments:')
        for arg in vars(self.opt):
            print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
    def _reset_params(self):
        # re-initialize all trainable non-BERT parameters before each repeat
        for n, p in self.model.named_parameters():
            if 'bert' not in n and p.requires_grad:
                if len(p.shape) > 1:
                    self.opt.initializer(p)
                else:
                    stdv = 1. / math.sqrt(p.shape[0])
                    torch.nn.init.uniform_(p, a=-stdv, b=stdv)
    def _train(self, criterion, optimizer):
        max_test_acc = 0
        max_test_f1 = 0
        max_test_f1_m = 0
        global_step = 0
        continue_not_increase = 0
        # seed_everything(self.opt.seed)
        for epoch in range(self.opt.num_epoch):
            print('>' * 100)
            print('epoch: ', epoch)
            n_correct, n_total = 0, 0
            increase_flag = False
            for i_batch, sample_batched in enumerate(self.train_data_loader):
                global_step += 1
                # switch model to training mode, clear gradient accumulators
                self.model.train()
                optimizer.zero_grad()
                # print('sample_batched:')
                # print(sample_batched)
                inputs = [sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
                targets = sample_batched['stance'].to(self.opt.device)
                outputs = self.model(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                # evaluate on the test set every log_step batches
                if global_step % self.opt.log_step == 0:
                    n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
                    n_total += len(outputs)
                    train_acc = n_correct / n_total
                    test_acc, test_f1, test_f1_m = self._evaluate_acc_f1()
                    if test_f1_m > max_test_f1_m:
                        max_test_f1_m = test_f1_m
                    if test_acc > max_test_acc:
                        max_test_acc = test_acc
                    if test_f1 > max_test_f1:
                        increase_flag = True
                        max_test_f1 = test_f1
                        if self.opt.save and test_f1 > self.global_f1:
                            self.global_f1 = test_f1
                            torch.save(self.model.state_dict(), 'state_dict/' + self.opt.model_name + '_' + self.opt.dataset + '.pkl')
                            print('>>> best model saved.')
                    print('loss: {:.4f}, acc: {:.4f}, test_acc: {:.4f}, test_f1: {:.4f}, test_f1_m: {:.4f}'.format(loss.item(), train_acc, test_acc, test_f1, test_f1_m))
            # early stopping: quit if test F1 has not improved for 5 consecutive epochs
            if increase_flag == False:
                continue_not_increase += 1
                if continue_not_increase >= 5:
                    print('early stop.')
                    break
            else:
                continue_not_increase = 0
        return max_test_acc, max_test_f1, max_test_f1_m
    def _evaluate_acc_f1(self):
        # switch model to evaluation mode
        self.model.eval()
        n_test_correct, n_test_total = 0, 0
        t_targets_all, t_outputs_all = None, None
        with torch.no_grad():
            for t_batch, t_sample_batched in enumerate(self.test_data_loader):
                t_inputs = [t_sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
                t_targets = t_sample_batched['stance'].to(self.opt.device)
                t_outputs = self.model(t_inputs)
                n_test_correct += (torch.argmax(t_outputs, -1) == t_targets).sum().item()
                n_test_total += len(t_outputs)
                if t_targets_all is None:
                    t_targets_all = t_targets
                    t_outputs_all = t_outputs
                else:
                    t_targets_all = torch.cat((t_targets_all, t_targets), dim=0)
                    t_outputs_all = torch.cat((t_outputs_all, t_outputs), dim=0)
        test_acc = n_test_correct / n_test_total
        # F1 restricted to the AGAINST (0) and FAVOR (2) classes, macro- and micro-averaged
        f1 = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[0, 2], average='macro')
        f1_mi = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[0, 2], average='micro')
        # f1_favor = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[2], average='macro')
        # f1_against = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), labels=[0], average='macro')
        f1_against, _, f1_favor = metrics.f1_score(t_targets_all.cpu(), torch.argmax(t_outputs_all, -1).cpu(), average=None)
        # print('=====================>', f1_against, f1_favor, (f1_against + f1_favor) * 0.5)
        f1_m = 0.5 * (f1 + f1_mi)
        return test_acc, f1, f1_m
    def run(self, repeats=12):
        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()
        # _params = filter(lambda p: p.requires_grad, self.model.parameters())
        # materialize the filter into a list so it can be iterated over twice below
        param_optimizer = list(filter(lambda np: np[1].requires_grad, self.model.named_parameters()))
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        # apply weight decay to all parameters except biases and LayerNorm weights
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
             'weight_decay': self.opt.l2reg},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0}
        ]
        # optimizer = self.opt.optimizer(_params, lr=self.opt.learning_rate, weight_decay=self.opt.l2reg)
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.opt.learning_rate, eps=self.opt.adam_epsilon)
        if not os.path.exists('log/'):
            os.mkdir('log/')
        f_out = open('log/' + self.opt.model_name + '_' + self.opt.dataset + '_val.txt', 'w', encoding='utf-8')
        max_test_acc_avg = 0
        max_test_f1_avg = 0
        max_test_f1_m_avg = 0
        for i in range(repeats):
            print('repeat: ', (i + 1))
            f_out.write('repeat: ' + str(i + 1) + '\n')
            self._reset_params()
            max_test_acc, max_test_f1, max_test_f1_m = self._train(criterion, optimizer)
            print('max_test_acc: {0} max_test_f1: {1} max_test_f1_m: {2}'.format(max_test_acc, max_test_f1, max_test_f1_m))
            f_out.write('max_test_acc: {0}, max_test_f1: {1}, max_test_f1_m: {2}\n'.format(max_test_acc, max_test_f1, max_test_f1_m))
            # keep the best score observed across repeats
            if max_test_acc > max_test_acc_avg:
                max_test_acc_avg = max_test_acc
            if max_test_f1 > max_test_f1_avg:
                max_test_f1_avg = max_test_f1
            if max_test_f1_m > max_test_f1_m_avg:
                max_test_f1_m_avg = max_test_f1_m
            # max_test_acc_avg += max_test_acc
            # max_test_f1_avg += max_test_f1
            print('#' * 100)
        print("max_test_acc_avg:", max_test_acc_avg)
        print("max_test_f1_avg:", max_test_f1_avg)
        print('max_test_f1_m_avg:', max_test_f1_m_avg)
        f_out.close()
        return max_test_acc_avg, max_test_f1_avg, max_test_f1_m_avg
def seed_everything(seed=1029):
    '''
    Set the random seed for the whole environment (Python, NumPy, PyTorch, CUDA).
    :param seed:
    :return:
    '''
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    numpy.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # some cudnn methods can be random even after fixing the seed
    # unless you tell it to be deterministic
    torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
    # Hyper Parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='icgcn', type=str)
    parser.add_argument('--dataset', default='dt_hc', type=str, help='twitter, rest14, lap14, rest15, rest16')
    parser.add_argument('--optimizer', default='adam', type=str)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str)
    parser.add_argument('--learning_rate', default=5e-5, type=float)
    parser.add_argument('--adam_epsilon', default=1e-6, type=float)
    parser.add_argument('--l2reg', default=0.00001, type=float)
    parser.add_argument('--num_epoch', default=15, type=int)
    parser.add_argument('--batch_size', default=4, type=int)
    parser.add_argument('--eval_batch_size', default=16, type=int)
    parser.add_argument('--log_step', default=5, type=int)
    parser.add_argument('--embed_dim', default=300, type=int)
    parser.add_argument('--hidden_dim', default=300, type=int)
    parser.add_argument('--polarities_dim', default=3, type=int)
    # note: with type=bool, any non-empty value (including 'False') is parsed as True
    parser.add_argument('--save', default=False, type=bool)
    parser.add_argument('--seed', default=776, type=int)
    parser.add_argument('--device', default=None, type=str)
    parser.add_argument('--gcn_layers', default=3, type=int)
    parser.add_argument('--pretrained_path', default='./bert_base_en')
    opt = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    model_classes = {
        'lstm': LSTM,
        'icgcn': ICGCNBert,
    }
    input_colses = {
        # 'lstm': ['text_indices'],
        # 'ascnn': ['text_indices', 'aspect_indices', 'left_indices'],
        # 'asgcn': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
        # 'astcn': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
        # 'senticgcn': ['text_indices', 'aspect_indices', 'left_indices', 'sdat_graph'],
        # 'sdgcn': ['text_indices', 'aspect_indices', 'left_indices', 'sentic_graph', 'sdat_graph'],
        # 'affgcn': ['text_indices', 'aspect_indices', 'left_indices', 'sentic_graph'],
        'icgcn': ['text_indices', 'attention_mask', 'in_graph', 'cross_graph'],
    }
    initializers = {
        'xavier_uniform_': torch.nn.init.xavier_uniform_,
        'xavier_normal_': torch.nn.init.xavier_normal_,
        'orthogonal_': torch.nn.init.orthogonal_,
    }
    optimizers = {
        'adadelta': torch.optim.Adadelta,  # default lr=1.0
        'adagrad': torch.optim.Adagrad,  # default lr=0.01
        'adam': torch.optim.Adam,  # default lr=0.001
        'adamax': torch.optim.Adamax,  # default lr=0.002
        'asgd': torch.optim.ASGD,  # default lr=0.01
        'rmsprop': torch.optim.RMSprop,  # default lr=0.01
        'sgd': torch.optim.SGD,
    }
    opt.model_class = model_classes[opt.model_name]
    opt.inputs_cols = input_colses[opt.model_name]
    opt.initializer = initializers[opt.initializer]
    opt.optimizer = optimizers[opt.optimizer]
    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
        if opt.device is None else torch.device(opt.device)
    opt.config = BertConfig.from_pretrained(opt.pretrained_path)
    opt.config.gcn_layers = opt.gcn_layers
    opt.config.polarities_size = opt.polarities_dim
    opt.tokenizer = MyBertTokenizer.from_pretrained(opt.pretrained_path)
    if opt.seed is not None:
        random.seed(opt.seed)
        numpy.random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        torch.cuda.manual_seed(opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    ins = Instructor(opt)
    ins.run(repeats=5)
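# Example invocation (a sketch: the checkpoint path and flag values simply restate the
# defaults declared above, not a prescribed configuration):
#   python train_bert_dthc.py --model_name icgcn --dataset dt_hc \
#       --pretrained_path ./bert_base_en --learning_rate 5e-5 --batch_size 4 --num_epoch 15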