train_extractive.py

import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import math
import random
import datetime
import pickle

from torch.utils.data import DataLoader, ConcatDataset, TensorDataset
from tqdm import tqdm
from datasets import load_dataset

from data_loader import P2CDataset_ext
from model import load_backbone, Classifier, Classifier_multi, Classifier_pref_ensemble
from common import parse_args
from utils import Logger, set_seed, set_model_path, save_model, AverageMeter, ECE
from src.train_ext import set_loader_extractive, train_base_extractive, train_preference_extractive

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def main():
    args = parse_args()

    # Set seed
    set_seed(args)

    prefix = f"{args.dataset}_{args.train_type}"
    if args.sampling is not None:
        prefix = prefix + "_" + args.sampling

    if args.pair_loss:
        log_name = f"{prefix}_pair_cons{args.lambda_cons}_div{args.lambda_div}_S{args.seed}"
    elif args.consistency:
        log_name = f"{prefix}_cons_cons{args.lambda_cons}_div{args.lambda_div}_S{args.seed}"
    else:
        log_name = f"{prefix}_{args.base}_S{args.seed}"

    logger = Logger(log_name)
    log_dir = logger.logdir
    logger.log(args)
    logger.log(log_name)

    logger.log('Loading pre-trained backbone network... ({})'.format(args.backbone))
    backbone, tokenizer = load_backbone(args.backbone)

    logger.log('Initializing model and optimizer...')
    if 'dynasent' in args.dataset or 'mnli' in args.dataset:
        args.n_class = 3
    else:
        args.n_class = 2

    if args.pref_type == 'none':
        if args.base == 'multi':
            model = Classifier_multi(args, args.backbone, backbone, args.n_class, args.train_type).to(device)
        else:
            model = Classifier(args, args.backbone, backbone, args.n_class, args.train_type).to(device)
    else:
        model = Classifier_pref_ensemble(args, args.backbone, backbone, args.n_class, args.train_type).to(device)

    if args.pre_ckpt is not None:
        logger.log('Loading from pre-trained model')
        model.load_state_dict(torch.load(args.pre_ckpt))

    # Set optimizer with (1) a fixed learning rate and (2) no weight decay
    optimizer = optim.Adam(model.parameters(), lr=args.model_lr, weight_decay=0)

    logger.log('Initializing dataset...')
    dataset = P2CDataset_ext(args.dataset, tokenizer, args.backbone)

    # Loaders for training (shuffled), validation, and test
    orig_loader = DataLoader(dataset.train_dataset, shuffle=True, drop_last=True, batch_size=args.batch_size, num_workers=4)
    val_loader = DataLoader(dataset.val_dataset, shuffle=False, batch_size=args.batch_size, num_workers=4)
    test_loader = DataLoader(dataset.test_dataset, shuffle=False, batch_size=args.batch_size, num_workers=4)

    logger.log('==========> Start training ({})'.format(args.train_type))
    best_acc, final_acc, final_ece = 0, 0, 0

    # Initial training labels and loader; preference/probability estimates start empty
    train_labels = dataset.train_dataset[:][1]
    pref_train = None
    prob_train = None
    train_loader, pair_idx = set_loader_extractive(args, dataset, orig_loader, 1, pref_train, prob_train, train_labels)
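
    # Note: pref_train / prob_train are produced by train_preference_extractive
    # in the loop below and fed back into set_loader_extractive from the second
    # epoch on, so pair construction can use the model's current preference
    # estimates.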
    for epoch in range(1, 1 + args.epochs):
        # Set DataLoader
        if args.pref_type == 'none':
            train_base_extractive(args, train_loader, model, optimizer, epoch, logger)
        else:
            if epoch > 1:
                train_loader, pair_idx = set_loader_extractive(args, dataset, orig_loader, epoch, pref_train, prob_train, train_labels)
            pref_train, prob_train = train_preference_extractive(args, train_loader, pair_idx, model, optimizer, epoch, logger)

        best_acc, final_acc, final_ece = eval_func(args, model, val_loader, test_loader, logger, log_dir, epoch,
                                                   best_acc, final_acc, final_ece)

    logger.log('===========>>>>> Final ECE: {}'.format(final_ece))
    logger.log('===========>>>>> Final Test Accuracy: {}'.format(final_acc))

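
# Example invocation (a sketch only; the exact flag names are defined in
# common.parse_args, which is not shown here, so treat these as assumptions):
#   python train_extractive.py --dataset mnli --backbone roberta-base --seed 0
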
def eval_func(args, model, val_loader, test_loader, logger, log_dir, epoch, best_acc, final_acc, final_ece):
    acc, ece_temp = test_acc(args, val_loader, model, logger)

    if acc > best_acc:
        # As val_data == test_data in GLUE, do not run inference on it again.
        t_acc, ece = test_acc(args, test_loader, model, logger, ece_temp)

        # Update the reported test metrics based on validation performance
        best_acc = acc
        final_acc = t_acc
        final_ece = ece

    logger.log('========== Val Acc ==========')
    logger.log('Val acc: {:.3f}'.format(best_acc))
    logger.log('========== Test Acc ==========')
    logger.log('Test acc: {:.3f}'.format(final_acc))
    logger.log('========== Test ECE ==========')
    logger.log('Test ece: {:.3f}'.format(final_ece))

    # Save model
    if args.save_ckpt:
        logger.log('Save model...')
        save_model(args, model, log_dir, epoch)

    return best_acc, final_acc, final_ece

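
# Note: the second value returned by test_acc on the validation set (ece_temp)
# is passed back in as temp_opt when scoring the test set. This suggests that
# utils.ECE fits a temperature on validation data and reuses it at test time;
# the exact contract is defined in utils.ECE, which is not shown here.
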
def test_acc(args, loader, model, logger=None, temp_opt=None):
    if logger is not None:
        logger.log('Compute test accuracy...')
    model.eval()

    all_preds = []
    all_labels = []
    for i, (tokens, labels, indices) in enumerate(loader):
        tokens = tokens.long().to(device)
        labels = labels.to(device)

        with torch.no_grad():
            outputs = model(tokens)

        all_preds.append(outputs)
        all_labels.append(labels)

    all_preds = torch.cat(all_preds, dim=0)
    all_labels = torch.cat(all_labels, dim=0)

    if temp_opt is None:
        ece = ECE(all_preds, all_labels)
    else:
        ece = ECE(all_preds, all_labels, temp_opt=temp_opt)

    all_preds = all_preds.cpu().max(1)[1]
    all_labels = all_labels.cpu()
    acc = 100.0 * (all_preds == all_labels).float().sum() / len(all_preds)

    return acc, ece

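
# For reference, a minimal sketch of how a binned expected-calibration-error
# (ECE) metric of this general form is often computed. This is NOT the
# utils.ECE used above (that implementation is not shown here); it is an
# illustrative, hypothetical stand-in that bins predictions by confidence and
# averages the per-bin |accuracy - confidence| gap.
def _ece_sketch(logits, labels, n_bins=15):
    probs = torch.softmax(logits, dim=1)  # logits -> class probabilities
    conf, preds = probs.max(dim=1)        # confidence and predicted class
    correct = preds.eq(labels).float()    # 1.0 where the prediction is right
    edges = torch.linspace(0, 1, n_bins + 1)
    ece = torch.zeros(1, device=logits.device)
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = (conf > lo.item()) & (conf <= hi.item())
        prop = in_bin.float().mean()      # fraction of samples in this bin
        if prop.item() > 0:
            gap = (correct[in_bin].mean() - conf[in_bin].mean()).abs()
            ece += prop * gap
    return ece.item()
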
if __name__ == "__main__":
    main()