"""
Entry point for training and validating the DRLSTM models.
"""
import os
import pickle
import random

import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from termcolor import colored

import utils
from utils import rank_logger_info
from utils import report_result
from utils import check_pooling_parser, check_filename
from drlstm.data import NLIDataset
from drlstm.model import DRLSTM
from drlstm.multi_model import multi_model as Multi_DRLSTM
from parameters import create_parser
from train import train
from validate import validate
from test import main_test


def set_seed(seed):
    """Seed every random number generator used by the run for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
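
# Note: seeding the RNGs alone does not guarantee bit-exact results on CUDA; for
# fully deterministic runs one would typically also set
# torch.backends.cudnn.deterministic = True and torch.backends.cudnn.benchmark = False.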


def main(args, logger):
    # In distributed mode each process works on its own local GPU; otherwise
    # fall back to the first GPU, or the CPU if CUDA is unavailable.
    device = args.local_rank if args.local_rank != -1 else (
        torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu'))
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)

    info = 20 * "=" + " Preparing for training " + 20 * "="
    rank_logger_info(logger, args.local_rank, info)

    # -------------------- Data loading ------------------- #
    info = "\t* Loading training data..."
    rank_logger_info(logger, args.local_rank, info)
    with open(args.train_data, "rb") as pkl:
        train_data = NLIDataset(pickle.load(pkl))
    train_sampler = DistributedSampler(train_data) if args.local_rank != -1 else None
    train_loader = DataLoader(train_data,
                              shuffle=(train_sampler is None),
                              sampler=train_sampler,
                              batch_size=args.batch_size)

    info = "\t* Loading validation data..."
    rank_logger_info(logger, args.local_rank, info)
    with open(args.valid_data, "rb") as pkl:
        valid_data = NLIDataset(pickle.load(pkl))
    valid_loader = DataLoader(valid_data,
                              shuffle=False,
                              batch_size=args.batch_size)
    # -------------------- Model definition ------------------- #
    info = "\t* Building model..."
    rank_logger_info(logger, args.local_rank, info)
    with open(args.embeddings, "rb") as pkl:
        embeddings = torch.tensor(pickle.load(pkl), dtype=torch.float).to(device)

    if args.multimodel:
        model = Multi_DRLSTM(embeddings.shape[0],
                             embeddings.shape[1],
                             hidden_size=args.hidden_size,
                             embeddings=embeddings,
                             padding_idx=0,
                             dropout=args.dropout,
                             num_classes=args.num_classes,
                             device=device,
                             pooling_method_lst=args.pooling_method)
    else:
        model = DRLSTM(embeddings.shape[0],
                       embeddings.shape[1],
                       hidden_size=args.hidden_size,
                       embeddings=embeddings,
                       padding_idx=0,
                       dropout=args.dropout,
                       num_classes=args.num_classes,
                       device=device,
                       pooling_method_lst=args.pooling_method,
                       embedding_dropout=args.embedding_dropout)
    model.to(device)

    # Report total and trainable parameter counts.
    total_num = sum(p.numel() for p in model.parameters())
    trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
    info = "total_num:{} trainable_num:{}".format(total_num, trainable_num)
    rank_logger_info(logger, args.local_rank, info)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    criterion = nn.CrossEntropyLoss()
    if args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    elif args.optim == "rmsprop":
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
    else:
        raise ValueError("Unsupported optimizer: {}".format(args.optim))
    # Halve the learning rate whenever the validation accuracy stops improving.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode="max",
                                                           factor=0.5,
                                                           patience=0)

    if not args.load_from:
        best_score = 0.0
        start_epoch = 1

    # Data for loss curves plot.
    epochs_count = []
    train_losses = []
    valid_losses = []
    train_accuracy = []
    valid_accuracy = []

    info = "\n" + 20 * "=" + " Training model on device: {} ".format(device) + 20 * "="
    rank_logger_info(logger, args.local_rank, info)

    patience_counter = 0
    for epoch in range(start_epoch, args.epochs + 1):
        epochs_count.append(epoch)

        info = "* Training epoch {}:".format(epoch)
        rank_logger_info(logger, args.local_rank, info)
        epoch_time, epoch_loss, epoch_accuracy = train(args,
                                                       epoch,
                                                       model,
                                                       train_loader,
                                                       optimizer,
                                                       criterion,
                                                       args.max_gradient_norm,
                                                       device)
        train_losses.append(epoch_loss)
        train_accuracy.append(epoch_accuracy)
        info = "Training epoch: {}, time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n".format(
            epoch, epoch_time, epoch_loss, (epoch_accuracy * 100))
        rank_logger_info(logger, args.local_rank, info)

        info = "* Validation for epoch {}:".format(epoch)
        rank_logger_info(logger, args.local_rank, info)
        epoch_time, epoch_loss, epoch_accuracy = validate(model,
                                                          valid_loader,
                                                          criterion,
                                                          device)
        valid_losses.append(epoch_loss)
        valid_accuracy.append(epoch_accuracy)
        info = "Validation epoch: {}, time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\n".format(
            epoch, epoch_time, epoch_loss, (epoch_accuracy * 100))
        rank_logger_info(logger, args.local_rank, info)

        # The scheduler tracks validation accuracy (mode="max").
        scheduler.step(epoch_accuracy)

        # Early stopping: count epochs without improvement, keep the best model.
        if epoch_accuracy <= best_score:
            patience_counter += 1
        else:
            best_score = epoch_accuracy
            best_model = model
            patience_counter = 0
            # Only the main process writes the checkpoint.
            if args.local_rank in [-1, 0]:
                torch.save({"epoch": epoch,
                            "model_state_dict": best_model.state_dict(),
                            "best_score": best_score,
                            "epochs_count": epochs_count,
                            "train_losses": train_losses,
                            "valid_losses": valid_losses},
                           os.path.join(args.save_path, "best.pth.tar"))
        if patience_counter >= args.patience:
            info = "-> Early stopping: patience limit reached, stopping..."
            rank_logger_info(logger, args.local_rank, info)
            break

    if args.local_rank in [-1, 0]:
        report_result(epochs_count, train_losses, valid_losses,
                      train_accuracy, valid_accuracy, args.save_path)
    info = "-> Test: loading model from {}".format(os.path.join(args.save_path, "best.pth.tar"))
    rank_logger_info(logger, args.local_rank, info)
    main_test(args, logger)


if __name__ == '__main__':
    parser = create_parser()
    parser = check_pooling_parser(parser)
    # parser = check_filename(parser)

    # Resolve the data and output paths relative to the project layout.
    parser.data_dir = os.path.join("preprocessed_data", parser.data_dir)
    parser.train_data = os.path.join(parser.data_dir, parser.train_data)
    parser.valid_data = os.path.join(parser.data_dir, parser.valid_data)
    parser.test_data = os.path.join(parser.data_dir, parser.test_data)
    parser.embeddings = os.path.join(parser.data_dir, parser.embeddings)
    parser.save_path = os.path.join("result", parser.save_path)
    if not os.path.exists(parser.save_path):
        os.mkdir(parser.save_path)

    set_seed(parser.seed)

    # Join the process group when launched in distributed mode.
    local_rank = parser.local_rank
    if local_rank != -1:
        dist_backend = 'nccl'
        dist.init_process_group(backend=dist_backend)
    device = local_rank if local_rank != -1 else (
        torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu'))
    print(local_rank)
    torch.cuda.set_device(local_rank)

    logger = utils.setup_logger(__name__, os.path.join("log", parser.checkpoint_path))
    rank_logger_info(logger, parser.local_rank, colored(parser, "red"))
    main(parser, logger)
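
# Example invocations (a sketch; the exact command-line flags are defined in
# parameters.create_parser() and are assumed here to mirror the attribute names
# used above, e.g. --batch_size, --epochs, --local_rank):
#
#   Single-GPU / CPU training:
#       python run.py
#
#   Multi-GPU training with DistributedDataParallel (one process per GPU, NCCL
#   backend; the launcher sets --local_rank for each process):
#       python -m torch.distributed.launch --nproc_per_node=4 run.py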