bert_regression.py
# coding=utf-8
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import pandas as pd
import numpy as np
import re
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from Data_management.data_helpers import InputFeatures, InputExample, convert_examples_to_features, read_examples, read_from_pkl
import torch.nn.functional as F
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# Weights loader: restore only the parameters whose names exist in the target model
def load_weights_sequential(target, source_state):
    matched = {k: v for k, v in source_state.items() if k in target.state_dict()}
    state = target.state_dict()
    state.update(matched)  # keep the target's defaults for any parameter missing from the source
    target.load_state_dict(state)
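# Hypothetical usage (the script below loads its weights via from_pretrained instead;
# the checkpoint path here is illustrative only):
#   checkpoint = torch.load('some_checkpoint.bin', map_location='cpu')
#   load_weights_sequential(model, checkpoint)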
# Regress or classify
mode = 'regression'
# Read dataset and train vs test indices
data = read_from_pkl('Data_management/classification_slen84.pkl')
idx_partitions = np.load('Data_management/partition_idx.npy', allow_pickle=True).item()  # the .npy file holds a pickled dict
# Convert dataset to tensors
all_label_ids = torch.tensor(data['all_label_ids'], dtype=torch.float)
all_input_ids = torch.tensor(data['all_input_ids'], dtype=torch.long)
all_input_mask = torch.tensor(data['all_input_mask'], dtype=torch.long)
all_segment_ids = torch.tensor(data['all_segment_ids'], dtype=torch.long)
all_weights = torch.tensor(data['all_weights'], dtype=torch.long)
# Create dataset classes from the previous tensors
train_data = TensorDataset(all_input_ids[idx_partitions['train']], all_input_mask[idx_partitions['train']],
                           all_segment_ids[idx_partitions['train']], all_label_ids[idx_partitions['train']])
train_sampler = RandomSampler(train_data)
test_data = TensorDataset(all_input_ids[idx_partitions['val']], all_input_mask[idx_partitions['val']],
                          all_segment_ids[idx_partitions['val']], all_label_ids[idx_partitions['val']])
test_sampler = RandomSampler(test_data)  # bug fix: previously sampled train_data instead of test_data
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 72
# Parameters of the data loader
params = {'batch_size': batch_size,
          'sampler': train_sampler,
          'num_workers': 6,
          'pin_memory': True}
train_dataloader = DataLoader(train_data, **params)
num_labels = 1
# Load fine-tuned weights
weights_path = 'bert_trained_1_epoch'
trained = torch.load(weights_path, map_location='cpu')
# Drop `state_dict=trained` if you want to start from the original pretrained weights
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=num_labels, state_dict=trained)
print(model)
if mode == "classification":
loss_fct = CrossEntropyLoss()
elif mode == "regression":
#Class weights
pos_weight = torch.tensor([1.5]).to(device)
loss_fct = MSELoss()#BCEWithLogitsLoss()#pos_weight=pos_weight)
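    # Alternative kept from the original commented-out code: treat the task as
    # weighted binary classification instead of regression (a sketch, not what
    # this script actually runs):
    #   loss_fct = BCEWithLogitsLoss(pos_weight=pos_weight)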
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_epochs = 4
gradient_accumulation_steps = 1
# Count optimizer steps, not batches, so divide by the accumulation factor
num_train_optimization_steps = int(len(train_data) / batch_size / gradient_accumulation_steps) * num_train_epochs
optimizer = BertAdam(optimizer_grouped_parameters,
                     lr=2e-5,
                     warmup=0.1,
                     t_total=num_train_optimization_steps)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
model.train()
history = []
model.to(device)
for epoch in trange(int(num_train_epochs), desc="Epoch"):
    running_corrects = 0.
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    for step, batch in enumerate(train_dataloader):
        batch = tuple(t.to(device) for t in batch)
        input_ids, input_mask, segment_ids, label_ids = batch
        # Compute loss values for both output modes
        logits = model(input_ids, segment_ids, input_mask, labels=None)
        if mode == "classification":
            loss = loss_fct(logits, label_ids)
        elif mode == "regression":
            # Target toxicity is between 0 and 1, so squash the logits with a sigmoid
            logits = torch.sigmoid(logits)
            if step % 250 == 0:
                print(logits.view(-1))
                print(label_ids)
            # Scale both sides by 10 to amplify the MSE signal
            loss = loss_fct(10 * logits.view(-1), 10 * label_ids)
            # Boolean tensors for toxic vs non-toxic, thresholded at 0.5
            preds = logits.view(-1) >= 0.5
            ground_truth = label_ids >= 0.5
            running_corrects += torch.sum(ground_truth == preds)
        loss.backward()
        # Select the index with the maximum score
        if mode == "classification":
            _, preds = torch.max(logits, 1)
        # Track loss, number of samples and number of gradient steps
        tr_loss += loss.item()
        nb_tr_examples += input_ids.size(0)
        nb_tr_steps += 1
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
        if step % 250 == 0:
            print(running_corrects, nb_tr_examples, nb_tr_steps)
            print(" Step {}: MSE_loss: {}, accuracy: {}".format(
                step, float(tr_loss) / nb_tr_steps, float(running_corrects) / nb_tr_examples))
    torch.save(model.state_dict(), 'bert_regression_Epoch_' + str(epoch))
    epoch_acc = running_corrects.double().detach() / nb_tr_examples
    epoch_acc = epoch_acc.data.cpu().numpy()
    train_loss = tr_loss / nb_tr_steps
    print("Epoch {}, accuracy: {}, loss: {}".format(epoch, epoch_acc, train_loss))
    history.append([train_loss, epoch_acc])
    np.save('history.npy', history)  # persist training history after every epoch
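
# --- Held-out evaluation (added sketch; not part of the original script) ---
# `test_data` and `test_sampler` are built above but never consumed. This is a
# minimal evaluation pass reusing the same loss and accuracy choices as the
# training loop; the sequential sampler and reported metrics are assumptions.
test_dataloader = DataLoader(test_data, sampler=SequentialSampler(test_data), batch_size=batch_size)
model.eval()
eval_loss, eval_corrects, eval_examples, eval_steps = 0., 0., 0, 0
with torch.no_grad():
    for batch in test_dataloader:
        input_ids, input_mask, segment_ids, label_ids = tuple(t.to(device) for t in batch)
        logits = torch.sigmoid(model(input_ids, segment_ids, input_mask, labels=None))
        eval_loss += loss_fct(10 * logits.view(-1), 10 * label_ids).item()
        eval_corrects += torch.sum((label_ids >= 0.5) == (logits.view(-1) >= 0.5)).item()
        eval_examples += input_ids.size(0)
        eval_steps += 1
print("Eval: MSE_loss: {}, accuracy: {}".format(eval_loss / eval_steps, float(eval_corrects) / eval_examples))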