# Importing libraries
import os
import time

import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
# rich: for a better display on terminal
from rich.table import Column, Table
from rich import box
from rich.console import Console
print("开始训练")
console = Console(record=True)
# to display dataframe in ASCII format
def display_df(df):
    """Display a dataframe as an ASCII table."""
    console = Console()
    table = Table(
        Column("source_text", justify="center"),
        Column("target_text", justify="center"),
        title="Sample Data",
        pad_edge=False,
        box=box.ASCII,
    )
    for row in df.values.tolist():
        table.add_row(row[0], row[1])
    console.print(table)
# training logger to log training progress
training_logger = Table(
Column("Epoch", justify="center"),
Column("Steps", justify="center"),
Column("Loss", justify="center"),
title="Training Status",
pad_edge=False,
box=box.ASCII,
)
# Setting up the device for GPU usage
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
class YourDataSetClass(Dataset):
def __init__(
self, dataframe, tokenizer, source_len, target_len, source_text, target_text
):
"""
Initializes a Dataset class
Args:
dataframe (pandas.DataFrame): Input dataframe
tokenizer (transformers.tokenizer): Transformers tokenizer
source_len (int): Max length of source text
target_len (int): Max length of target text
source_text (str): column name of source text
target_text (str): column name of target text
"""
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = target_len
self.target_text = self.data[target_text]
self.source_text = self.data[source_text]
def __len__(self):
"""returns the length of dataframe"""
return len(self.target_text)
def __getitem__(self, index):
"""return the input ids, attention masks and target ids"""
source_text = str(self.source_text[index])
target_text = str(self.target_text[index])
        # normalize whitespace so both texts are clean, single-space-separated strings
        source_text = " ".join(source_text.split())
        target_text = " ".join(target_text.split())
        # tokenize, truncating/padding to the fixed maximum lengths
        source = self.tokenizer.batch_encode_plus(
            [source_text],
            max_length=self.source_len,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )
        target = self.tokenizer.batch_encode_plus(
            [target_text],
            max_length=self.summ_len,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )
source_ids = source["input_ids"].squeeze()
source_mask = source["attention_mask"].squeeze()
target_ids = target["input_ids"].squeeze()
target_mask = target["attention_mask"].squeeze()
return {
"source_ids": source_ids.to(dtype=torch.long),
"source_mask": source_mask.to(dtype=torch.long),
"target_ids": target_ids.to(dtype=torch.long),
"target_ids_y": target_ids.to(dtype=torch.long),
}
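
# A minimal usage sketch of the dataset class (illustrative only; assumes a
# dataframe with "input"/"target" columns and an already-loaded tokenizer):
#   ds = YourDataSetClass(df, tokenizer, 512, 64, "input", "target")
#   item = ds[0]
#   item["source_ids"].shape  # torch.Size([512])
#   item["target_ids"].shape  # torch.Size([64])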
def train(epoch, tokenizer, model, device, loader, optimizer):
    """
    Function to be called for training with the parameters passed from the main function
    """
    model.train()
    time1 = time.time()
    for step, data in enumerate(loader, 0):
        y = data["target_ids"].to(device, dtype=torch.long)
        # decoder input: the target without its last token, e.g. "你好吗?"
        y_ids = y[:, :-1].contiguous()
        # labels: the target from the second token to the end, e.g. "好吗?</s>"
        lm_labels = y[:, 1:].clone().detach()
        # set pad positions to -100 so the loss ignores them; for details see
        # https://github.com/Shivanandroy/T5-Finetuning-PyTorch/issues/3
        lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
        ids = data["source_ids"].to(device, dtype=torch.long)  # input, e.g. "how are you?"
        mask = data["source_mask"].to(device, dtype=torch.long)
        outputs = model(
            input_ids=ids,
            attention_mask=mask,
            decoder_input_ids=y_ids,
            labels=lm_labels,
        )
        loss = outputs[0]
        # log every 50 steps
        if step % 50 == 0 and step != 0:
            time2 = time.time()
            print(step, "epoch:" + str(epoch) + "-loss:" + str(loss.item())
                  + ";each step's time spent:" + str((time2 - time1) / step))
            # training_logger.add_row(str(epoch), str(step), str(loss))
            # console.print(training_logger)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
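
# Worked example of the shift in train() above (illustrative token ids,
# assuming pad_token_id = 0 and </s> = 1):
#   y         = [[5, 8, 9, 1, 0, 0]]      # "好 吗 ? </s> <pad> <pad>"
#   y_ids     = [[5, 8, 9, 1, 0]]         # decoder input: last token dropped
#   lm_labels = [[8, 9, 1, -100, -100]]   # shifted left, pads masked to -100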
def validate(epoch, tokenizer, model, device, loader, max_length):
    """
    Function to evaluate the model: runs it on the validation data and returns
    the generated predictions together with the reference labels.
    """
    model.eval()
    predictions = []
    actuals = []
    with torch.no_grad():
        for step, data in enumerate(loader, 0):
            y = data["target_ids"].to(device, dtype=torch.long)
            ids = data["source_ids"].to(device, dtype=torch.long)
            mask = data["source_mask"].to(device, dtype=torch.long)
            generated_ids = model.generate(
                input_ids=ids,
                attention_mask=mask,
                max_length=max_length,
                num_beams=2,             # beam search with 2 beams
                repetition_penalty=2.5,  # discourage repeated tokens
                length_penalty=1.0,      # neutral length preference
                early_stopping=True,     # stop once all beams have finished
            )
            preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
            target = [tokenizer.decode(t, skip_special_tokens=True, clean_up_tokenization_spaces=True) for t in y]
            if step % 1000 == 0:
                console.print(f"Completed {step}")
            predictions.extend(preds)
            actuals.extend(target)
    return predictions, actuals
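
# A quick post-hoc check one could run on validate()'s outputs (a sketch, not
# part of the original script): the exact-match rate between generations and
# references, e.g.
#   em = sum(p.strip() == a.strip() for p, a in zip(predictions, actuals)) / len(actuals)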
# Trainer: ties together the dataset class and the train/validate functions;
# loads the data, trains the model, and checks the result on held-out data.
def T5Trainer(
dataframe, source_text, target_text, model_params, output_dir="./outputs/"
):
"""
T5 trainer
"""
# Set random seeds and deterministic pytorch for reproducibility
torch.manual_seed(model_params["SEED"]) # pytorch random seed
np.random.seed(model_params["SEED"]) # numpy random seed
torch.backends.cudnn.deterministic = True
# logging
console.log(f"""[Model]: Loading {model_params["MODEL"]}...\n""")
    # tokenizer for encoding the text
    tokenizer = T5Tokenizer.from_pretrained(model_params["MODEL"])
    # Defining the model. T5ForConditionalGeneration already includes the language-modeling
    # head used for generation; here it is loaded with the ChatYuan checkpoint.
    # The model is then moved to the device (GPU/CPU) to use the available hardware.
    model = T5ForConditionalGeneration.from_pretrained(model_params["MODEL"])
    model = model.to(device)
# logging
console.log(f"[Data]: Reading data...\n")
    # Keep only the two required columns of the raw dataset
    dataframe = dataframe[[source_text, target_text]]
# display_df(dataframe.head(2))
    # Creation of Dataset and Dataloader
    # Defining the train split: 94% of the data is used for training, the rest for validation.
    train_size = 0.94
    train_dataset = dataframe.sample(frac=train_size, random_state=model_params["SEED"])
    val_dataset = dataframe.drop(train_dataset.index).reset_index(drop=True)
    train_dataset = train_dataset.reset_index(drop=True)
    # Log dataset stats: sizes and the number of training steps
    console.print(f"FULL Dataset: {dataframe.shape}")
    console.print(f"TRAIN Dataset: {train_dataset.shape}")
    console.print(f"VAL Dataset: {val_dataset.shape}\n")
    total_train_steps = int((train_dataset.shape[0] * model_params["TRAIN_EPOCHS"]) / model_params["TRAIN_BATCH_SIZE"])
    console.print(f"Total Train Steps: {total_train_steps}\n")
# Creating the Training and Validation dataset for further creation of Dataloader
training_set = YourDataSetClass(
train_dataset,
tokenizer,
model_params["MAX_SOURCE_TEXT_LENGTH"],
model_params["MAX_TARGET_TEXT_LENGTH"],
source_text,
target_text,
)
val_set = YourDataSetClass(
val_dataset,
tokenizer,
model_params["MAX_SOURCE_TEXT_LENGTH"],
model_params["MAX_TARGET_TEXT_LENGTH"],
source_text,
target_text,
)
# Defining the parameters for creation of dataloaders
train_params = {
"batch_size": model_params["TRAIN_BATCH_SIZE"],
"shuffle": True,
"num_workers": 0,
}
val_params = {
"batch_size": model_params["VALID_BATCH_SIZE"],
"shuffle": False,
"num_workers": 0,
}
    # Creation of DataLoaders for training and validation. These are used in the
    # training and validation stages of the model.
    training_loader = DataLoader(training_set, **train_params)
    val_loader = DataLoader(val_set, **val_params)
# Defining the optimizer that will be used to tune the weights of the network in the training session.
optimizer = torch.optim.Adam(
params=model.parameters(), lr=model_params["LEARNING_RATE"]
)
# Training loop
console.log(f"[Initiating Fine Tuning]...\n")
for epoch in range(model_params["TRAIN_EPOCHS"]):
# 1) train for one epoch
train(epoch, tokenizer, model, device, training_loader, optimizer)
# 2) save model for each epoch
console.log(f"[Saving Model]...\n")
path = os.path.join(output_dir, "model_files")
model.save_pretrained(path)
tokenizer.save_pretrained(path)
        # 3) evaluate on the validation dataset
        console.log("[Initiating Validation]...\n")
        with torch.no_grad():  # added 2022.10.4
            # for epoch in range(model_params["VAL_EPOCHS"]):
            predictions, actuals = validate(epoch, tokenizer, model, device, val_loader,
                                            model_params["MAX_TARGET_TEXT_LENGTH"])
            final_df = pd.DataFrame({"Generated Text": predictions, "Actual Text": actuals})
            final_df.to_csv(os.path.join(output_dir, "predictions.csv"))
console.save_text(os.path.join(output_dir, "logs.txt"))
console.log(f"[Validation Completed.]\n")
console.print(
f"""[Model] Model saved @ {os.path.join(output_dir, "model_files")}\n"""
)
console.print(
f"""[Validation] Generation on Validation data saved @ {os.path.join(output_dir, 'predictions.csv')}\n"""
)
console.print(f"""[Logs] Logs saved @ {os.path.join(output_dir, 'logs.txt')}\n""")
# Model parameters
model_params = {
    "MODEL": "ClueAI/ChatYuan-large-v2",  # model checkpoint
    "TRAIN_BATCH_SIZE": 1,  # training batch size (e.g. 8 if memory allows)
    "VALID_BATCH_SIZE": 1,  # validation batch size (e.g. 8 if memory allows)
    "TRAIN_EPOCHS": 1,  # number of training epochs
    "VAL_EPOCHS": 1,  # number of validation epochs
    "LEARNING_RATE": 1e-4,  # learning rate
    "MAX_SOURCE_TEXT_LENGTH": 512,  # max token length of source text
    "MAX_TARGET_TEXT_LENGTH": 64,  # max token length of target text
    "SEED": 42,  # seed for reproducibility
}
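
# A hedged note: batch size 1 minimizes GPU memory use for this large checkpoint;
# if memory allows, raising TRAIN_BATCH_SIZE (e.g. to 8) speeds up training and
# proportionally reduces the total step count computed in T5Trainer.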
# Train the model
# Uses (part of) pCLUE, a multi-task prompt-learning dataset with 1,200,000+ examples
# The dataframe must have 2 columns:
#   - input: source text
#   - target: target output
df = pd.read_csv("train.csv")  # full dataset: ~1,200k rows
# df = df.sample(frac=0.01)  # TODO: remove this line if you need more data for training
print("df.head:", df.head(n=5))
print("df.shape:", df.shape)
# GPU memory note: if you run out of memory, check usage with nvidia-smi;
# if most of the GPU is already taken, restart the Colab runtime.
T5Trainer(
dataframe=df,
source_text="input",
target_text="target",
model_params=model_params,
output_dir="outputs",
)
print("end..")
# !nvidia-smi -r
# Run the following to clear any GPU memory cache left over from training
torch.cuda.empty_cache()
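
# A minimal inference sketch after training (illustrative; assumes the checkpoint
# saved above under outputs/model_files and a ChatYuan-style prompt):
#   tokenizer = T5Tokenizer.from_pretrained("outputs/model_files")
#   model = T5ForConditionalGeneration.from_pretrained("outputs/model_files").to(device)
#   encoded = tokenizer(["用户:你好\n小元:"], return_tensors="pt").to(device)
#   out = model.generate(input_ids=encoded["input_ids"],
#                        attention_mask=encoded["attention_mask"], max_length=64)
#   print(tokenizer.decode(out[0], skip_special_tokens=True))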