# coding=utf-8
"""Fine-tuning GPT-2 on an intermediate source task.
Author: Karen Garcia"""
import argparse
from transformers import DataCollatorForLanguageModeling
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from transformers import Trainer, TrainingArguments
import utils
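
# Interface this script assumes from the local `utils` module (not shown
# here): download_dataset(name, train) is expected to return a dataset of
# already-tokenized examples (e.g. dicts with an "input_ids" key), which is
# the shape Trainer and DataCollatorForLanguageModeling work with.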


def load_data_collator(tokenizer, mlm=False):
    # mlm=False selects the causal language-modeling objective used by GPT-2;
    # the collator pads each batch and derives the labels from the input ids.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=mlm,
    )
    return data_collator


def train(dataset_name,
          model_name,
          output_dir,
          overwrite_output_dir,
          per_device_train_batch_size,
          num_train_epochs,
          max_steps,
          save_total_limit):
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    # GPT-2's tokenizer ships without a pad token; reuse the EOS token so the
    # data collator can pad batches of unequal length.
    tokenizer.pad_token = tokenizer.eos_token
    print('Download dataset')
    train_dataset = utils.download_dataset(dataset_name, True)
    data_collator = load_data_collator(tokenizer)
    print('Prepare for training')
    model = GPT2LMHeadModel.from_pretrained(model_name)
    training_args = TrainingArguments(
        output_dir=output_dir,
        overwrite_output_dir=overwrite_output_dir,
        per_device_train_batch_size=per_device_train_batch_size,
        num_train_epochs=num_train_epochs,
        # When max_steps > 0 it takes precedence over num_train_epochs.
        max_steps=max_steps,
        save_total_limit=save_total_limit,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
    )
    print('Training')
    trainer.train()
    trainer.save_model(output_dir)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_name", default='squad', type=str,
                        help="Dataset name of the intermediate source task.")
    parser.add_argument("--output_dir", default='./pretrained_model', type=str,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--model_name", default='gpt2', type=str,
                        help="Pretrained model name or directory.")
    # store_true makes this a proper boolean flag; the original str argument
    # was truthy for any non-empty value, including "False".
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory.")
    parser.add_argument('--per_device_train_batch_size', default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--num_train_epochs", default=1, type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=1000, type=int,
                        help="Total number of training steps to perform; overrides --num_train_epochs when > 0.")
    parser.add_argument("--save_total_limit", default=1, type=int,
                        help="Maximum number of checkpoints to keep.")
    args = parser.parse_args()
    train(
        dataset_name=args.dataset_name,
        model_name=args.model_name,
        output_dir=args.output_dir,
        overwrite_output_dir=args.overwrite_output_dir,
        per_device_train_batch_size=args.per_device_train_batch_size,
        num_train_epochs=args.num_train_epochs,
        max_steps=args.max_steps,
        save_total_limit=args.save_total_limit,
    )

if __name__ == "__main__":
    main()
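
# Example invocation (a sketch; "squad" is just the script's default dataset
# name and assumes utils.download_dataset knows how to fetch it):
#
#   python run_intermediate_source_GPT2.py \
#       --dataset_name squad \
#       --model_name gpt2 \
#       --output_dir ./pretrained_model \
#       --overwrite_output_dir \
#       --max_steps 1000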