# training.py
import os
import shutil

import nvidia
import torch
import transformers
from datasets import load_from_disk
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
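# Point LD_LIBRARY_PATH at the CUDA runtime bundled with the pip-installed nvidia packages
# so that CUDA-dependent libraries (e.g. bitsandbytes) can find libcudart inside the container.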
cuda_install_dir = '/'.join(nvidia.__file__.split('/')[:-1]) + '/cuda_runtime/lib/'
os.environ['LD_LIBRARY_PATH'] = cuda_install_dir
print('*'*100)
print(torch.__version__)
print('*'*100)
# log_bucket = f"s3://{os.environ['SMP_S3BUCKETNAME']}/falcon-40b-qlora-finetune"
model_id = "tiiuae/falcon-7b"
# model_id = "tiiuae/falcon-40b"
device_map = "auto"
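# QLoRA-style quantization: load the base model in 4-bit NF4 with double quantization,
# computing in bfloat16.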
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# alternate config for loading the unquantized model with CPU offload
'''
device_map = {
    "transformer.word_embeddings": 0,
    "transformer.word_embeddings_layernorm": 0,
    "lm_head": "cpu",
    "transformer.h": 0,
    "transformer.ln_f": 0,
}
bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
'''
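# Load the preprocessed, tokenized train/test splits from the training job's input channels.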
lm_train_dataset = load_from_disk(dataset_path="/opt/ml/input/data/falcon-text-summarization-preprocess-training/")
lm_test_dataset = load_from_disk(dataset_path="/opt/ml/input/data/falcon-text-summarization-preprocess-testing/")
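# Load the tokenizer and the 4-bit quantized base model (Falcon's modeling code is pulled
# from the Hub, hence trust_remote_code=True).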
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, quantization_config=bnb_config, device_map=device_map)
model.gradient_checkpointing_enable()
model = prepare_model_for_kbit_training(model)  # prepare the quantized model for LoRA training with PEFT
model.config.use_cache = False  # caching is incompatible with gradient checkpointing during training
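# LoRA configuration: rank-8 adapters (alpha=32) on Falcon's attention and MLP projection
# layers, with small dropout and no bias updates.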
config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=[
        "query_key_value",
        "dense",
        "dense_h_to_4h",
        "dense_4h_to_h",
    ],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
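model.print_trainable_parameters()  # optional: report the small fraction of weights LoRA leaves trainable
# Hugging Face Trainer drives the fine-tuning loop; the datasets were tokenized upstream,
# so the collator only needs to pad batches for causal LM (mlm=False).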
trainer = transformers.Trainer(
    model=model,
    train_dataset=lm_train_dataset,
    eval_dataset=lm_test_dataset,
    args=transformers.TrainingArguments(
        per_device_train_batch_size=8,
        per_device_eval_batch_size=8,
        # logging_dir=f'{log_bucket}/',  # connect TensorBoard for visualizing live training logs
        logging_steps=2,
        num_train_epochs=1,  # a single epoch for demonstration
        learning_rate=2e-4,
        bf16=True,
        save_strategy="no",
        output_dir="outputs",
        report_to="tensorboard",
    ),
    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
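# Run fine-tuning; with save_strategy="no", only the final model saved below is persisted.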
trainer.train()
eval_metrics = trainer.evaluate()
# Fail the job if the evaluation loss is above the threshold. Alternatively, write an
# evaluation.json and move the pass/fail logic into a separate SageMaker pipeline step.
if eval_metrics["eval_loss"] > 2:
    raise ValueError("Evaluation loss is too high.")
# SageMaker packages everything under /opt/ml/model into model.tar.gz. For inference,
# extract the archive and load the saved weights and config with the Hugging Face/PEFT
# APIs to serve the model.
trainer.save_model('/opt/ml/model')
# shutil.copytree('/opt/ml/code', os.path.join(os.environ['SM_MODEL_DIR'], 'code'))
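# Minimal inference sketch (an assumption, not part of this job): Trainer.save_model on a
# PEFT-wrapped model stores the LoRA adapter weights, so serving code would reattach them to
# the quantized base model. `adapter_dir` below is a hypothetical path to the extracted
# model.tar.gz contents.
'''
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, quantization_config=bnb_config, device_map="auto"
)
inference_model = PeftModel.from_pretrained(base_model, adapter_dir)
inference_model.eval()
'''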