run_pt.sh
# Adapted from: https://github.com/ymcui/Chinese-LLaMA-Alpaca/blob/main/scripts/training/run_pt.sh
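# Learning rate and LoRA hyperparameters. embed_tokens and lm_head are kept fully
# trainable via modules_to_save, which is typically required when the tokenizer
# vocabulary is extended, as with the Amharic tokenizer configured below.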
lr=2e-4
lora_rank=8
lora_alpha=32
lora_trainable="q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj"
modules_to_save="embed_tokens,lm_head"
lora_dropout=0.05
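# Paths: base LLaMA weights, extended Amharic tokenizer, pre-training corpus, and data cache.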
pretrained_model=/path/to/llama
amharic_tokenizer_path=/path/to/amharic/tokenizer
dataset_dir=/path/to/pt/amharic/dataset
data_cache=/path/to/data/cache
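# Batch sizes and gradient accumulation (effective batch size = per_device_train_batch_size
# * gradient_accumulation_steps * number of GPUs), plus the checkpoint output directory.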
per_device_train_batch_size=32
per_device_eval_batch_size=1
gradient_accumulation_steps=1
output_dir=/path/to/output/dir
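# Launch continued pre-training; the LoRA-specific flags (--lora_rank, --lora_alpha,
# --trainable, --modules_to_save, --lora_dropout) are consumed by pretrain.py.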
python pretrain.py \
--model_name_or_path ${pretrained_model} \
--tokenizer_name_or_path ${amharic_tokenizer_path} \
--dataset_dir ${dataset_dir} \
--data_cache_dir ${data_cache} \
--validation_split_percentage 0.001 \
--per_device_train_batch_size ${per_device_train_batch_size} \
--per_device_eval_batch_size ${per_device_eval_batch_size} \
--do_train \
--seed $RANDOM \
--num_train_epochs 1 \
--lr_scheduler_type cosine \
--learning_rate ${lr} \
--warmup_ratio 0.05 \
--weight_decay 0.01 \
--logging_strategy steps \
--logging_steps 10 \
--save_strategy steps \
--save_total_limit 1 \
--save_steps 7528 \
--evaluation_strategy steps \
--eval_steps 3000 \
--preprocessing_num_workers 8 \
--block_size 512 \
--output_dir ${output_dir} \
--bf16 \
--overwrite_output_dir \
--logging_first_step True \
--lora_rank ${lora_rank} \
--lora_alpha ${lora_alpha} \
--trainable ${lora_trainable} \
--modules_to_save ${modules_to_save} \
--lora_dropout ${lora_dropout} \
--gradient_checkpointing
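
# A minimal multi-GPU launch sketch (an assumption, not part of the original script):
# if pretrain.py builds on the Hugging Face Trainer, it can also be started with
# torchrun for data-parallel training. The torchrun flags below are standard PyTorch;
# adjust --nproc_per_node to the number of available GPUs, e.g.:
#
# torchrun --nnodes 1 --nproc_per_node 4 pretrain.py \
#     --model_name_or_path ${pretrained_model} \
#     --tokenizer_name_or_path ${amharic_tokenizer_path} \
#     ...   # remaining arguments identical to the single-process call above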