-
Notifications
You must be signed in to change notification settings - Fork 29
/
pretrain_aquila_70b_distributed_A800_16n_80g_A100_48n_40g_hetero_pp.sh
executable file
·142 lines (125 loc) · 3.23 KB
/
pretrain_aquila_70b_distributed_A800_16n_80g_A100_48n_40g_hetero_pp.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#!/bin/bash
PROJ_HOME=$1
EXPNAME=$2
HOSTFILE=$3
DATA_PATH=$4
# Preapre the environment related configuration
source ../examples/aquila/env.sh
# Define files related to tokenizer
VOCAB_FILE=../examples/aquila/tokenizer/vocab.json
MERGE_FILE=../examples/aquila/tokenizer/merges.txt
SPECIAL_TOKENS_FILE=../examples/aquila/tokenizer/special_tokens.txt
# Build some paths for the current training
CHECKPOINT_PATH=$PROJ_HOME/checkpoints/$EXPNAME
mkdir -p $CHECKPOINT_PATH
LOG_PATH=$PROJ_HOME/logs/$EXPNAME
mkdir -p $LOG_PATH
cp $0 $LOG_PATH/
TB_PATH=$PROJ_HOME/tboard/$EXPNAME
mkdir -p $TB_PATH
WB_PATH=$PROJ_HOME/wandb/$EXPNAME
mkdir -p $WB_PATH
DISTRIBUTED_ARGS="
--nproc_per_node $NODE_DEVICES \
--nnodes $NUM_NODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
HETERO_ARGS="
--hetero-mode pp \
--hetero-current-device-type $NODE_TYPE \
--hetero-device-types A800 A100 \
--hetero-pipeline-stages 1 20 3 20 20 20 \
"
TRAINING_ARGS="
--train-samples 488281250 \
--rampup-batch-size 32 32 2000000 \
--eval-iters 0 \
--eval-interval 2000 \
--tensor-model-parallel-size 8 \
--pipeline-model-parallel-size 4 \
--micro-batch-size 1 \
--global-batch-size 1024 \
--disable-bias-linear \
--use-flash-attn \
--sequence-parallel \
--use-distributed-optimizer
"
MIXED_PRECISION_ARGS="
--bf16 \
--attention-softmax-in-fp32 \
--accumulate-allreduce-grads-in-fp32
"
DATA_ARGS="
--data-path $DATA_PATH \
--tokenizer-type AquilaTokenizer \
--vocab-file $VOCAB_FILE \
--vocab-size 100008\
--make-vocab-size-divisible-by 64 \
--merge-file $MERGE_FILE \
--special-tokens-file $SPECIAL_TOKENS_FILE \
--split 1
"
NETWORK_ARGS="
--num-layers 80 \
--hidden-size 8192 \
--num-attention-heads 64 \
--group-query-attention \
--num-query-groups 8 \
--hidden-dim-multiplier 1.3 \
--seq-length 4096 \
--max-position-embeddings 4096 \
--norm-epsilon 1e-5 \
--norm-init-weight 0.25 \
--use-rotary-position-embeddings \
--no-position-embedding \
--swiglu \
--multiple-of 4096 \
--normalization RMSNorm \
--untie-embeddings-and-output-weights
"
INITIALIZATION_ARGS="
--init-method-std 0.0149 \
--seed 42
"
REGULARIZATION_ARGS="
--attention-dropout 0.0 \
--hidden-dropout 0.0 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--clip-grad 1.0
"
LEARNING_RATE_ARGS="
--lr 1.5e-4 \
--lr-decay-style cosine \
--lr-warmup-samples 500000 \
--min-lr 1.5e-5
"
CHECKPOINTING_ARGS="
--save-interval 500 \
--rampup-save-interval 5000 \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH
"
LOGGING_ARGS="
--log-interval 1 \
--tensorboard-dir $TB_PATH \
--tensorboard-log-interval 1 \
--wandb-save-dir $WB_PATH
"
cmd="torchrun $DISTRIBUTED_ARGS pretrain_gpt.py \
$HETERO_ARGS \
$TRAINING_ARGS \
$MIXED_PRECISION_ARGS \
$DATA_ARGS \
$NETWORK_ARGS \
$INITIALIZATION_ARGS \
$REGULARIZATION_ARGS \
$LEARNING_RATE_ARGS \
$CHECKPOINTING_ARGS \
$LOGGING_ARGS
"
echo $cmd
eval $cmd