# v1-mix-small.yaml — 187 lines (160 loc), 5.43 KB
---
run_name: v1-mix-small-run-001
seed: 6198
dry_run: false

wandb:
  name: ${run_name}
  project: olmo-small
  group: v1-mix

model:
  d_model: 2048
  n_heads: 16
  n_layers: 16
  mlp_ratio: 8
  alibi: true
  alibi_bias_max: 8.0
  attention_dropout: 0.0
  attention_layer_norm: true
  multi_query_attention: true
  include_bias: false
  block_type: sequential
  layer_norm_type: low_precision
  layer_norm_with_affine: true  # workaround for the layer norm bug
  bias_for_layer_norm: true  # workaround for the layer norm bug
  activation_type: swiglu
  residual_dropout: 0.0
  embedding_dropout: 0.0
  max_sequence_length: 2048
  vocab_size: 50280
  embedding_size: 50304
  eos_token_id: 0
  pad_token_id: 1
  init_device: meta
  init_fn: normal

compile: null  # causes instability on AMD GPUs

optimizer:
  name: adamw
  learning_rate: 1.0e-3
  weight_decay: 0.1
  betas:
    - 0.9
    - 0.95

scheduler:
  name: cosine_with_warmup
  t_warmup: 5000
  alpha_f: 0.1

data:
  paths: ${path.glob:${oc.env:DATA_PATH}/v1-sample/gpt-neox-20b-pii-special/books/*.npy,${oc.env:DATA_PATH}/v1-sample/gpt-neox-20b-pii-special/c4/*.npy,${oc.env:DATA_PATH}/v1-sample/gpt-neox-20b-pii-special/common-crawl/*/*.npy,${oc.env:DATA_PATH}/v1-sample/gpt-neox-20b-pii-special/s2/*.npy,${oc.env:DATA_PATH}/v1-sample/gpt-neox-20b-pii-special/stack/*.npy,${oc.env:DATA_PATH}/v1-sample/gpt-neox-20b-pii-special/wiki/*.npy}
  pad_direction: right
  num_workers: 1
  drop_last: true
  pin_memory: true
  prefetch_factor: 16
  persistent_workers: true
  timeout: 0

tokenizer:
  identifier: tokenizers/allenai_eleuther-ai-gpt-neox-20b-pii-special.json
  truncate_direction: right

save_folder: ${oc.env:CHECKPOINTS_PATH}/${oc.env:SLURM_JOB_ID,${run_name}}
save_overwrite: false
# Sharded checkpoints (best for restarts)
save_interval: 5000
save_num_checkpoints_to_keep: -1
# Unsharded checkpoints (for final storage)
save_interval_unsharded: null
save_num_unsharded_checkpoints_to_keep: -1

load_path: null

max_duration: 476837  # 2T tokens
global_train_batch_size: 2048
device_train_microbatch_size: 8

precision: amp_bf16

max_grad_norm: 1.0

speed_monitor:
  window_size: 20

eval_interval: ${save_interval}
eval_subset_num_batches: -1
device_eval_batch_size: ${device_train_microbatch_size}
evaluators:
  ##########################
  # Perplexity evaluations #
  ##########################
  # TODO: do we care about c4 and RP validation? We don't have these tokenized at the moment.
  # - label: c4-validation
  #   subset_num_batches: 10
  #   data:
  #     paths: ${path.glob:${path.choose:${oc.env:SCRATCH_DIR,no_exist}/pretraining_data/preprocessed,/net/nfs.cirrascale/allennlp/llm-data}/c4/en/c4-validation.*.npy}
  #     num_workers: 2
  #     drop_last: true
  #     pin_memory: true
  #     persistent_workers: true
  #     prefetch_factor: 4
  # - label: rp-validation
  #   subset_num_batches: 10
  #   data:
  #     paths: ${path.glob:${path.choose:${oc.env:SCRATCH_DIR,no_exist}/pretraining_data/preprocessed,/net/nfs.cirrascale/allennlp/llm-data}/redpajama/redpajama-validation.npy}
  #     num_workers: 2
  #     drop_last: true
  #     pin_memory: true
  #     persistent_workers: true
  #     prefetch_factor: 4

  # lump all the small datasets together (we still get separate metrics).
  - label: all-small-ppl-validation
    data:
      datasets:
        4chan-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/4chan/val.npy
        c4_100_domains-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_100_domains/val.npy
        c4_en-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/c4_en/val.npy
        gab-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/gab/val.npy
        ice-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ice/val.npy
        m2d2_s2orc-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_s2orc/val.npy
        m2d2_wiki-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/m2d2_wiki/val.npy
        manosphere-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/manosphere/val.npy
        mc4_en-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/mc4_en/val.npy
        pile-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/pile/val.npy
        ptb-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/ptb/val.npy
        twitterAEE-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/twitterAEE/val.npy
        wikitext_103-validation:
          - ${oc.env:EVAL_DATA_PATH}/perplexity/v2_small_gptneox20b/wikitext_103/val.npy
      drop_last: true

  ##########################
  # Downstream evaluations #
  ##########################
  - label: piqa
    type: downstream

  - label: hellaswag
    type: downstream

  - label: winogrande
    type: downstream

  - label: openbook_qa
    type: downstream

  # - label: boolq  # requires implemention of the pmi_dc matrix
  #   type: downstream

  - label: sciq
    type: downstream

  - label: arc_easy
    type: downstream

  # - label: arc_challenge  # requires implemention of the pmi_dc matrix
  #   type: downstream

  - label: copa
    type: downstream

  - label: rte
    type: downstream

  - label: commitment_bank
    type: downstream

  - label: mrpc
    type: downstream

  - label: sst2
    type: downstream