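#!/bin/bash
# Evaluation script for the EleutherAI lm-evaluation-harness.
# Runs a commonsense/QA benchmark suite: HellaSwag, OpenBookQA, WinoGrande,
# ARC (easy/challenge), BoolQ, and PIQA.
# Earlier runs are kept below, commented out, for reference.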
# python main.py \
# --model hf-causal \
# --model_args pretrained=PY007/TinyLlama-1.1B-Chat-v0.1,dtype="float" \
#     --tasks hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa \
# --device cuda:0 --batch_size 32
# lm_eval \
# --model microllama-pretrained_300M_115000 \
# --tasks hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa \
# --device cuda:0 \
# --batch_size 64
# lm_eval \
# --model microllama-pretrained_300M_260000 \
# --tasks hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa \
# --device cuda:0 \
# --batch_size 64
# lm_eval \
# --model hf \
# --model_args pretrained=PY007/TinyLlama-1.1B-Chat-v0.1,dtype="float" \
#     --tasks hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa \
# --device cuda:0 --batch_size 32
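# Current run: evaluate google-bert/bert-large-uncased via the HF backend.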
lm_eval \
--model hf \
--model_args pretrained=google-bert/bert-large-uncased,dtype="float" \
--tasks hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa \
--device cuda:0 \
--batch_size 128