-
Notifications
You must be signed in to change notification settings - Fork 2
/
config.py
83 lines (74 loc) · 2.47 KB
/
config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
    """Model-selection arguments.

    Covers both which model to load (Hub name or local path, with an
    optional revision) and the model roles used by CODAL-Bench rating:
    the model under test, the LLM judge, and the reference model.
    All fields default to ``None``; help text lives in field metadata.
    """

    model_name_or_path: Optional[str] = field(default=None, metadata={"help": "Name of the model on HuggingFace Hub or local path."})
    model_name: Optional[str] = field(default=None, metadata={"help": "Name of the model, used for output files naming."})
    revision: Optional[str] = field(default=None, metadata={"help": "Revision of the model on HuggingFace Hub or local path."})

    # Arguments for CODAL-Bench
    model_test: Optional[str] = field(default=None, metadata={"help": "Name of the model to judge when generating ratings."})
    model_judge: Optional[str] = field(default=None, metadata={"help": "Name of the LLM judge (must be an OpenAI model)."})
    model_reference: Optional[str] = field(default=None, metadata={"help": "Name of the model responses to use as references for single-answer grading."})
@dataclass
class DataArguments:
    """Dataset and I/O arguments.

    Selects the dataset (Hub name or local path) and split, the output
    directory, the chat template, and — for CODAL-Bench — where model
    responses live, plus the preprocessing worker count. All fields
    default to ``None``; help text lives in field metadata.
    """

    dataset_name_or_path: Optional[str] = field(default=None, metadata={"help": "Name of the dataset on HuggingFace Hub or local path."})
    dataset_split: Optional[str] = field(default=None, metadata={"help": "Split of the dataset to load."})
    output_dir: Optional[str] = field(default=None, metadata={"help": "Output directory path."})
    chat_template: Optional[str] = field(default=None, metadata={"help": "The chat template to use."})

    # Arguments for CODAL-Bench
    model_responses_dir: Optional[str] = field(default=None, metadata={"help": "Directory containing model responses."})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={"help": "The number of processes to use for the preprocessing."})
@dataclass
class GenerationConfig:
    """Decoding hyperparameters for text generation.

    Defaults: sample with temperature 0.8 and top-p 0.8, up to 1024 new
    tokens; set ``greedy=True`` to use greedy decoding instead.
    """

    max_new_tokens: Optional[int] = field(
        default=1024,
        metadata={"help": "Maximum number of new tokens to generate."},
    )
    # Fix: was annotated Optional[int] despite the float default 0.8.
    temperature: Optional[float] = field(
        default=0.8,
        metadata={"help": "Temperature value for generation."},
    )
    # Fix: was annotated Optional[int] despite the float default 0.8.
    top_p: Optional[float] = field(
        default=0.8,
        metadata={"help": "Top-p value for generation."},
    )
    greedy: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to use greedy decoding."}
    )