lightning_config.py
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class ModelCheckpointConf:
    _target_: str = "pytorch_lightning.callbacks.ModelCheckpoint"
    filepath: Optional[str] = None  # deprecated in pytorch_lightning in favor of dirpath + filename
    monitor: Optional[str] = None
    verbose: bool = False
    save_last: Optional[bool] = None
    save_top_k: Optional[int] = 1
    save_weights_only: bool = False
    mode: str = "min"
    dirpath: Any = None  # Union[str, Path, NoneType]
    filename: Optional[str] = None
    auto_insert_metric_name: bool = True
    every_n_train_steps: Optional[int] = None
    train_time_interval: Optional[str] = None  # Optional[timedelta]
    every_n_epochs: Optional[int] = None
    save_on_train_epoch_end: Optional[bool] = None
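

# Usage sketch (an illustration, not part of the original file): with
# hydra-core installed, this config can be materialized into the actual
# callback via hydra.utils.instantiate, e.g.
#
#     from hydra.utils import instantiate
#     checkpoint_cb = instantiate(ModelCheckpointConf(monitor="val_loss", save_top_k=3))
#
# Note that ModelCheckpoint's accepted arguments differ across
# pytorch_lightning releases, so fields such as `filepath` may be rejected
# by newer versions.

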
@dataclass
class TrainerConf:
    _target_: str = "pytorch_lightning.trainer.Trainer"
    logger: Any = True  # Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool]
    enable_checkpointing: bool = True
    callbacks: Any = None  # Optional[List[Callback]]
    default_root_dir: Optional[str] = None
    gradient_clip_val: float = 0.0
    process_position: int = 0
    num_nodes: int = 1
    num_processes: int = 1
    gpus: Any = None  # Union[int, str, List[int], NoneType]
    auto_select_gpus: bool = False
    tpu_cores: Any = None  # Union[int, str, List[int], NoneType]
    log_gpu_memory: Optional[str] = None
    progress_bar_refresh_rate: int = 1
    overfit_batches: Any = 0.0  # Union[int, float]
    track_grad_norm: Any = -1  # Union[int, float, str]
    check_val_every_n_epoch: int = 1
    fast_dev_run: Any = False  # Union[int, bool]
    accumulate_grad_batches: Any = 1  # Union[int, Dict[int, int], List[list]]
    max_epochs: int = 1000
    min_epochs: int = 1
    max_steps: Optional[int] = None
    min_steps: Optional[int] = None
    limit_train_batches: Any = 1.0  # Union[int, float]
    limit_val_batches: Any = 1.0  # Union[int, float]
    limit_test_batches: Any = 1.0  # Union[int, float]
    val_check_interval: Any = 1.0  # Union[int, float]
    flush_logs_every_n_steps: int = 100
    log_every_n_steps: int = 50
    accelerator: Any = None  # Union[str, Accelerator, NoneType]
    sync_batchnorm: bool = False
    precision: int = 32
    weights_summary: Optional[str] = "top"
    weights_save_path: Optional[str] = None
    num_sanity_val_steps: int = 2
    resume_from_checkpoint: Any = None  # Union[str, Path, NoneType]
    profiler: Any = None  # Union[BaseProfiler, bool, str, NoneType]
    benchmark: bool = False
    deterministic: bool = False
    auto_lr_find: Any = False  # Union[bool, str]
    replace_sampler_ddp: bool = True
    detect_anomaly: bool = False
    auto_scale_batch_size: Any = False  # Union[str, bool]
    prepare_data_per_node: bool = True
    plugins: Any = None  # Union[str, list, NoneType]
    amp_backend: str = "native"
    amp_level: Any = None  # Optional[str]
    move_metrics_to_cpu: bool = False
    gradient_clip_algorithm: Optional[str] = None
    devices: Any = None  # Union[int, str, List[int], NoneType]
    ipus: Optional[int] = None
    enable_progress_bar: bool = True
    max_time: Optional[str] = None  # Union[str, timedelta, Dict[str, int], NoneType]
    limit_predict_batches: float = 1.0  # Union[int, float]
    strategy: Optional[str] = None  # Union[str, TrainingTypePlugin, NoneType]
    enable_model_summary: bool = True
    reload_dataloaders_every_n_epochs: int = 0
    multiple_trainloader_mode: str = "max_size_cycle"
    stochastic_weight_avg: bool = False
    terminate_on_nan: Optional[bool] = None
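

if __name__ == "__main__":
    # Minimal usage sketch, assuming hydra-core and omegaconf are installed.
    # The group/name choices below are illustrative, not part of this module.
    from hydra.core.config_store import ConfigStore
    from omegaconf import OmegaConf

    # Register the structured configs so Hydra apps can select and override
    # them from YAML or the command line.
    cs = ConfigStore.instance()
    cs.store(group="trainer", name="default", node=TrainerConf)
    cs.store(group="checkpoint", name="default", node=ModelCheckpointConf)

    # A structured config can also be materialized directly and dumped to
    # YAML, e.g. to seed a config file that overrides a few fields.
    trainer_conf = OmegaConf.structured(TrainerConf(max_epochs=10, precision=16))
    print(OmegaConf.to_yaml(trainer_conf))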