jaynes-template.yml
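# Template jaynes configuration for launching e_maml_tf jobs on a Slurm cluster
# over SSH. Replace the <...> placeholders (cluster IP, SSH username/password,
# and the home-directory paths) with values for your own cluster.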
version: 0
hosts:
  slurm: &slurm-host
    ip: <cluster-ip>
    username: <your-ssh-username>
    password: <your-password>
    port: 22
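# Code mount: the local project root (`local_path: .`) is synced to `host_path`
# on the cluster at launch. The `--exclude` patterns below are rsync-style flags,
# and `pypath: true` is meant to put the mounted code on the remote PYTHONPATH.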
mounts:
  - !mounts.SSHCode &e_maml_code
    <<: *slurm-host
    local_path: .
    host_path: /usr/home/<your-username>/jaynes-mounts/e_maml_tf/{now:%Y-%m-%d}/{now:%H%M%S.%f}/e_maml_tf
    pypath: true
    excludes: >-
      --exclude='data'
      --exclude='samples'
      --exclude='figures'
      --exclude='results'
      --exclude='analysis'
      --exclude='*__pycache__'
      --exclude='*.git'
      --exclude='*.idea'
      --exclude='*.egg-info'
      --exclude='*.pkl'
    compress: true
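# Shared defaults: the &default_run and &default_runner anchors below are merged
# into each entry under `modes:` at the bottom of this file via `<<:`.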
default: &default_run
  mounts:
    - *e_maml_code
  runner: &default_runner
    # example environment configuration for MuJoCo simulators
    envs: >-
      LC_CTYPE=en_US.UTF-8
      PYTHONPATH=$PYTHONPATH:/user/home/<your-username>/.local/lib/python3.6/site-packages/
      LD_LIBRARY_PATH=/user/home/<your-username>/.mujoco/mujoco200/bin:/usr/local/nvidia/lib64:$LD_LIBRARY_PATH
    pypath: "{mounts[0].host_path}"
    launch_directory: "{mounts[0].host_path}"
    partition: "p100"  # p100 | max12hours | cpu
    # this is the setup script. To see when/where it is run, use `jaynes.config(verbose=True)`.
    setup: |  # The tensorflow-gpu module fails without CUDA (or a GPU), so this default setup loads pytorch-36 instead
      . /usr/share/modules/init/profile.sh
      module load pytorch-36
    entry_script: "/pkgs/anaconda3/bin/python -u -m jaynes.entry"
    args:
      - preserve-env
    n_cpu: 8
    n_gpu: 0
    time_limit: "6:0:0"
    output: all
    mem: 8G
    comment: ICLR-2019
    name: LeaF
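  # `launch.type: ssh` submits jobs through the slurm host defined at the top of
  # this file; `host.log_dir` is presumably where per-run output lands on that
  # machine (it mirrors the timestamped layout of the code mount's host_path).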
  launch:
    type: ssh
    <<: *slurm-host
  host: !host
    log_dir: /user/home/<your-username>/jaynes-mounts/leaf/{now:%Y-%m-%d}/{now:%H%M%S.%f}
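# Modes: select one by name when configuring jaynes (see the `jaynes.config`
# comment above). Each mode merges the shared defaults and only overrides the
# Slurm runner's partition, resources, and setup script.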
modes:
  default:
    <<: *default_run
    runner:
      !runners.Slurm
      <<: *default_runner
      partition: cpu
      n_cpu: 8
      n_gpu: 0
  default-gpu:
    <<: *default_run
    runner:
      !runners.Slurm
      <<: *default_runner
      setup: |  # this is not really used.
        . /usr/share/modules/init/profile.sh
        module load tensorflow-gpu-36
      partition: "p100"
      n_cpu: 8
      n_gpu: 1
      exclude: gpu027
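# Usage sketch (an assumption, not part of the original template): a launcher
# script in the project root would typically point jaynes at this config and
# pick a mode, roughly:
#
#   import jaynes
#   jaynes.config(mode="default-gpu", verbose=True)  # `jaynes.config(verbose=True)` is cited above;
#                                                    # the `mode=` keyword is an assumption
#   jaynes.run(train_fn)   # `jaynes.run` and the `train_fn` entry point are assumptions
#   jaynes.listen()        # assumed call for streaming the remote job's output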