[global]
#
# How many distributed copies of source data should be stored
#
# replication = 1
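#
# For example (illustrative value, not the default): keeping two copies
# of each input file means the data can survive the loss of a single node.
#
# replication = 2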
#
# Build the work queue by hostname rather than by [Nodename].
# (Use this if you have multiple "nodes" (e.g. disks) on one hostname
# and sshd is rejecting the rate of connections. It is usually better
# to raise MaxStartups in sshd_config instead.)
#
# queuebyhost = True
#
# Number of times to retry an all2all command.
# The default is 7 to accommodate all2all over ssh with the default
# MaxStartups value. For better performance when there is no MaxStartups
# cap, set this to 1.
#
# all2all_replicate = 7
#
# Command to execute to connect to a machine where SLURM scheduling can
# be done. If not set, SLURM is assumed not to be in use.
# If set to localhost, SLURM commands will be run locally.
#
# slurmfe = ssh cluster
# slurmfe = localhost
#
# Extra directories to add to the search path for executables
# (colon-delimited, like PATH)
#
# pathextras =
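#
# For example (illustrative paths only):
#
# pathextras = /opt/filemap/bin:/usr/local/bin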
##
## Global defaults: (can be overridden per node)
##
#
# Determine the number of items to run concurrently based on CPU and I/O
# load. The value is the minimum amount of time (in seconds) after a new
# process is started before checking whether the system still has headroom
# for more. Set to 0/False/None to use static scheduling.
#
# Default: dynamicload = 2.0
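#
# For example, to disable dynamic load scheduling and rely on the static
# limits described below:
#
# dynamicload = 0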
#
# If not using dynamic load scheduling (see above), each node will run
# up to <processes> total concurrent processes, shared across all
# concurrent jobs. Each job is further limited to
# (<cpusperjob> * <procspercpu>) processes, where <procspercpu> is a
# job-specific argument to "fm map" that defaults to 1.
# By default that limit is 1 process per node, which is appropriate for
# I/O-bound jobs. For CPU-bound jobs on multicore nodes, either change
# these settings (see the example below) or leave dynamicload set so that
# processes are added automatically until the node is busy.
#
# Default: processes = 1000
# Default: cpusperjob = 1
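#
# For example (illustrative values only): a cluster of 8-core nodes
# running CPU-bound jobs might allow more concurrent processes per job:
#
# processes = 32
# cpusperjob = 8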
#
# Directory used for synchronization files.
# Must be globally accessible from all Nodes (but not necessarily from
# this client machine) and coherent; it need not be large or fast. The
# path is interpreted on the Nodes, not on this client machine. If the
# location is mounted at different paths on different nodes, override
# this setting in the node definitions (see the example below).
#
syncdir = /tmp/locks
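#
# For example (hypothetical node name and path): a node that mounts the
# shared filesystem at a different location can override this in its own
# section:
#
# [NodeX]
# syncdir = /net/shared/locks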
#
# FM command (specify an alternate path)
# Note: many versions of SSH will not execute your .login equivalent,
# so all commands must be referenced by full path or reachable via the
# PATH set in your ~/.ssh/environment file (see ssh(1)).
# Default: fm = %(ROOTDIR)/sys/fm
#
# PYTHON command (specify alternate path)
# The fm script will be executed under the interpreter specified here.
# Default: python = python
#
# python = python2.4
#
# SSH command (specify an alternate path or options)
# Note: for reduce operations to function, credential or agent forwarding
# must work.
# Note: older versions of rsync do not support quoted arguments in the
# ssh command.
# For improved performance, use -S (connection sharing), as in the second
# example below.
# Default: ssh = ssh -o GSSAPIDelegateCredentials=yes -o ConnectTimeout=5 -o StrictHostKeyChecking=no -Ax
#
#ssh = srun -N 1 -n 1 --unbuffered -w    # when running under SLURM
#ssh = ssh -o GSSAPIDelegateCredentials=yes -o ConnectTimeout=5 -o StrictHostKeyChecking=no -o ControlMaster=auto -S ~/.ssh/%l-%r@%h:%p -Ax
#
# RSYNC command (specify alternate path or options)
# Default: rsync = rsync -tO
#
#rsync = /opt/local/bin/rsync -t
##
## List your nodes here.
## Each [Nodename] must be unique but is otherwise unused.
## If a 'hostname' is specified, then this is a remote host and
## the given hostname must be accessible from all nodes.
## Most of the global settings can be overridden with per-node values.
##
[Node1]
rootdir = /tmp/foo
[Node4]
rootdir = /tmp/bar
[Node2]
#hostname = localhost
rootdir = /var/tmp/baz
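##
## A remote node example (hypothetical hostname): setting 'hostname'
## makes this node remote; the name must be resolvable from all nodes.
##
#[Node5]
#hostname = node5.example.com
#rootdir = /data/filemap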
##
## An example cluster config using ranges and lists:
##
#[(1-1024)]
#hostname = node%%04d
#rootdir = /data
#
#[(13,18,86,112)]
#inactive = True