# pnfsmanager.properties
# -----------------------------------------------------------------------
# Default values for pnfsmanager
# -----------------------------------------------------------------------
@DEFAULTS_HEADER@
# ---- Name of the service
#
# This is the logical name of the service. The service name is usually
# the name other services use to reach this service.
#
pnfsmanager.cell.service = ${dcache.queue.pnfsmanager}
# Cell name of pnfsmanager service
pnfsmanager.cell.name = ${dcache.queue.pnfsmanager}
# ----- Whether the service is replicable
#
# Any service in dCache can have several instances as long as these
# represent separate logical services. Some services can have several
# instances representing the same logical service, providing some
# degree of fault tolerance and load balancing. Such services are said
# to be replicable.
#
# Instances of a logical service share the same service name, and it is
# important that the configuration for such instances is synchronized.
#
# This property indicates if this service is replicable.
#
(immutable)pnfsmanager.cell.replicable = true
# ---- Named queues to consume from
#
# A service can consume messages from named queues. Other services can
# write messages to such queues. A named queue has an unqualified cell
# address, that is, an address without a domain name.
#
# This property contains a comma separated list of named queues to
# consume from.
#
pnfsmanager.cell.consume = ${pnfsmanager.cell.service}
# ---- Topics to which the service subscribes
#
# A service can subscribe to messages on topics. Other services can
# write messages to such topics and all subscribers receive such
# messages. A topic has an unqualified cell address, that is, an
# address without a domain name.
#
# This property contains a comma separated list of topics to
# subscribe to.
#
pnfsmanager.cell.subscribe = ${dcache.topic.watched}
# -- replace with org.dcache.chimera.namespace.ChimeraEnstoreStorageInfoExtractor
# if you are running an enstore HSM backend.
#
pnfsmanager.plugins.storage-info-extractor = org.dcache.chimera.namespace.ChimeraOsmStorageInfoExtractor
# ---- Number of threads per thread group
#
# Depending on how powerful your chimera server host is you may set
# this to up to 50.
#
pnfsmanager.limits.threads-per-group = 12
# ---- Number of thread groups
#
# A PNFS tree may be split into multiple databases. Each database is
# single threaded and hence accessing the same database from
# multiple threads provides only a minor speed-up. To ensure good
# load balancing when using multiple databases, the PnfsManager
# supports thread groups. Any database is assigned to one and only
# one thread group, thus databases assigned to different thread
# groups are guaranteed not to block each other. Each thread group
# will have ${pnfsmanager.limits.threads-per-group} threads.
#
# For best performance isolation, set this to be equal the largest
# database ID defined in PNFS. When increasing
# pnfsmanager.limits.thread-groups, you may want to lower
# pnfsmanager.limits.threads-per-group.
#
# Notice that PNFS access is still subject to the number of threads
# created in the PNFS daemon. If this number is lower than the
# number of concurrent requests, then contention may still occur
# even though multiple databases are used.
#
pnfsmanager.limits.thread-groups = 1
# ---- Number of list threads
#
# The PnfsManager uses dedicated threads for directory list
# operations. This variable controls the number of threads to
# use.
#
pnfsmanager.limits.list-threads = 2
# ---- Max chunk size in list replies
#
# To avoid out of memory errors when listing large directories,
# PnfsManager breaks up directory listings in chunk of entries. This
# setting controls the maximum number of directory entries in a
# chunk.
#
pnfsmanager.limits.list-chunk-size = 100
# ---- Threshold for when to log slow requests
#
# Threshold in milliseconds for when to log slow requests. Requests
# with a processing time larger than the threshold are logged. Set
# to 0 to disable. This can also be enabled at runtime using the
# 'set log slow threshold' command.
#
pnfsmanager.limits.log-slow-threshold=0
# ---- Maximum number of requests in a processing queue
#
# PnfsManager maintains a request queue per processing thread. This
# setting specifies the queue length at which point new requests
# will be denied rather than enqueued for processing. Set to 0 for
# unlimited queues.
#
pnfsmanager.limits.queue-length = 0
# ---- PnfsManager message folding
#
# Whether to use message folding in PnfsManager. When message folding
# is enabled, the PnfsManager will try to fold or collapse processing of
# identical messages. This can reduce the load on PNFS or Chimera if a
# large number of simultaneous requests on the same objects are performed.
#
(one-of?true|false)pnfsmanager.enable.folding = true
# ---- Inherit file ownership when creating files and directories
#
# By default, new files and directories will be owned by the
# person who created the files and directories. The owner field will
# be the UID of the creator and the group field will be the primary
# GID of the creator.
#
# If this flag is set to true, then both the owner and the group
# field will inherit the values from the parent directory.
#
# In either case, a door may override the values with values
# provided by the user.
#
(one-of?true|false)pnfsmanager.enable.inherit-file-ownership = false
# ---- Whether to verify lookup permissions for the entire path
#
# For performance reasons dCache with PNFS only verified the lookup
# permissions of the directory containing the file system entry
# corresponding to the path. I.e., only the lookup permissions for the
# last parent directory of the path were enforced. For compatibility
# reasons Chimera inherited these semantics.
#
# When this option is set to true, Chimera will verify the lookup
# permissions of all directories of a path.
#
(one-of?true|false)pnfsmanager.enable.full-path-permission-check = true
# ---- Whether to allow move to directory with different storage class and cache class
#
# Pool selection may be based on directory tags. A move in the chimera namespace
# is purely a rename operation. Therefore, a possibility of conflict exists
# when files are "moved" from a source with tags that are used to direct data to
# non-tape back-end pools to a destination directory with tags that direct
# data to tape-backed pools. No data will be actually moved to tape. Moreover,
# if a directory or a whole directory tree is moved, any new files written
# to this directory or any directory underneath would not go to tape.
# To avoid confusion set this variable to false. Additionally,
# nfs.enable.pnfsmanager-query-on-move has to be set to true.
#
# A move will succeed if storage class and cache class of the destination
# directory match those of the source directory. Additionally a directory can
# only be moved to a destination that has storage class and cache class
# matching storage class and cache class of the directory being moved.
#
#
(one-of?true|false)pnfsmanager.enable.move-to-directory-with-different-storageclass = true
# ---- Enabled ACL support
#
# Set to true to enable ACL support.
#
(one-of?true|false)pnfsmanager.enable.acl = false
# ---- Whether to expect a space manager
(one-of?true|false|${dcache.enable.space-reservation})pnfsmanager.enable.space-reservation = ${dcache.enable.space-reservation}
# Comma separated list of cell addresses to which to send notifications when a file is flushed.
pnfsmanager.destination.flush-notification = ${pnfsmanager.destination.flush-notification-when-space-reservation-is-${pnfsmanager.enable.space-reservation}}
(immutable)pnfsmanager.destination.flush-notification-when-space-reservation-is-true=${pnfsmanager.service.spacemanager}
(immutable)pnfsmanager.destination.flush-notification-when-space-reservation-is-false=
# Cell address to which to send cache location change notifications
pnfsmanager.destination.cache-notification = CacheLocationTopic
# Cell address of space manager
pnfsmanager.service.spacemanager = ${dcache.service.spacemanager}
# ---- Default Access Latency and Retention Policy
#
# These variables affect only newly created files.
#
# Do not use OUTPUT.
#
(one-of?CUSTODIAL|REPLICA|OUTPUT)pnfsmanager.default-retention-policy = CUSTODIAL
(one-of?ONLINE|NEARLINE)pnfsmanager.default-access-latency = NEARLINE
# ---- Upload directory
#
pnfsmanager.upload-directory=${dcache.upload-directory}
# ---- Configuration for database connection pool
#
# The database connection pool reuses connections between successive
# database operations. By reusing connections dCache doesn't suffer
# the overhead of establishing new database connections for each
# operation.
#
# The options here determine how the pnfsmanager behaves as the
# number of concurrent requests fluctuates.
#
#
# The maximum number of concurrent database connections.
#
# NOTE: when running resilience embedded here, this number should
# be increased. The recommended minimum setting would be
#
# pnfsmanager.resilience.submit-threads
# + pnfsmanager.resilience.pnfs-op-threads
# + pnfsmanager.resilience.db.connections.max
# + whatever maximum allowed for normal namespace settings
# (default = 30)
#
# Submit and pnfs threads require 1 database connection, and scan
# threads need 2.
#
# Be sure to adjust postgresql.conf max connections to allow
# for the larger value here, plus the added pool scan
# connections specified by pnfsmanager.resilience.db.connections.max.
#
pnfsmanager.db.connections.max = 30
#
# The minimum number of idle database connections.
#
pnfsmanager.db.connections.idle = 1
#
# Database related settings reserved for internal use.
#
(immutable)pnfsmanager.db.host=${chimera.db.host}
(immutable)pnfsmanager.db.name=${chimera.db.name}
(immutable)pnfsmanager.db.user=${chimera.db.user}
(immutable)pnfsmanager.db.password=${chimera.db.password}
(immutable)pnfsmanager.db.password.file=${chimera.db.password.file}
(immutable)pnfsmanager.db.url=${chimera.db.url}
(immutable)pnfsmanager.db.schema.changelog=${chimera.db.schema.changelog}
pnfsmanager.db.schema.auto=${dcache.db.schema.auto}
#
# --- Last Access Time (atime) updates for files
#
# This integer value controls whether and when dCache updates the last access
# time of files on reading them.
#
# Values < 0: atimes are never updated.
# Values >= 0: The maximum absolute(!) difference (in seconds) between a file's
# "new" atime and its curently stored one, where the atime is not
# yet updated.
# For example, when using a value of "4" and the old atime is (in
# POSIX time) "1000000000", then atimes up to including
# "1000000004" (but also down to "999999996") are not written;
# "1000000005" or later (respectively "999999995" or earlier)
# would be saved.
#
# Updating the atimes less often (or not at all) may have performance benefits.
#
pnfsmanager.atime-gap=-1
# ---- Endpoint for contacting pool manager; this is needed for periodic
# refreshing of live pool information.
#
pnfsmanager.service.poolmanager=${dcache.service.poolmanager}
# ---- Endpoint for contacting pin manager (passed on to migration task).
#
pnfsmanager.service.pinmanager=${dcache.service.pinmanager}
# ---- How long to wait for a response from the poolmanager.
#
pnfsmanager.service.poolmanager.timeout=1
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pnfsmanager.service.poolmanager.timeout.unit=MINUTES
# ---- How long to wait for a response from a pool.
#
pnfsmanager.service.pool.timeout=1
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pnfsmanager.service.pool.timeout.unit=MINUTES
(obsolete)pnfsmanager.db.dialect= Not used any more
(obsolete)pnfsmanager.cell.export = See pnfsmanager.cell.consume