# -----------------------------------------------------------------------
# Default values for pools
# -----------------------------------------------------------------------
@DEFAULTS_HEADER@
# Pool name
#
# Must be unique within this dCache instance
pool.name =
# Path to pool directory
pool.path =
# ---- Mover queues to create on a pool
#
# Mover queues schedule and execute transfers on a pool. Each mover
# queue can have individual limits and timeouts. These are
# configured at runtime through the admin interface.
#
# Doors can be configured to submit transfers to named queues.
#
# This property takes a comma separated list of named mover queues.
# The default mover queue is called 'regular' and is always created.
# The 'regular' mover queue must not be defined in this property.
#
# Named queues with names that begin with a hyphen are processed in
# FIFO order; all other queues are processed in LIFO order. Although
# LIFO is unfair, it tends to provide better behaviour in periods of
# overload. Neither queueing discipline can magically resolve sustained
# overload.
#
# This property is deprecated. It is preferred to create mover queues
# dynamically through the admin shell 'mover queue create' command.
#
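# For example, to create a LIFO queue 'wan' and a FIFO queue '-lan'
# (hypothetical queue names):
#
# pool.queues=wan,-lan
#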
(deprecated)pool.queues =
# ---- Large File Store
#
# Legacy option for disk only pools. There is usually no need to
# change this setting as the choice whether to write a file to tape
# is now controlled by the retention policy of each file.
#
(one-of?|volatile|transient|precious|none)pool.lfs=none
# ---- Maximum amount of space to be used by a pool
#
# In bytes. May also be configured at runtime through
# the admin interface. If not set, the pool size is
# determined from the size of the file system.
#
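# For example, to limit the pool to two tebibytes, given in bytes
# (illustrative value):
#
# pool.size=2199023255552
#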
pool.size =
# ---- Pool tags
#
# Whitespace-separated list of key-value pairs to associate with a
# pool.
#
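# For example (hypothetical tag names and values):
#
# pool.tags=hostname=${host.name} rack=r01 media=ssd
#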
pool.tags = hostname=${host.name}
# ---- Command to execute periodically to check pool health
#
# If defined and if pool.enable.repository-check is set to true, the
# command is executed once a minute. The command is usually a script
# that checks file system, disk array and host health. Any arguments
# are passed along.
#
# As the command is executed frequently, it should not perform any
# expensive checks, nor should the check take longer than at most a
# few seconds.
#
# If the exit code is 0, the pool is assumed to be okay. If the exit
# code is 1, the pool is marked read-only. Any other exit code will
# disable the pool.
#
# Once marked read-only or disabled, the pool will not reenable
# itself, even if subsequent invocations exit with 0. The pool can
# manually be reenabled using the 'pool enable' admin shell command.
#
# stdout and stderr of the command are written to the domain's log
# file.
#
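# For example (hypothetical script path):
#
# pool.check-health-command=/usr/local/bin/check-pool-health.sh
#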
pool.check-health-command=
# ---- Whether to monitor pool health
#
# If true, then the pool periodically performs a number of health
# checks and disables itself if an error is detected.
#
# The health check also involves checking the amount of free space
# in the file system and adjusting the pool size if the amount of
# free space in the file system is less than the free space of the pool.
#
(one-of?true|false)pool.enable.repository-check = true
# ---- Record pool's HSM connectivity in the namespace
#
# If true, the pool will update the file's 'h' flag in the namespace
# with the value "yes" when the pool is expected to be connected to an
# HSM (e.g. pool.lfs=precious), and with "no" otherwise.
#
# The flag is stored with the file's metadata in the namespace. Most
# dCache installations do not require this functionality.
#
(one-of?true|false)pool.enable.hsm-flag = false
# Worker thread pool size. Used by the migration module, for pool-to-pool
# transfers, and for processing requests from the cleaner.
pool.limits.worker-threads=5
# Nearline storage thread pool size. Used for blocking nearline storage operations,
# e.g. name space operations or callouts into installed nearline storage providers.
pool.limits.nearline-threads=30
# Worker thread pool to scan and check metadata from the pool repository.
pool.limits.scan-threads=1
# ---- Adjust the greediness of LRU removal of cached files when requested
# space exceeds free space.
#
# This value expresses a minimum amount of space (as a percentage of
# the total size of the pool), in addition to the space needed for the
# file, which should be reclaimed whenever the sweeper is triggered
# because there is not enough free space.
#
# The default value means the sweeper will try to remove just enough
# space to accommodate the file. Setting the value somewhat higher
# can serve to reduce the number of calls to the sweeper on busy pools
# that are near capacity.
#
# Please note that if the space cost factor of your partition is higher
# than 0, pools that have just cleaned up space may be much more attractive
# than other pools. For a good load distribution, you may need to
# set the space cost factor to 0:
# pm set <partition name> -spacecostfactor=0
# or use a random partition.
#
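# For example, to reclaim an additional 1% of the total pool size
# whenever the sweeper runs (illustrative value):
#
# pool.limits.sweeper-margin=1.0
#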
pool.limits.sweeper-margin=0.0
# Pool cell name. Currently this has to be the same as the pool name.
pool.cell.name=${pool.name}
# ---- Named queues to consume from
#
# A service can consume messages from named queues. Other services can
# write messages to such queues. A named queue has an unqualified cell
# address, that is, an address without a domain name.
#
# This property contains a comma separated list of named queues to
# consume from.
#
pool.cell.consume = ${pool.cell.name}
# Cell message processing limits
(obsolete)pool.cell.limits.message.threads.min=
(obsolete)pool.cell.limits.message.threads.max-idle-time=
(obsolete)pool.cell.limits.message.threads.max-idle-time.unit=
(obsolete)pool.cell.limits.message.threads.max = See pool.cell.max-message-threads
(obsolete)pool.cell.limits.message.queue.max = See pool.cell.max-messages-queued
pool.cell.max-message-threads = 50
pool.cell.max-messages-queued = 1000
# ---- Do not start the pool until the specified paths exist.
#
# If specified, the pool startup procedure will block as long as any
# of the specified paths does not exist. This is useful to delay pool
# startup until the repository's directory is available.
#
# Format: [path1][:path2]...[:pathN]
# For example:
# pool.wait-for-files=${pool.path}/data
#
pool.wait-for-files=
# ---- Which meta data repository implementation to use.
#
# This is essentially a choice between storing meta data in a large
# number of small files in the control/ directory, or using the
# embedded Berkeley database stored in the meta/ directory. Both
# directories are within the pool directory.
#
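# For example, to select the file-based meta data store instead of
# the default Berkeley DB store:
#
# pool.plugins.meta=org.dcache.pool.repository.meta.file.FileMetaDataRepository
#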
(one-of?org.dcache.pool.repository.meta.file.FileMetaDataRepository|\
org.dcache.pool.repository.meta.db.BerkeleyDBMetaDataRepository|\
org.dcache.pool.repository.meta.mongo.MongoDbMetadataRepository)\
pool.plugins.meta = org.dcache.pool.repository.meta.db.BerkeleyDBMetaDataRepository
# ---- Garbage collector used when the pool runs out of space
pool.plugins.sweeper = org.dcache.pool.classic.SpaceSweeper2
# ---- Configuration properties for Berkeley DB Java meta data repository
#
# Berkeley DB Java edition is used by one of the available meta data
# repository plugins. The database provides a large number of tuning
# options. These are mapped to the pool.plugins.meta.db prefix.
#
# Consult the Oracle Berkeley DB Java Edition documentation at
#
# http://docs.oracle.com/cd/E17277_02/html/java/com/sleepycat/je/EnvironmentConfig.html
#
# for details.
#
# WARNING: Be aware that these settings may influence the consistency
# of the data.
#
(prefix)pool.plugins.meta.db = Configuration for the meta data database
pool.plugins.meta.db!je.maxMemoryPercent = 20
pool.plugins.meta.db!je.stats.collect = false
pool.plugins.meta.db!je.lock.nLockTables = 5
pool.plugins.meta.db!je.lock.timeout = 60 s
pool.plugins.meta.db!je.freeDisk = 0
#
# Configuration options for MongoDB backend
#
pool.plugins.meta.mongo.url=mongodb://localhost:27017
pool.plugins.meta.mongo.db=pdm
pool.plugins.meta.mongo.collection=poolMetadata
#
# Whether to enable RPCSEC_GSS for NFS mover
#
(one-of?true|false)pool.mover.nfs.rpcsec_gss = false
# ---- NFS mover port range
#
# These options control the TCP port range used by the NFS mover. In
# most cases the range should be reduced to a single port so that
# clients can reconnect. If a client fails to reconnect to the mover,
# an RPC request may stay in the client's task queue in a 'D' state
# and increase the CPU load by one.
#
# If the range contains more than one port, the pool will randomly
# select one and reuse it after a restart.
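#
# For example, to pin the NFS mover to a single port (illustrative
# port number):
#
# pool.mover.nfs.port.min=32049
# pool.mover.nfs.port.max=32049
#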
pool.mover.nfs.port.min = ${dcache.net.lan.port.min}
pool.mover.nfs.port.max = ${dcache.net.lan.port.max}
# ---- NFS mover's request processing policy ----
#
# When an NFS request is received over the network, there are two
# possible ways to process it: in the same thread that received the
# request (SAME_THREAD), or in a pool of worker threads
# (WORKER_THREAD). WORKER_THREAD typically gives better throughput,
# with the risk that more aggressive clients can degrade the
# performance of less aggressive clients. SAME_THREAD limits the
# impact of aggressive clients on less aggressive clients, but also
# reduces the maximum throughput of any one client.
(one-of?SAME_THREAD|WORKER_THREAD)pool.mover.nfs.thread-policy = SAME_THREAD
# ---- Port used for passive DCAP movers
#
# When zero, a random port from the LAN port range is used.
#
pool.mover.dcap.port = 0
# ----- Whether to use memory mapping in FTP mover
#
# If true, the FTP mover utilizes memory mapping for checksum
# verification. This potentially improves performance, but may cause
# compatibility issues on some platforms.
#
(one-of?true|false)pool.mover.ftp.mmap = false
# ----- Whether to log incomplete transfers
#
# The following property controls whether the FTP mover will log
# statistics about transfers aborted by the client. This mover is
# responsible for direct uploads and downloads, and for third-party
# transfers initiated with the FTP protocol.
#
# It is not always possible for the pool to detect incomplete
# transfers; for example, if the transfer uses mode S and dCache
# accepts data then the pool cannot know if that transfer is
# incomplete. The pool can detect incomplete transfers if dCache is
# supplying data, or if the transfer uses mode E.
#
# An incomplete transfer may be due to the pool detecting a problem
# (e.g., unable to connect to the target TCP port) or from the
# client detecting some problem (e.g., insufficient progress).
# Logging the statistics when that transfer is aborted may yield
# information that helps diagnose such problems.
#
(one-of?true|false)pool.mover.ftp.enable.log-aborted-transfers = true
# ----- Distance between transfer and checksum computation in FTP mover
#
# When the checksum is computed on the fly, the FTP mover performs
# the checksum calculation in a separate thread. This property
# indicates the maximum distance in bytes between the transfer and
# the checksum calculation. If too small then the transfer may be
# throttled by a slow checksum calculation. If too large then data
# may have to be read back from disk rather than read from the
# cache.
#
pool.mover.ftp.read-ahead = 16777216
# Whether the FTP mover may accept incoming connections. If not, passive
# FTP connections will use the door as a proxy.
(one-of?true|false)pool.mover.ftp.allow-incoming-connections=true
# ---- Thread pool size for xroot disk IO threads
#
# 0 means use the Netty default, which is 2 × the number of CPU cores.
# This should only be adjusted if it is known that the disk subsystem
# can do more.
#
pool.mover.xrootd.threads = 0
# ---- Maximum size of an xroot frame
#
# Specified using isoSymbols (KiB, MiB).
#
pool.mover.xrootd.frame-size = 8 MiB
# ---- Maximum size of the buffer used to hold write data
#
# Specified using isoSymbols (KiB, MiB, GiB). Setting the value to 0 eliminates
# the boundary (effectively setting the buffer max at INF).
#
pool.mover.xrootd.write-buffer-size = 0 KiB
# ---- xroot plugins
#
# Comma separated list of plugins to inject into the xroot request
# processing chain.
#
# dCache ships with a few plugins:
#
# authn:gsi - any xroot request to the door will use a key-exchange
# process to identify the end-user (pool only).
#
# authz:alice-token - ALICE token based authorization plugin.
#
# No plugins are required. If an authentication plugin is
# specified, then note that the subject will *not* be mapped by
# gPlazma.
#
# Pools authorize access using a one-time token generated by the door
# when redirecting the client to the pool. For this reason an
# authentication or authorization plugin is usually *not* needed
# on pools.
#
# Third party plugins can be used by adding the plugin to the plugin
# directory of dCache and specifying the plugin name here. Authentication
# and authorization plugins can be loaded as authn:<plugin> and
# authz:<plugin> respectively.
#
# NOTE: the access-log plugin, which generates a dCache style access log
# using the NetLogger format, is now added automatically.
#
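# For example, to enable the ALICE token authorization plugin listed
# above:
#
# pool.mover.xrootd.plugins=authz:alice-token
#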
pool.mover.xrootd.plugins=
# ---- xroot third-party client authentication plugins
#
# A comma separated list of authentication mechanisms.
#
# When dCache is the destination in an xroot third-party transfer it
# uses an embedded xroot client to fetch data from the remote xroot
# server. In order to fetch that data, the client must first
# authenticate with the remote xroot server.
#
# There are several ways the dCache xroot client can authenticate.
# The following configuration property controls which of these
# authentication schemes are enabled and available to the client.
#
# Authentication schemes are pluggable, so more mechanisms may be
# added, independently of dCache.org.
#
# Note that specifying no plugins means dCache can only fetch data
# from xroot servers that require no authentication.
#
# dCache ships with the following plugins.
#
# gsi - dCache will use an X.509 credential to authenticate.
#
# This requires that the pool has an X.509 credential:
# either the xroot client initiating the xroot-TPC
# delegates one to dCache or the pool has one it can
# use for all transfers.
#
# unix - dCache will use "unix authentication" to authenticate.
#
# NOTE: third-party copying between dCache instances or
# doors requires this plugin on destination pools in
# addition to any other auth plugins, if hash signing
# is enforced on the pools (see
# pool.mover.xrootd.security.force-signing).
#
# ztn - dCache will use "ztn authentication" to authenticate.
#
pool.mover.xrootd.tpc-authn-plugins=gsi,unix,ztn
# ---- xroot mover-idle timeout
#
# Specifies the timeout after which clients that connect to the pool
# request handler without opening any files will be disconnected.
#
pool.mover.xrootd.timeout.idle = 300000
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.mover.xrootd.timeout.idle.unit=MILLISECONDS
# ---- xroot connect timeout
#
# Timeout that the mover will wait for a client connection before
# shutting down.
#
pool.mover.xrootd.timeout.connect = 300
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.mover.xrootd.timeout.connect.unit = SECONDS
# ---- xroot client reconnect timeout
#
# Timeout that the server will wait for a client to reconnect to the same mover
# in case an operation was stalled or interrupted (for READ only, not WRITE).
#
pool.mover.xrootd.read-reconnect-timeout = 5
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.mover.xrootd.read-reconnect-timeout.unit = MINUTES
# ---- xroot tpc client read block size
#
# Block or chunk size used in third-party read requests to the source server.
# Specified using isoSymbols (KiB, MiB).
#
pool.mover.xrootd.tpc-client-chunk-size = 8 MiB
# ---- xroot tpc server response timeout
#
# Timeout that the third-party client will wait for a response from a server
# before raising a timeout exception.
#
pool.mover.xrootd.tpc-server-response-timeout = 30
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.mover.xrootd.tpc-server-response-timeout.unit = SECONDS
# ---- xroot mover port range
pool.mover.xrootd.port.min = ${dcache.net.lan.port.min}
pool.mover.xrootd.port.max = ${dcache.net.lan.port.max}
# ----- Custom kXR_Qconfig responses
#
# xroot clients may query the server configuration using a
# kXR_Qconfig request. These key value pairs can be
# queried. Additional key value pairs may be added as needed, see
# the xroot protocol specification at http://xrootd.org/ for
# details.
#
(prefix)pool.mover.xrootd.query-config = kXR_Qconfig responses
pool.mover.xrootd.query-config!version = dCache ${dcache.version}
pool.mover.xrootd.query-config!sitename = ${dcache.description}
pool.mover.xrootd.query-config!role = none
#
# Signed hash verification ----- see dcache.properties
#
pool.mover.xrootd.security.level=${dcache.xrootd.security.level}
pool.mover.xrootd.security.force-signing=${dcache.xrootd.security.force-signing}
# TLS OPTIONS
#
# See the xrootd.properties defaults for an explanation
#
# These properties are not linked to a common dcache property
# because the door and pool very likely will have different requirements.
#
# NB: because the default is OPTIONAL, a host cert/key is required on
# the pool node; if it is missing, the pool will not start. Only with
# this option set to OFF will a pool start without the cert/key.
#
(one-of?OFF|OPTIONAL|STRICT)pool.mover.xrootd.security.tls.mode=OPTIONAL
(one-of?true|false)pool.mover.xrootd.security.tls.require-login=false
(one-of?true|false)pool.mover.xrootd.security.tls.require-session=false
(one-of?true|false)pool.mover.xrootd.security.tls.require-data=false
(one-of?true|false)pool.mover.xrootd.security.tls.require-tpc=false
(one-of?true|false)pool.mover.xrootd.security.tls.require-gpf=false
(immutable)pool.mover.xrootd.security.ssl-plugins-when-tls-mode-is-OFF=
(immutable)pool.mover.xrootd.security.ssl-plugins-when-tls-mode-is-STRICT=ssl-handler,ssl-client-handler
(immutable)pool.mover.xrootd.security.ssl-plugins-when-tls-mode-is-OPTIONAL=ssl-handler,ssl-client-handler
pool.mover.xrootd.security.ssl-plugins=${pool.mover.xrootd.security.ssl-plugins-when-tls-mode-is-${pool.mover.xrootd.security.tls.mode}}
# ---- Thread pool size for http disk IO threads
#
# 0 means use the Netty default, which is 2 × the number of CPU cores.
# This should only be adjusted if it is known that the disk subsystem
# can do more.
#
pool.mover.http.threads = 0
# ----- IO chunk size in bytes used by the HTTP mover to send or receive data.
#
# This option controls the IO block size used to read/write data from/to
# the backend storage, such as a file system or CEPH.
pool.mover.http.chunk-size = 8192
# Custom HTTP headers in response
#
# The following configuration prefix is used to add custom headers
# to dCache responses. The key part (after the prefix) is used as
# the header; the value is the header's value. For example, specifying
#
# pool.mover.http.custom-response-header!Foo = bar
#
# ensures that HTTP responses will include the line:
#
# Foo: bar
#
# The webdav.custom-response-header property has a similar effect
# for the webdav door.
#
(prefix)pool.mover.http.custom-response-header = HTTP headers that are always included in dCache responses
pool.mover.http.custom-response-header!Server = dCache/${dcache.version}
# ---- HTTP client timeout
#
# Period after which a client will be disconnected if the
# connection is idle (not reading or writing)
#
pool.mover.http.timeout.idle = 300
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.mover.http.timeout.idle.unit = SECONDS
# ---- HTTP connect timeout
#
# Timeout that the mover will wait for a client connection before
# shutting down
#
pool.mover.http.timeout.connect = 300
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.mover.http.timeout.connect.unit = SECONDS
# ---- HTTP mover port range
pool.mover.http.port.min = ${dcache.net.wan.port.min}
pool.mover.http.port.max = ${dcache.net.wan.port.max}
# ---- HTTPS mover port range
pool.mover.https.port.min = ${pool.mover.http.port.min}
pool.mover.https.port.max = ${pool.mover.http.port.max}
# ---- FTP data channel port range
#
# Currently only used by remote FTP mover
#
pool.mover.ftp.port.min = ${dcache.net.wan.port.min}
pool.mover.ftp.port.max = ${dcache.net.wan.port.max}
# --- HTTP-TPC settings
#
# These settings control how HTTP-TPC operates.
#
# Which certificate authorities (CAs) to trust when verifying the
# remote host's certificate.
#
pool.mover.http-tpc.authn.capath = ${pool.authn.capath}
pool.mover.http-tpc.authn.capath.refresh=${pool.authn.capath.refresh}
(one-of?MILLISECONDS|\
SECONDS|\
MINUTES|\
HOURS|\
DAYS|\
${pool.authn.capath.refresh.unit})\
pool.mover.http-tpc.authn.capath.refresh.unit=${pool.authn.capath.refresh.unit}
# The (certificate authority) Namespace usage mode
#
(one-of?GLOBUS_EUGRIDPMA|EUGRIDPMA_GLOBUS|GLOBUS|EUGRIDPMA|GLOBUS_EUGRIDPMA_REQUIRE|EUGRIDPMA_GLOBUS_REQUIRE|GLOBUS_REQUIRE|EUGRIDPMA_REQUIRE|EUGRIDPMA_AND_GLOBUS|EUGRIDPMA_AND_GLOBUS_REQUIRE|IGNORE|${pool.authn.namespace-mode})\
pool.mover.http-tpc.authn.namespace-mode=${pool.authn.namespace-mode}
# Certificate Revocation List (CRL) usage mode
#
(one-of?REQUIRE|IF_VALID|IGNORE|${pool.authn.crl-mode})\
pool.mover.http-tpc.authn.crl-mode=${pool.authn.crl-mode}
# On-line Certificate Status Protocol (OCSP) usage mode
#
(one-of?REQUIRE|IF_AVAILABLE|IGNORE|${pool.authn.ocsp-mode})\
pool.mover.http-tpc.authn.ocsp-mode=${pool.authn.ocsp-mode}
# Disabling specific ciphers
#
(any-of?DISABLE_EC|DISABLE_RC4|${pool.authn.ciphers})\
pool.mover.http-tpc.authn.ciphers = ${pool.authn.ciphers}
#
# When possible, the pool will try to reuse connections across
# multiple transfers. There are some limits to prevent this from
# getting out-of-hand.
#
# The overall maximum number of connections. This value should be
# at least the number of HTTP-TPC movers, otherwise the pool cannot
# achieve the desired number of concurrent transfers under peak
# load. You may wish to set this value somewhat higher, to allow
# some connection caching.
#
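# For example, a site expecting several hundred concurrent HTTP-TPC
# movers might raise the limit to leave headroom for connection
# caching (illustrative value):
#
# pool.mover.http-tpc.connections.max = 500
#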
pool.mover.http-tpc.connections.max = 200
# The maximum number of transfers per endpoint, where an endpoint is
# a hostname and a port number.
#
pool.mover.http-tpc.connections.max-per-endpoint \
= ${pool.mover.http-tpc.connections.max}
# The maximum duration of an idle connection after which it is
# closed.
#
pool.mover.http-tpc.connections.max-idle = 5
(one-of?MILLISECONDS|\
SECONDS|\
MINUTES|\
HOURS|\
DAYS)\
pool.mover.http-tpc.connections.max-idle.unit = MINUTES
# --- Remote gsiftp transfers settings
#
# These are settings for the pool's embedded gsiftp client. Note:
# most gsiftp transfers do NOT use the embedded gsiftp client, as FTP
# can do third-party transfers without requiring an embedded client.
#
# Which certificate authorities (CAs) to trust when verifying the
# remote host's certificate.
#
pool.mover.remote-gsiftp.authn.capath = ${pool.authn.capath}
pool.mover.remote-gsiftp.authn.capath.refresh=${pool.authn.capath.refresh}
(one-of?MILLISECONDS|\
SECONDS|\
MINUTES|\
HOURS|\
DAYS|\
${pool.authn.capath.refresh.unit})\
pool.mover.remote-gsiftp.authn.capath.refresh.unit=${pool.authn.capath.refresh.unit}
# The (certificate authority) Namespace usage mode
#
(one-of?GLOBUS_EUGRIDPMA|EUGRIDPMA_GLOBUS|GLOBUS|EUGRIDPMA|GLOBUS_EUGRIDPMA_REQUIRE|EUGRIDPMA_GLOBUS_REQUIRE|GLOBUS_REQUIRE|EUGRIDPMA_REQUIRE|EUGRIDPMA_AND_GLOBUS|EUGRIDPMA_AND_GLOBUS_REQUIRE|IGNORE|${pool.authn.namespace-mode})\
pool.mover.remote-gsiftp.authn.namespace-mode=${pool.authn.namespace-mode}
# Certificate Revocation List (CRL) usage mode
#
(one-of?REQUIRE|IF_VALID|IGNORE|${pool.authn.crl-mode})\
pool.mover.remote-gsiftp.authn.crl-mode=${pool.authn.crl-mode}
# On-line Certificate Status Protocol (OCSP) usage mode
#
(one-of?REQUIRE|IF_AVAILABLE|IGNORE|${pool.authn.ocsp-mode})\
pool.mover.remote-gsiftp.authn.ocsp-mode=${pool.authn.ocsp-mode}
# Disabling specific ciphers
#
(any-of?DISABLE_EC|DISABLE_RC4|${pool.authn.ciphers})\
pool.mover.remote-gsiftp.authn.ciphers = ${pool.authn.ciphers}
# ---- Directory containing trusted CA certificates
#
# The directory containing the certificate authority (CA)
# certificates that the pool should trust when accepting or making
# remote connections. Used by HTTP-TPC and the gsiftp client.
#
pool.authn.capath = ${dcache.authn.capath}
# How often to check the CA certificates for updates
pool.authn.capath.refresh=${dcache.authn.capath.refresh}
(one-of?MILLISECONDS|\
SECONDS|\
MINUTES|\
HOURS|\
DAYS|\
${dcache.authn.capath.refresh.unit})\
pool.authn.capath.refresh.unit=${dcache.authn.capath.refresh.unit}
# ---- Certificate Authority Namespace usage mode
(one-of?GLOBUS_EUGRIDPMA|EUGRIDPMA_GLOBUS|GLOBUS|EUGRIDPMA|GLOBUS_EUGRIDPMA_REQUIRE|EUGRIDPMA_GLOBUS_REQUIRE|GLOBUS_REQUIRE|EUGRIDPMA_REQUIRE|EUGRIDPMA_AND_GLOBUS|EUGRIDPMA_AND_GLOBUS_REQUIRE|IGNORE|${dcache.authn.namespace-mode})\
pool.authn.namespace-mode=IGNORE
# ---- Certificate Revocation List usage mode
(one-of?REQUIRE|IF_VALID|IGNORE|${dcache.authn.crl-mode})\
pool.authn.crl-mode=${dcache.authn.crl-mode}
# ---- On-line Certificate Status Protocol usage mode
(one-of?REQUIRE|IF_AVAILABLE|IGNORE|${dcache.authn.ocsp-mode})\
pool.authn.ocsp-mode=${dcache.authn.ocsp-mode}
(any-of?DISABLE_EC|DISABLE_RC4|${dcache.authn.ciphers})pool.authn.ciphers = ${dcache.authn.ciphers}
# Cell address of pnfsmanager service
pool.service.pnfsmanager=${dcache.service.pnfsmanager}
# Timeout for pnfsmanager requests
pool.service.pnfsmanager.timeout=300
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.service.pnfsmanager.timeout.unit=SECONDS
# Cell address of billing service
pool.service.billing=${dcache.topic.billing}
# Timeout for requests to other pools
pool.service.pool.timeout=60
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.service.pool.timeout.unit=SECONDS
# Maximum number of pnfs manager requests per second
pool.service.pnfsmanager.rate=250
# Cell address of poolmanager service
pool.service.poolmanager=${dcache.service.poolmanager}
# Timeout for poolmanager requests
pool.service.poolmanager.timeout=120
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.service.poolmanager.timeout.unit=SECONDS
# Cell address of pinmanager service
pool.service.pinmanager=${dcache.service.pinmanager}
# Timeout for pinmanager requests
pool.service.pinmanager.timeout=120
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.service.pinmanager.timeout.unit=SECONDS
# Timeout for requests sent to doors
pool.service.door.timeout=3
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.service.door.timeout.unit=SECONDS
# Cell address to which to send poolup messages
pool.destination.heartbeat = ${dcache.topic.pool-heartbeat}
# Address of cell to notify with a replication request on arrival of new files
#
# This will typically be PoolManager or HoppingManager. Leave empty to disable the
# notification.
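#
# For example, to ask PoolManager to replicate newly arrived files:
#
# pool.destination.replicate=PoolManager
#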
pool.destination.replicate =
# By default dCache will set a sticky bit on cached replicas that have
# just been staged. The duration of this sticky bit is 5 minutes. There
# are situations where this needs adjustment.
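#
# For example, to keep staged replicas sticky for one hour
# (illustrative values):
#
# pool.sticky-on-stage-duration = 60
# pool.sticky-on-stage-duration.unit = MINUTES
#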
pool.sticky-on-stage-duration = 5
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)pool.sticky-on-stage-duration.unit = MINUTES
# IP address to include in replication requests
#
# This will typically be an IP address of a worker node or some other client machine.
#
pool.destination.replicate.ip=
##
# ------------------------------ Resilience ---------------------------------
##
# Cell address to which to send corrupt file messages
#
pool.destination.corrupt-file=CorruptFileTopic
# ---- Number of threads processing requests coming from the resilience system.
#
pool.resilience.request-threads=10
# ---- Number of threads processing requests coming from the qos system.
#
pool.qos.request-threads=10
# ---- Number of threads processing requests coming from frontend services.
#
pool.info-request-handler.threads=4
# Pool's file store backend type.
#
# Currently supported types: CEPH or a POSIX file system
(one-of?ceph|posix)pool.backend = posix
# -------- CEPH backend for pools ----
#
# CEPH cluster name
pool.backend.ceph.cluster = admin
# path to ceph config file
pool.backend.ceph.config = /etc/ceph/ceph.conf
# ceph pool name
pool.backend.ceph.pool-name = ${pool.name}
#
# Document which TCP ports are opened
#
(immutable)pool.net.ports.tcp=${dcache.net.wan.port.min}-${dcache.net.wan.port.max} ${dcache.net.lan.port.min}-${dcache.net.lan.port.max}
#
# Set the order of the flush queue
#
(one-of?fifo|lifo)pool.flush-controller.queue-order=fifo
# Obsolete properties
(obsolete)pool.cell.export = See pool.cell.consume
(obsolete)pool.enable.remove-precious-files-on-delete = Precious replicas are always removed when the file is deleted in the namespace.
# ---- inotify support
#
# dCache generates IN_ACCESS or IN_MODIFY events for watched files in
# reaction to clients' read and write activity, respectively. On a
# busy dCache cluster, sending an event for each client IO operation
# would likely generate too much internal network traffic.
#
# Pools may be configured to suppress repeated IO events for a
# specific duration. A client that is continuously reading or
# continuously writing will trigger the corresponding IO event
# only once every pool.inotify-generation.io-suppression period.
#
# Setting pool.inotify-generation.io-suppression to zero disables
# suppression. Such a pool will send an IO event for all client IO
# operations.
#
# Setting this value too low will result in high internal network
# usage with corresponding symptoms; e.g., sluggish response from
# services and (in extreme) out-of-memory in the frontend door or in
# intermediate-hop domains.
#
# Setting this value too high will limit the ability of inotify
# clients to distinguish an idle open file from one that is actively
# being written to or read from.
#
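# For example, to disable suppression entirely, as described above:
#
# pool.inotify-generation.io-suppression = 0
#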
pool.inotify-generation.io-suppression = 10
(one-of?MILLISECONDS|SECONDS|MINUTES|HOURS|DAYS)\
pool.inotify-generation.io-suppression.unit = SECONDS
#
# Inherit default values.
#
pool.inotify-generation.enable = \
${dcache.inotify-generation.enable}
pool.inotify-generation.backlog.initial = \
${dcache.inotify-generation.backlog.initial}
pool.inotify-generation.backlog.per-door = \
${dcache.inotify-generation.backlog.per-door}
pool.inotify-generation.message-batch-size = \
${dcache.inotify-generation.message-batch-size}
# Whether to enable file-based capacity usage tracking. This provides
# potentially useful information for discovering bugs that affect the
# pool capacity accounting. However, enabling this feature will
# result in dCache using more memory per replica stored. On pools
# with large numbers of replicas the memory overhead may be
# prohibitive, and could prevent the pool from starting.
(one-of?true|false)pool.enable.account-file-checking = false
# ---- Kafka service enabled
#
(one-of?true|false|${dcache.enable.kafka})pool.enable.kafka = ${dcache.enable.kafka}
# A list of host/port pairs (brokers) host1:port1,host2:port2,....
pool.kafka.producer.bootstrap.servers = ${dcache.kafka.bootstrap-servers}
# Kafka topic name
pool.kafka.topic = ${dcache.kafka.topic}
(prefix)pool.kafka.producer.configs = Configuration for Kafka Producer
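#
# For example, to pass a setting to the Kafka producer (illustrative;
# compression.type is a standard Kafka producer property):
#
# pool.kafka.producer.configs!compression.type = gzip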
# Support for encrypted transfers.
#
# HTTPS is currently the only supported encrypted transfer.
# 1. Only HTTPS connections are encrypted.
# 2. Internal transfers (pool-to-pool and proxied transfers) can now be
#    encrypted if desired.
# 3. Enabling HTTPS support requires the pool to have a valid X.509
#    credential.
#
(one-of?true|false)pool.enable.encrypted-transfers=true
# ---- Enabling/disabling HTTPS for pool-to-pool (p2p) transfers
#
# ALWAYS:     encrypted p2p transfers are always enabled, even when the
#             sender pool is not local.
#
# NEVER:      encrypted p2p transfers are disabled. When the sender pool
#             is not local, enabling HTTPS transfers might be expensive,
#             so NEVER disables them.
#
# CROSSZONES: encryption is enabled only for cross-zone transfers, i.e.
#             when the pools are in different zones.
(one-of?NEVER|ALWAYS|CROSSZONES)pool.enable.encrypted.p2p-transfers=NEVER
# Host (server) certificate for https authentication
pool.mover.https.hostcert.cert=${dcache.authn.hostcert.cert}
# Host (server) key for https authentication
pool.mover.https.hostcert.key=${dcache.authn.hostcert.key}
# ---- Directory containing trusted CA certificates
#
# The directory containing the certificate authority (CA)
# certificates that the pool should trust when accepting or making
# remote connections. Currently only secure HTTP (https)
# connections react to this property.
#
pool.mover.https.capath=${dcache.authn.capath}