## DAOS server configuration file.
#
## Location of this configuration file is determined by first checking for the
## path specified through the -o option of the daos_server command line.
## Otherwise, /etc/daos/daos_server.yml is used.
#
#
## Name associated with the DAOS system.
## Immutable after running "dmg storage format".
#
## NOTE: Changing the DAOS system name is not supported yet.
## It must not be changed from the default "daos_server".
#
## default: daos_server
#name: daos_server
#
#
## Access points
## Immutable after running "dmg storage format".
#
## To operate, DAOS will need a quorum of access point nodes to be available.
## Must have the same value for all agents and servers in a system.
## Hosts can be specified with or without port. The default port that is set
## up in port: will be used if a port is not specified here.
#
## default: hostname of this node
#access_points: ['hostname1']
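#
## Example (hypothetical hostnames): three access points with the port given
## explicitly; if the port were omitted, the default from "port:" below would
## be used.
#access_points: ['hostname1:10001', 'hostname2:10001', 'hostname3:10001']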
#
#
## Default control plane port
#
## Port number to bind daos_server to. This will also be used when connecting
## to access points, unless a port is specified in access_points:
#
## default: 10001
#port: 10001
#
#
## Transport credentials specifying certificates to secure communications
#
#transport_config:
# # In order to disable transport security, uncomment and set allow_insecure
# # to true. Not recommended for production configurations.
# allow_insecure: false
#
# # Location where daos_server will look for Client certificates
# client_cert_dir: /etc/daos/certs/clients
# # Custom CA Root certificate for generated certs
# ca_cert: /etc/daos/certs/daosCA.crt
# # Server certificate for use in TLS handshakes
# cert: /etc/daos/certs/server.crt
# # Key portion of Server Certificate
# key: /etc/daos/certs/server.key
#
#
## Fault domain path
## Immutable after running "dmg storage format".
#
## default: /hostname for a local configuration w/o fault domain
#fault_path: /vcdu0/rack1/hostname
#
#
## Fault domain callback
## Immutable after running "dmg storage format".
#
## Path to executable which will return fault domain string.
#
#fault_cb: ./.daos/fd_callback
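#
## A minimal sketch of such a callback, assuming the executable only needs to
## print the fault domain path on stdout (hypothetical script contents):
##   #!/bin/bash
##   echo "/rack1/$(hostname -s)"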
#
#
## Network provider
#
## Set the network provider to be used by all the engines.
## There is no default - run "daos_server network scan" to list the
## providers that are supported on the fabric interfaces. Examples:
##
## ofi+verbs;ofi_rxm for libfabric with Infiniband/RoCE
## ofi+tcp;ofi_rxm for libfabric with non-RDMA-capable fabrics
##
## (Starting with DAOS 2.2, ofi_rxm will be automatically added to the
## libfabric verbs and tcp providers, if not explicitly specified.)
#
#provider: ofi+verbs;ofi_rxm
#
#
## CART: Whether to enable shared address in the network stack
## (also known as scalable endpoint).
## This parameter is shared with the client.
#
#crt_ctx_share_addr: 0
#
#
## CART: global RPC timeout in seconds.
## This parameter is shared with the client.
#
#crt_timeout: 30
#
#
## CART: Disable shared receive context (SRX)
## This parameter is shared with the client. Set it to true if the network
## card does not support shared receive context, e.g. the Intel E810-C.
#
#disable_srx: false
#
## Core Dump Filter
## Optional filter to control which mappings are written to the core
## dump in the event of a crash. See the following URL for more detail:
## https://man7.org/linux/man-pages/man5/core.5.html
#
#core_dump_filter: 0x13
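#
## For reference, per the core(5) man page the example value 0x13 sets bits
## 0, 1 and 4, i.e. anonymous private mappings, anonymous shared mappings
## and ELF headers are written to the core dump.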
#
## NVMe SSD inclusion list
## Immutable after running "dmg storage format".
#
## Only use NVMe controllers with specific PCI addresses.
## Colons replaced by dots in PCI identifiers.
## default: Use all the NVMe SSDs that don't have active mount points.
#
#bdev_include: ["0000:81:00.1","0000:81:00.2","0000:81:00.3"]
#
#
## NVMe SSD exclusion list
## Immutable after running "dmg storage format".
#
## Only use NVMe controllers with specific PCI addresses.
## Excludes drives listed in bdev_include and forces auto-detection to
## skip those drives.
## Colons replaced by dots in PCI identifiers.
## default: Use all the NVMe SSDs that don't have active mount points.
#
#bdev_exclude: ["0000:81:00.1"]
#
#
## Disable VFIO Driver
#
## In some circumstances it may be preferable to force SPDK to use the UIO
## driver for NVMe device access even though an IOMMU is available.
## NOTE: Use of the UIO driver requires that daos_server run as root.
#
## default: false
#disable_vfio: true
#
#
## Disable VMD Usage
#
## VMD (Intel Volume Management Device) support is enabled by default but can
## optionally be disabled, in which case VMD-backed devices will not be visible.
#
## VMD needs to be available and configured in the system BIOS before it
## can be used. The main use case for VMD is managing NVMe SSD LED activity.
#
## default: false
#disable_vmd: true
#
#
## Enable NVMe SSD Hotplug
#
## When hotplug is enabled, the I/O engine will periodically check for device
## hotplug/removal events and set up or tear down the affected devices
## automatically.
#
## default: false
#enable_hotplug: true
#
#
## Use Hyperthreads
#
## When hyperthreading is enabled and supported on the system, this parameter
## defines whether the DAOS service should try to take advantage of
## hyperthreading by scheduling different tasks on each hardware thread.
## Not supported yet.
#
## default: false
#hyperthreads: true
#
#
## Use the given directory for creating unix domain sockets
#
## DAOS Agent and DAOS Server both use unix domain sockets for communication
## with other system components. This setting is the base location to place
## the sockets in.
#
## NOTE: Do not change this when running under systemd control. If it needs to
## be changed, then make sure that it matches the RuntimeDirectory setting
## in /usr/lib/systemd/system/daos_server.service
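## (For reference, systemd's RuntimeDirectory= takes a path relative to /run,
## so the default of /var/run/daos_server corresponds to
## RuntimeDirectory=daos_server in that unit file.)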
#
## default: /var/run/daos_server
#
#socket_dir: ./.daos/daos_server
#
#
## Number of hugepages to allocate for DMA buffer memory
#
## Specifies the number (not size) of hugepages to allocate for use by NVMe
## through SPDK. Note that each target requires 1 GiB of hugepage space.
## In DAOS version 2.2 and newer, nr_hugepages specifies the total across all
## engines on a host. It needs to represent the total amount of hugepages memory
## required for all targets across all engines on a host, divided by the system
## hugepage size. If not set here, it will be automatically calculated based on
## the number of targets (using the default system hugepage size).
#
## Example: (2 engines * (16 targets/engine * 1GiB)) / 2MiB hugepage size = 16384
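#
## With that example layout, the corresponding setting would be
## (hypothetical value, matching the calculation above):
#nr_hugepages: 16384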
#
## Hugepages are mandatory when NVMe SSDs are configured and optional otherwise.
## To disable the use of hugepages when no NVMe SSDs are configured, set
## nr_hugepages to -1.
#
#nr_hugepages: -1
#
#
## Set specific debug mask for daos_server (control plane).
## The mask specifies minimum level of message significance to pass to logger.
## Currently supported values are DISABLED, ERROR, INFO and DEBUG.
#
## default: INFO
#control_log_mask: ERROR
#
#
## Force specific path for daos_server (control plane) logs.
#
## default: print to stderr
#control_log_file: /tmp/daos_server.log
#
#
## Enable daos_admin (privileged helper) logging.
#
## default: disabled (errors only to control plane log)
#helper_log_file: /tmp/daos_admin.log
#
#
## Enable daos_firmware (privileged helper) logging.
#
## default: disabled (errors only to control plane log)
#firmware_helper_log_file: /tmp/daos_firmware.log
#
#
## Enable HTTP endpoint for remote telemetry collection.
#
## default endpoint state: disabled
## default endpoint port: 9191
#telemetry_port: 9191
#
#
## When per-engine definitions exist, auto-allocation of resources is not
## performed. Without per-engine definitions, node resources will
## automatically be assigned to engines based on NUMA ratings.
## There will be a one-to-one relationship between engines and sockets.
#
#engines:
#-
# # Rank to be assigned as identifier for this engine.
# # Immutable after running "dmg storage format".
# #
# # Optional parameter; must be unique across all engines in the DAOS system.
# #
# # default: will be auto generated if not supplied.
#
# rank: 0
#
# # Number of I/O service threads (and network endpoints) per engine.
# # Immutable after running "dmg storage format".
# #
# # Each storage target manages a fraction of the (interleaved) SCM storage space,
# # and a fraction of one of the NVMe SSDs that are managed by this engine.
# # For optimal balance regarding the NVMe space, the number of targets should be
# # an integer multiple of the number of NVMe disks configured in bdev_list:
# # To obtain the maximum SCM performance, a certain number of targets is needed.
# # This is device- and workload-dependent, but around 16 targets usually work well.
# #
# # The server should have sufficiently many physical cores to support the
# # number of targets, plus the additional service threads.
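# #
# # For example, with the two-disk bdev_list used below, setting targets: 16
# # assigns eight targets to each NVMe SSD.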
#
# targets: 16
#
# # Number of additional offload service threads per engine.
# # Immutable after running "dmg storage format".
# #
# # Helper threads to accelerate checksum and server-side RPC dispatch.
# #
# # The server should have sufficiently many physical cores to support the
# # number of helper threads, plus the number of targets.
#
# nr_xs_helpers: 4
#
# # Pin this engine instance to cores and memory that are local to the
# # NUMA node ID specified with this value.
# #
# # For best performance, it is necessary that the fabric_iface of this engine
# # resides on the same NUMA node as the pinned_numa_node.
# #
# # Optional parameter; set either this option or first_core, but not both.
#
# pinned_numa_node: 0
#
# # Offset of the first core to be used for I/O service threads (targets).
# # Immutable after running "dmg storage format".
# #
# # For best performance, it is necessary that the fabric_iface of this engine
# # resides on the same NUMA node as the first_core.
# #
# # Optional parameter; set either this option to a non-zero value or
# # pinned_numa_node, but not both.
#
# first_core: 0
#
# # A boolean that instructs the I/O Engine instance to bypass the NVMe
# # health check. This eliminates the check and related log output for those
# # systems with NVMe that do not support the device health data query.
#
# bypass_health_chk: true
#
# # Use a specific network interface.
# # Specify the fabric network interface that will be used by this engine.
# # Optionally specify the fabric network interface port that will be used
# # by this engine, but only if you have a specific need; the port will
# # normally be chosen automatically.
#
# fabric_iface: ib0
# fabric_iface_port: 20000
#
# # Force specific debug mask for the engine at start up time.
# # By default, just use the default debug mask used by DAOS.
# # Mask specifies minimum level of message significance to pass to logger.
#
# # default: ERR
# log_mask: INFO
#
# # Force specific path for DAOS debug logs.
#
# # default: /tmp/daos.log
# log_file: /tmp/daos_engine.0.log
#
# # Pass specific environment variables to the engine process.
# # Empty by default. Values should be supplied without encapsulating quotes.
#
# env_vars:
# - CRT_TIMEOUT=30
#
# storage:
# -
# # Define a pre-configured mountpoint for storage class memory to be used
# # by this engine.
# # Path should be unique to engine instance (can use different subdirs).
# # Either the specified directory or its parent must be a mount point.
#
# scm_mount: /mnt/daos/1
#
# # Backend SCM device type. Either use PMem (Intel(R) Optane(TM) persistent
# # memory) modules configured in interleaved mode or emulate SCM with a
# # tmpfs running in RAM.
# # Options are:
# # - "dcpm" for real SCM (preferred option), scm_size ignored
# # - "ram" to emulate SCM with memory, scm_list ignored
# # Immutable after running "dmg storage format".
#
# class: ram
#
# # When class is set to ram, tmpfs will be used to emulate SCM.
# # The size of the tmpfs is specified by scm_size in GB units.
# scm_size: 16
#
# -
# # Backend block device type. Force a SPDK driver to be used by this engine
# # instance.
# # Options are:
# # - "nvme" for NVMe SSDs (preferred option), bdev_size ignored
# # - "file" to emulate a NVMe SSD with a regular file
# # - "kdev" to use a kernel block device, bdev_size ignored
# # Immutable after running "dmg storage format".
#
# class: nvme
#
# # Backend block device configuration to be used by this engine instance.
# # When class is set to nvme, bdev_list is the list of unique NVMe IDs
# # that should be different across engine instances.
# # Immutable after running "dmg storage format".
# bdev_list: ["0000:81:00.0", "0000:82:00.0"] # generate regular nvme.conf
#
# # If VMD-enabled NVMe SSDs are used, the bdev_list should consist of the VMD
# # PCIe addresses, and not the BDF format transport IDs of the backing NVMe SSDs
# # behind the VMD address. Also, 'disable_vmd' needs to be set to false.
# #bdev_list: ["0000:5d:05.5"]
#
# # Optional override, will be automatically generated based on NUMA affinity.
# # Filter hot-pluggable devices by PCI bus-ID by specifying a hexadecimal
# # range. Hotplug events relating to devices with PCI bus-IDs outside this range
# # will not be processed by this engine. Empty or unset range signifies allow all.
# bdev_busid_range: 0x80-0x8f
# #bdev_busid_range: 128-143
#-
# # Rank to be assigned as identifier for this engine.
# # Immutable after running "dmg storage format".
# #
# # Optional parameter; must be unique across all engines in the DAOS system.
# #
# # default: will be auto generated if not supplied.
#
# rank: 1
#
# # Number of I/O service threads (and network endpoints) per engine.
# # Immutable after running "dmg storage format".
# #
# # Each storage target manages a fraction of the (interleaved) SCM storage space,
# # and a fraction of one of the NVMe SSDs that are managed by this engine.
# # For optimal balance regarding the NVMe space, the number of targets should be
# # an integer multiple of the number of NVMe disks configured in bdev_list:
# # To obtain the maximum SCM performance, a certain number of targets is needed.
# # This is device- and workload-dependent, but around 16 targets usually work well.
# #
# # The server should have sufficiently many physical cores to support the
# # number of targets, plus the additional service threads.
#
# targets: 16
#
# # Number of additional offload service threads per engine.
# # Immutable after running "dmg storage format".
# #
# # Helper threads to accelerate checksum and server-side RPC dispatch.
# #
# # The server should have sufficiently many physical cores to support the
# # number of helper threads, plus the number of targets.
#
# nr_xs_helpers: 4
#
# # Pin this engine instance to cores and memory that are local to the
# # NUMA node ID specified with this value.
# #
# # For best performance, it is necessary that the fabric_iface of this engine
# # resides on the same NUMA node as the pinned_numa_node.
# #
# # Optional parameter; set either this option or first_core, but not both.
#
# #pinned_numa_node: 1
#
# # Offset of the first core to be used for I/O service threads (targets).
# # Immutable after running "dmg storage format".
# #
# # For best performance, it is necessary that the fabric_iface of this engine
# # resides on the same NUMA node as the first_core.
# #
# # Optional parameter; set either this option to a non-zero value or
# # pinned_numa_node, but not both.
#
# first_core: 22
#
# # A boolean that instructs the I/O Engine instance to bypass the NVMe
# # health check. This eliminates the check and related log output for those
# # systems with NVMe that do not support the device health data query.
#
# bypass_health_chk: true
#
# # Use a specific network interface.
# # Specify the fabric network interface that will be used by this engine.
# # Optionally specify the fabric network interface port that will be used
# # by this engine, but only if you have a specific need; the port will
# # normally be chosen automatically.
#
# fabric_iface: ib1
# fabric_iface_port: 20000
#
# # Force specific debug mask for the engine at start up time.
# # By default, just use the default debug mask used by DAOS.
# # Mask specifies minimum level of message significance to pass to logger.
#
# # default: ERR
# log_mask: INFO
#
# # Force specific path for DAOS debug logs.
#
# # default: /tmp/daos.log
# log_file: /tmp/daos_engine.1.log
#
# # Pass specific environment variables to the engine process.
# # Empty by default. Values should be supplied without encapsulating quotes.
#
# env_vars:
# - CRT_TIMEOUT=100
#
# storage:
# -
# # Define a pre-configured mountpoint for storage class memory to be used
# # by this engine.
# # Path should be unique to engine instance (can use different subdirs).
#
# scm_mount: /mnt/daos/2
#
# # Backend SCM device type. Either use PMem (Intel(R) Optane(TM) persistent
# # memory) modules configured in interleaved mode or emulate SCM with a
# # tmpfs running in RAM.
# # Options are:
# # - "dcpm" for real SCM (preferred option), scm_size is ignored
# # - "ram" to emulate SCM with memory, scm_list is ignored
# # Immutable after running "dmg storage format".
#
# class: dcpm
#
# # When class is set to dcpm, scm_list is the list of device paths for
# # PMem namespaces (currently only one per engine supported).
# scm_list: [/dev/pmem1]
#
# -
# # Backend block device type. Force a SPDK driver to be used by this engine
# # instance.
# # Options are:
# # - "nvme" for NVMe SSDs (preferred option), bdev_{size,number} ignored
# # - "file" to emulate a NVMe SSD with a regular file, bdev_number ignored
# # - "kdev" to use a kernel block device, bdev_{size,number} ignored
# # Immutable after running "dmg storage format".
#
# # When class is set to file, Linux AIO will be used to emulate NVMe.
# # The size of each file that will be created is specified by bdev_size in GB units.
# # The location of the files that will be created is specified in bdev_list.
# class: file
# bdev_list: [/tmp/daos-bdev1,/tmp/daos-bdev2]
# bdev_size: 16
#
# # When class is set to kdev, bdev_list is the list of unique kernel
# # block devices that should be different across engine instances.
# class: kdev
# bdev_list: [/dev/sdc,/dev/sdd]
#
# # If Volume Management Devices (VMD) are to be used, then the disable_vmd
# # flag needs to be set to false (default). The class will remain the
# # default "nvme" type, and bdev_list will include the VMD addresses.
# #class: nvme
# #bdev_list: ["0000:5d:05.5"]
#
# #class: nvme
# #bdev_list: ["0000:da:00.0", "0000:db:00.0"] # generate regular nvme.conf
#
# # Optional override, will be automatically generated based on NUMA affinity.
# # Filter hot-pluggable devices by PCI bus-ID by specifying a hexadecimal
# # range. Hotplug events relating to devices with PCI bus-IDs outside this range
# # will not be processed by this engine. Empty or unset range signifies allow all.
# #bdev_busid_range: 0xd0-0xdf
# #bdev_busid_range: 208-223