####################################
# Akka Actor Reference Config File #
####################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

# Akka version, checked against the runtime version of Akka. Loaded from generated conf file.
include "version"

akka {
  # Home directory of Akka; modules in the deploy directory will be loaded
  home = ""
  # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs
  # to STDOUT)
  loggers = ["akka.event.Logging$DefaultLogger"]

  # Filter of log events that is used by the LoggingAdapter before
  # publishing log events to the eventStream. It can perform
  # fine grained filtering based on the log source. The default
  # implementation filters on the `loglevel`.
  # FQCN of the LoggingFilter. The Class of the FQCN must implement
  # akka.event.LoggingFilter and have a public constructor with
  # (akka.actor.ActorSystem.Settings, akka.event.EventStream) parameters.
  logging-filter = "akka.event.DefaultLoggingFilter"

  # Specifies the default loggers dispatcher
  loggers-dispatcher = "akka.actor.default-dispatcher"

  # Loggers are created and registered synchronously during ActorSystem
  # start-up, and since they are actors, this timeout is used to bound the
  # waiting time
  logger-startup-timeout = 5s

  # Log level used by the configured loggers (see "loggers") as soon
  # as they have been started; before that, see "stdout-loglevel"
  # Options: OFF, ERROR, WARNING, INFO, DEBUG
  loglevel = "INFO"
  # Log level for the very basic logger activated during ActorSystem startup.
  # This logger prints the log messages to stdout (System.out).
  # Options: OFF, ERROR, WARNING, INFO, DEBUG
  stdout-loglevel = "WARNING"

  # Log the complete configuration at INFO level when the actor system is started.
  # This is useful when you are uncertain of what configuration is used.
  log-config-on-start = off

  # Log at info level when messages are sent to dead letters, or published to
  # eventStream as `DeadLetter`, `Dropped` or `UnhandledMessage`.
  # Possible values:
  # on: all dead letters are logged
  # off: no logging of dead letters
  # n: positive integer, number of dead letters that will be logged
  log-dead-letters = 10

  # Possibility to turn off logging of dead letters while the actor system
  # is shutting down. Logging is only done when enabled by 'log-dead-letters'
  # setting.
  log-dead-letters-during-shutdown = off
  # When log-dead-letters is enabled, this will re-enable the logging after the
  # configured duration.
  # infinite: suspend the logging forever;
  # or a duration (e.g. 5 minutes), after which the logging will be re-enabled.
  log-dead-letters-suspend-duration = 5 minutes
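
  # As an illustration (the values here are made up, not defaults), an
  # application.conf could log only the first 100 dead letters and re-enable
  # the logging after one minute of suspension:
  #   akka.log-dead-letters = 100
  #   akka.log-dead-letters-during-shutdown = off
  #   akka.log-dead-letters-suspend-duration = 1 minute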

  # List FQCN of extensions which shall be loaded at actor system startup.
  # Library extensions are regular extensions that are loaded at startup and are
  # available for third party library authors to enable auto-loading of extensions when
  # present on the classpath. This is done by appending entries:
  # 'library-extensions += "Extension"' in the library `reference.conf`.
  #
  # Should not be set by end user applications in 'application.conf', use the extensions property for that
  #
  library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension$"]

  # List FQCN of extensions which shall be loaded at actor system startup.
  # Should be in the format: 'extensions = ["foo", "bar"]' etc.
  # See the Akka Documentation for more info about Extensions
  extensions = []

  # Toggles whether threads created by this ActorSystem should be daemons or not
  daemonic = off

  # Shut down the JVM, via System.exit(-1), in case of a fatal error,
  # such as OutOfMemoryError
  jvm-exit-on-fatal-error = on

  # Akka installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will
  # not disable user-provided hooks registered using `CoordinatedShutdown#addCancellableJvmShutdownHook`.
  # This property is related to `akka.coordinated-shutdown.run-by-jvm-shutdown-hook` below.
  # This property makes it possible to disable all such hooks if the application itself
  # or a higher level framework such as Play prefers to install the JVM shutdown hook and
  # terminate the ActorSystem itself, with or without using CoordinatedShutdown.
  jvm-shutdown-hooks = on

  # Version must be the same across all modules; if they differ the startup
  # will fail. It's possible, but not recommended, to disable this check and
  # only log a warning, by setting this property to `off`.
  fail-mixed-versions = on

  # Some modules (remoting only right now) can emit custom events to the Java Flight Recorder if running
  # on JDK 11 or later. If you for some reason do not want that, it can be disabled and switched to no-ops
  # with this toggle.
  java-flight-recorder {
    enabled = true
  }

  actor {

    # Either one of "local", "remote" or "cluster" or the
    # FQCN of the ActorRefProvider to be used; the below is the built-in default.
    # Note that "remote" and "cluster" require the akka-remote and akka-cluster
    # artifacts to be on the classpath.
    provider = "local"

    # The guardian "/user" will use this class to obtain its supervisorStrategy.
    # It needs to be a subclass of akka.actor.SupervisorStrategyConfigurator.
    # In addition to the default there is akka.actor.StoppingSupervisorStrategy.
    guardian-supervisor-strategy = "akka.actor.DefaultSupervisorStrategy"
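
    # For example, to stop (rather than restart) failing top-level actors, an
    # application.conf could select the bundled stopping strategy:
    #   akka.actor.guardian-supervisor-strategy = "akka.actor.StoppingSupervisorStrategy"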

    # Timeout for Extension creation and a few other potentially blocking
    # initialization tasks.
    creation-timeout = 20s

    # Serializes and deserializes (non-primitive) messages to ensure immutability;
    # this is only intended for testing.
    serialize-messages = off

    # Serializes and deserializes creators (in Props) to ensure that they can be
    # sent over the network; this is only intended for testing. Purely local
    # deployments, as marked with deploy.scope == LocalScope, are exempt from
    # verification.
    serialize-creators = off

    # If serialize-messages or serialize-creators are enabled, classes that start
    # with a prefix listed here are not verified.
    no-serialization-verification-needed-class-prefix = ["akka."]
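
    # A sketch of a test configuration using these settings (the "docs." prefix
    # is a hypothetical package assumed to be local-only):
    #   akka.actor.serialize-messages = on
    #   akka.actor.serialize-creators = on
    #   akka.actor.no-serialization-verification-needed-class-prefix = ["akka.", "docs."]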

    # Timeout for send operations to top-level actors which are in the process
    # of being started. This is only relevant if using a bounded mailbox or the
    # CallingThreadDispatcher for a top-level actor.
    unstarted-push-timeout = 10s

    # TypedActor deprecated since 2.6.0.
    typed {
      # Default timeout for the deprecated TypedActor (not the new actor APIs in 2.6)
      # methods with non-void return type.
      timeout = 5s
    }

    # Mapping from 'deployment.router' short names to fully qualified class names
    router.type-mapping {
      from-code = "akka.routing.NoRouter"
      round-robin-pool = "akka.routing.RoundRobinPool"
      round-robin-group = "akka.routing.RoundRobinGroup"
      random-pool = "akka.routing.RandomPool"
      random-group = "akka.routing.RandomGroup"
      balancing-pool = "akka.routing.BalancingPool"
      smallest-mailbox-pool = "akka.routing.SmallestMailboxPool"
      broadcast-pool = "akka.routing.BroadcastPool"
      broadcast-group = "akka.routing.BroadcastGroup"
      scatter-gather-pool = "akka.routing.ScatterGatherFirstCompletedPool"
      scatter-gather-group = "akka.routing.ScatterGatherFirstCompletedGroup"
      tail-chopping-pool = "akka.routing.TailChoppingPool"
      tail-chopping-group = "akka.routing.TailChoppingGroup"
      consistent-hashing-pool = "akka.routing.ConsistentHashingPool"
      consistent-hashing-group = "akka.routing.ConsistentHashingGroup"
    }

    deployment {
      # deployment id pattern - in the format: /parent/child etc.
      default {
        # The id of the dispatcher to use for this actor.
        # If undefined or empty the dispatcher specified in code
        # (Props.withDispatcher) is used, or default-dispatcher if not
        # specified at all.
        dispatcher = ""

        # The id of the mailbox to use for this actor.
        # If undefined or empty the default mailbox of the configured dispatcher
        # is used or if there is no mailbox configuration the mailbox specified
        # in code (Props.withMailbox) is used.
        # If there is a mailbox defined in the configured dispatcher then that
        # overrides this setting.
        mailbox = ""

        # routing (load-balance) scheme to use
        # - available: one of the short names defined in 'router.type-mapping'
        #   above, e.g. "round-robin-pool", "random-group", "broadcast-pool"
        # - or: Fully qualified class name of the router class.
        #   The class must extend akka.routing.CustomRouterConfig and
        #   have a public constructor with a com.typesafe.config.Config
        #   parameter and an optional akka.actor.DynamicAccess parameter.
        # - default is "from-code";
        # Whether or not an actor is transformed to a Router is decided in code
        # only (Props.withRouter). The type of router can be overridden in the
        # configuration; specifying "from-code" means that the values specified
        # in the code shall be used.
        # In case of routing, the actors to be routed to can be specified
        # in several ways:
        # - nr-of-instances: will create that many children
        # - routees.paths: will route messages to these paths using ActorSelection,
        #   i.e. will not create children
        # - resizer: dynamically resizable number of routees as specified in
        #   resizer below
        router = "from-code"

        # number of children to create in case of a router;
        # this setting is ignored if routees.paths is given
        nr-of-instances = 1

        # within is the timeout used by routers that involve Future calls
        # (e.g. the scatter-gather routers)
        within = 5 seconds

        # number of virtual nodes per node for consistent-hashing router
        virtual-nodes-factor = 10

        tail-chopping-router {
          # interval is duration between sending message to next routee
          interval = 10 milliseconds
        }

        routees {
          # Alternatively to giving nr-of-instances you can specify the full
          # paths of those actors which should be routed to. This setting takes
          # precedence over nr-of-instances
          paths = []
        }
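
        # As a sketch (the deployment path and actor paths are made up), a group
        # router that routes to existing actors instead of creating children
        # could be configured in application.conf as:
        #   akka.actor.deployment {
        #     /my-group {
        #       router = "round-robin-group"
        #       routees.paths = ["/user/workers/w1", "/user/workers/w2"]
        #     }
        #   }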

        # To use a dedicated dispatcher for the routees of the pool you can
        # define the dispatcher configuration inline with the property name
        # 'pool-dispatcher' in the deployment section of the router.
        # For example:
        #   pool-dispatcher {
        #     fork-join-executor.parallelism-min = 5
        #     fork-join-executor.parallelism-max = 5
        #   }

        # Routers with dynamically resizable number of routees; this feature is
        # enabled by including (parts of) this section in the deployment
        resizer {
          enabled = off

          # The minimum number of routees the router should ever have.
          lower-bound = 1

          # The maximum number of routees the router should ever have.
          # Must be greater than or equal to lower-bound.
          upper-bound = 10

          # Threshold used to evaluate if a routee is considered to be busy
          # (under pressure). Implementation depends on this value (default is 1).
          # 0:   number of routees currently processing a message.
          # 1:   number of routees currently processing a message or with
          #      some messages in their mailbox.
          # > 1: number of routees with at least the configured pressure-threshold
          #      messages in their mailbox. Note that estimating the mailbox size
          #      of the default UnboundedMailbox is an O(N) operation.
          pressure-threshold = 1

          # Percentage to increase capacity by whenever all routees are busy.
          # For example, 0.2 would increase capacity by 20% (rounded up), i.e. if
          # the current capacity is 6 it will request an increase of 2 more routees.
          rampup-rate = 0.2

          # Minimum fraction of busy routees before backing off.
          # For example, if this is 0.3, then we'll remove some routees only when
          # less than 30% of routees are busy, i.e. if current capacity is 10 and
          # 3 are busy then the capacity is unchanged, but if 2 or fewer are busy
          # the capacity is decreased.
          # Use 0.0 or a negative value to avoid removal of routees.
          backoff-threshold = 0.3

          # Fraction of routees to be removed when the resizer reaches the
          # backoffThreshold.
          # For example, 0.1 would decrease capacity by 10% (rounded up), i.e. if
          # current capacity is 9 it will request a decrease of 1 routee.
          backoff-rate = 0.1

          # Number of messages between resize operations.
          # Use 1 to resize before each message.
          messages-per-resize = 10
        }
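
        # For example (the deployment path "/my-pool" is illustrative), a
        # resizable round-robin pool holding between 2 and 15 routees could be
        # deployed as:
        #   akka.actor.deployment {
        #     /my-pool {
        #       router = "round-robin-pool"
        #       resizer {
        #         enabled = on
        #         lower-bound = 2
        #         upper-bound = 15
        #       }
        #     }
        #   }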

        # Routers with dynamically resizable number of routees based on
        # performance metrics.
        # This feature is enabled by including (parts of) this section in
        # the deployment; it cannot be enabled together with the default resizer.
        optimal-size-exploring-resizer {
          enabled = off

          # The minimum number of routees the router should ever have.
          lower-bound = 1

          # The maximum number of routees the router should ever have.
          # Must be greater than or equal to lower-bound.
          upper-bound = 10

          # Probability of ramping down when all routees are busy
          # during exploration.
          chance-of-ramping-down-when-full = 0.2

          # Interval between each resize attempt
          action-interval = 5s

          # If the routees have not been fully utilized (i.e. all routees busy)
          # for this long, the resizer will downsize the pool.
          downsize-after-underutilized-for = 72h

          # During exploration, the ratio between the largest step size and the
          # current pool size. E.g. if the current pool size is 50, and the
          # explore-step-size is 0.1, the maximum pool size change during
          # exploration will be +- 5
          explore-step-size = 0.1

          # Probability of doing an exploration vs. an optimization.
          chance-of-exploration = 0.4

          # When downsizing after a long streak of underutilization, the resizer
          # will downsize the pool to the highest utilization multiplied by
          # a downsize ratio. This downsize ratio determines the new pool size
          # in comparison to the highest utilization.
          # E.g. if the highest utilization is 10, and the downsize ratio
          # is 0.8, the pool will be downsized to 8
          downsize-ratio = 0.8

          # When optimizing, the resizer only considers the sizes adjacent to the
          # current size. This number indicates how many adjacent sizes to consider.
          optimization-range = 16

          # The weight of the latest metric over old metrics when collecting
          # performance metrics.
          # E.g. say the last processing speed was 10 millis per message at pool
          # size 5, and the newly collected processing speed is 6 millis per
          # message at pool size 5. Given a weight of 0.3, the metric
          # representing pool size 5 will be 6 * 0.3 + 10 * 0.7, i.e. 8.8 millis.
          # Obviously, this number should be between 0 and 1.
          weight-of-latest-metric = 0.5
        }
      }
"/IO-DNS/inet-address" { | |
mailbox = "unbounded" | |
router = "consistent-hashing-pool" | |
nr-of-instances = 4 | |
} | |
"/IO-DNS/inet-address/*" { | |
dispatcher = "akka.actor.default-blocking-io-dispatcher" | |
} | |
"/IO-DNS/async-dns" { | |
mailbox = "unbounded" | |
router = "round-robin-pool" | |
nr-of-instances = 1 | |
} | |
} | |

    default-dispatcher {
      # Must be one of the following:
      # Dispatcher, PinnedDispatcher, or the FQCN of a class inheriting
      # MessageDispatcherConfigurator with a public constructor taking
      # a com.typesafe.config.Config parameter and an
      # akka.dispatch.DispatcherPrerequisites parameter.
      # PinnedDispatcher must be used together with executor=thread-pool-executor.
      type = "Dispatcher"

      # Which kind of ExecutorService to use for this dispatcher
      # Valid options:
      # - "default-executor" requires a "default-executor" section
      # - "fork-join-executor" requires a "fork-join-executor" section
      # - "thread-pool-executor" requires a "thread-pool-executor" section
      # - "affinity-pool-executor" requires an "affinity-pool-executor" section
      # - A FQCN of a class extending ExecutorServiceConfigurator
      executor = "default-executor"

      # This will be used if you have set "executor = "default-executor"".
      # If an ActorSystem is created with a given ExecutionContext, this
      # ExecutionContext will be used as the default executor for all
      # dispatchers in the ActorSystem configured with
      # executor = "default-executor". Note that "default-executor"
      # is the default value for executor, and therefore used if not
      # specified otherwise. If no ExecutionContext is given,
      # the executor configured in "fallback" will be used.
      default-executor {
        fallback = "fork-join-executor"
      }

      # This will be used if you have set "executor = "affinity-pool-executor"".
      # Underlying thread pool implementation is akka.dispatch.affinity.AffinityPool.
      # This executor is classified as "ApiMayChange".
      affinity-pool-executor {
        # Min number of threads to cap factor-based parallelism number to
        parallelism-min = 4

        # The parallelism factor is used to determine thread pool size using the
        # following formula: ceil(available processors * factor). Resulting size
        # is then bounded by the parallelism-min and parallelism-max values.
        parallelism-factor = 0.8

        # Max number of threads to cap factor-based parallelism number to.
        parallelism-max = 64

        # Each worker in the pool uses a separate bounded MPSC queue. This value
        # indicates the upper bound of the queue. Whenever an attempt to enqueue
        # a task is made and the queue does not have capacity to accommodate
        # the task, the rejection handler created by the factory specified
        # in "rejection-handler" is invoked.
        task-queue-size = 512

        # FQCN of the Rejection handler used in the pool.
        # Must have an empty public constructor and must
        # implement akka.dispatch.affinity.RejectionHandlerFactory.
        rejection-handler = "akka.dispatch.affinity.ThrowOnOverflowRejectionHandler"

        # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
        # The tradeoff is that to have low latency more CPU time must be used to be
        # able to react quickly on incoming messages or send as fast as possible after
        # backoff backpressure.
        # Level 1 strongly prefers low CPU consumption over low latency.
        # Level 10 strongly prefers low latency over low CPU consumption.
        idle-cpu-level = 5

        # FQCN of the akka.dispatch.affinity.QueueSelectorFactory.
        # The Class of the FQCN must have a public constructor with a
        # (com.typesafe.config.Config) parameter.
        # A QueueSelectorFactory creates instances of akka.dispatch.affinity.QueueSelector,
        # which is responsible for determining which task queue a Runnable should be
        # enqueued in.
        queue-selector = "akka.dispatch.affinity.FairDistributionHashCache"
# When using the "akka.dispatch.affinity.FairDistributionHashCache" queue selector | |
# internally the AffinityPool uses two methods to determine which task | |
# queue to allocate a Runnable to: | |
# - map based - maintains a round robin counter and a map of Runnable | |
# hashcodes to queues that they have been associated with. This ensures | |
# maximum fairness in terms of work distribution, meaning that each worker | |
# will get approximately equal amount of mailboxes to execute. This is suitable | |
# in cases where we have a small number of actors that will be scheduled on | |
# the pool and we want to ensure the maximum possible utilization of the | |
# available threads. | |
# - hash based - the task - queue in which the runnable should go is determined | |
# by using an uniformly distributed int to int hash function which uses the | |
# hash code of the Runnable as an input. This is preferred in situations where we | |
# have enough number of distinct actors to ensure statistically uniform | |
# distribution of work across threads or we are ready to sacrifice the | |
# former for the added benefit of avoiding map look-ups. | |
        fair-work-distribution {
          # The value serves as a threshold which determines the point at which the
          # pool switches from the first to the second work distribution scheme.
          # For example, if the value is set to 128, the pool can observe up to
          # 128 unique actors and schedule their mailboxes using the map based
          # approach. Once this number is reached the pool switches to hash based
          # task distribution mode. If the value is set to 0, the map based
          # work distribution approach is disabled and only the hash based is
          # used irrespective of the number of unique actors. Valid range is
          # 0 to 2048 (inclusive).
          threshold = 128
        }
      }

      # This will be used if you have set "executor = "fork-join-executor"".
      # Underlying thread pool implementation is java.util.concurrent.ForkJoinPool
      fork-join-executor {
        # Min number of threads to cap factor-based parallelism number to
        parallelism-min = 8

        # The parallelism factor is used to determine thread pool size using the
        # following formula: ceil(available processors * factor). Resulting size
        # is then bounded by the parallelism-min and parallelism-max values.
        parallelism-factor = 1.0

        # Max number of threads to cap factor-based parallelism number to
        parallelism-max = 64
# Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack | |
# like peeking mode which "pop". | |
task-peeking-mode = "FIFO" | |
} | |

      # This will be used if you have set "executor = "thread-pool-executor"".
      # Underlying thread pool implementation is java.util.concurrent.ThreadPoolExecutor
      thread-pool-executor {
        # Keep alive time for threads
        keep-alive-time = 60s

        # Define a fixed thread pool size with this property. The corePoolSize
        # and the maximumPoolSize of the ThreadPoolExecutor will be set to this
        # value, if it is defined. Then the other pool-size properties will not
        # be used.
        #
        # Valid values are: `off` or a positive integer.
        fixed-pool-size = off

        # Min number of threads to cap factor-based corePoolSize number to
        core-pool-size-min = 8

        # The core-pool-size-factor is used to determine corePoolSize of the
        # ThreadPoolExecutor using the following formula:
        # ceil(available processors * factor).
        # Resulting size is then bounded by the core-pool-size-min and
        # core-pool-size-max values.
        core-pool-size-factor = 3.0

        # Max number of threads to cap factor-based corePoolSize number to
        core-pool-size-max = 64

        # Minimum number of threads to cap factor-based maximumPoolSize number to
        max-pool-size-min = 8

        # The max-pool-size-factor is used to determine maximumPoolSize of the
        # ThreadPoolExecutor using the following formula:
        # ceil(available processors * factor)
        # The maximumPoolSize will not be less than corePoolSize.
        # It is only used if using a bounded task queue.
        max-pool-size-factor = 3.0

        # Max number of threads to cap factor-based maximumPoolSize number to
        max-pool-size-max = 64

        # Specifies the bounded capacity of the task queue (< 1 == unbounded)
        task-queue-size = -1

        # Specifies which type of task queue will be used, can be "array" or
        # "linked" (default)
        task-queue-type = "linked"

        # Allow core threads to time out
        allow-core-timeout = on
      }

      # How long the dispatcher will wait for new actors until it shuts down
      shutdown-timeout = 1s

      # Throughput defines the number of messages that are processed in a batch
      # before the thread is returned to the pool. Set to 1 for as fair as possible.
      throughput = 5

      # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
      throughput-deadline-time = 0ms

      # For BalancingDispatcher: whether the balancing dispatcher should attempt
      # to schedule idle actors using the same dispatcher when a message comes in,
      # and the dispatcher's ExecutorService is not fully busy already.
      attempt-teamwork = on

      # If this dispatcher requires a specific type of mailbox, specify the
      # fully-qualified class name here; the actually created mailbox will
      # be a subtype of this type. The empty string signifies no requirement.
      mailbox-requirement = ""
    }
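
    # For example (the dispatcher name "my-blocking-dispatcher" is made up), an
    # application can define an additional dispatcher for blocking work next to
    # the default one and select it via Props.withDispatcher or a deployment:
    #   my-blocking-dispatcher {
    #     type = "Dispatcher"
    #     executor = "thread-pool-executor"
    #     thread-pool-executor.fixed-pool-size = 32
    #     throughput = 1
    #   }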

    # Default separate internal dispatcher to run Akka internal tasks and actors on,
    # protecting them against starvation caused by accidental blocking in user
    # actors (which run on the default dispatcher)
    internal-dispatcher {
      type = "Dispatcher"
      executor = "fork-join-executor"
      throughput = 5
      fork-join-executor {
        parallelism-min = 4
        parallelism-factor = 1.0
        parallelism-max = 64
      }
    }

    default-blocking-io-dispatcher {
      type = "Dispatcher"
      executor = "thread-pool-executor"
      throughput = 1
      thread-pool-executor {
        fixed-pool-size = 16
      }
    }

    default-mailbox {
      # FQCN of the MailboxType. The Class of the FQCN must have a public
      # constructor with
      # (akka.actor.ActorSystem.Settings, com.typesafe.config.Config) parameters.
      mailbox-type = "akka.dispatch.UnboundedMailbox"

      # If the mailbox is bounded then it uses this setting to determine its
      # capacity. The provided value must be positive.
      # NOTICE:
      # Up to version 2.1 the mailbox type was determined based on this setting;
      # this is no longer the case, the type must explicitly be a bounded mailbox.
      mailbox-capacity = 1000

      # If the mailbox is bounded then this is the timeout for enqueueing
      # in case the mailbox is full. Negative values signify infinite
      # timeout, which should be avoided as it bears the risk of deadlock.
      mailbox-push-timeout-time = 10s

      # For Actor with Stash: The default capacity of the stash.
      # If negative (or zero) then an unbounded stash is used (default).
      # If positive then a bounded stash is used and the capacity is set
      # by this property.
      stash-capacity = -1
    }
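
    # As an illustration (the name "my-bounded-mailbox" is hypothetical), a
    # bounded mailbox can be defined at the root of application.conf and
    # selected with Props.withMailbox or the deployment 'mailbox' setting:
    #   my-bounded-mailbox {
    #     mailbox-type = "akka.dispatch.BoundedMailbox"
    #     mailbox-capacity = 100
    #     mailbox-push-timeout-time = 100ms
    #   }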

    mailbox {
      # Mapping between message queue semantics and mailbox configurations.
      # Used by akka.dispatch.RequiresMessageQueue[T] to enforce different
      # mailbox types on actors.
      # If your Actor implements RequiresMessageQueue[T], then when you create
      # an instance of that actor its mailbox type will be decided by looking
      # up a mailbox configuration via T in this mapping
      requirements {
        "akka.dispatch.UnboundedMessageQueueSemantics" =
          akka.actor.mailbox.unbounded-queue-based
        "akka.dispatch.BoundedMessageQueueSemantics" =
          akka.actor.mailbox.bounded-queue-based
        "akka.dispatch.DequeBasedMessageQueueSemantics" =
          akka.actor.mailbox.unbounded-deque-based
        "akka.dispatch.UnboundedDequeBasedMessageQueueSemantics" =
          akka.actor.mailbox.unbounded-deque-based
        "akka.dispatch.BoundedDequeBasedMessageQueueSemantics" =
          akka.actor.mailbox.bounded-deque-based
        "akka.dispatch.MultipleConsumerSemantics" =
          akka.actor.mailbox.unbounded-queue-based
        "akka.dispatch.ControlAwareMessageQueueSemantics" =
          akka.actor.mailbox.unbounded-control-aware-queue-based
        "akka.dispatch.UnboundedControlAwareMessageQueueSemantics" =
          akka.actor.mailbox.unbounded-control-aware-queue-based
        "akka.dispatch.BoundedControlAwareMessageQueueSemantics" =
          akka.actor.mailbox.bounded-control-aware-queue-based
        "akka.event.LoggerMessageQueueSemantics" =
          akka.actor.mailbox.logger-queue
      }

      unbounded-queue-based {
        # FQCN of the MailboxType. The Class of the FQCN must have a public
        # constructor with (akka.actor.ActorSystem.Settings,
        # com.typesafe.config.Config) parameters.
        mailbox-type = "akka.dispatch.UnboundedMailbox"
      }

      bounded-queue-based {
        # FQCN of the MailboxType. The Class of the FQCN must have a public
        # constructor with (akka.actor.ActorSystem.Settings,
        # com.typesafe.config.Config) parameters.
        mailbox-type = "akka.dispatch.BoundedMailbox"
      }

      unbounded-deque-based {
        # FQCN of the MailboxType. The Class of the FQCN must have a public
        # constructor with (akka.actor.ActorSystem.Settings,
        # com.typesafe.config.Config) parameters.
        mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox"
      }

      bounded-deque-based {
        # FQCN of the MailboxType. The Class of the FQCN must have a public
        # constructor with (akka.actor.ActorSystem.Settings,
        # com.typesafe.config.Config) parameters.
        mailbox-type = "akka.dispatch.BoundedDequeBasedMailbox"
      }

      unbounded-control-aware-queue-based {
        # FQCN of the MailboxType. The Class of the FQCN must have a public
        # constructor with (akka.actor.ActorSystem.Settings,
        # com.typesafe.config.Config) parameters.
        mailbox-type = "akka.dispatch.UnboundedControlAwareMailbox"
      }

      bounded-control-aware-queue-based {
        # FQCN of the MailboxType. The Class of the FQCN must have a public
        # constructor with (akka.actor.ActorSystem.Settings,
        # com.typesafe.config.Config) parameters.
        mailbox-type = "akka.dispatch.BoundedControlAwareMailbox"
      }

      # The LoggerMailbox will drain all messages in the mailbox
      # when the system is shut down and deliver them to the StandardOutLogger.
      # Do not change this unless you know what you are doing.
      logger-queue {
        mailbox-type = "akka.event.LoggerMailboxType"
      }
    }

    debug {
      # enable the function of Actor.loggable(), which logs any received message
      # at DEBUG level, see the "Testing Actor Systems" section of the Akka
      # Documentation at http://akka.io/docs
      receive = off

      # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.)
      autoreceive = off

      # enable DEBUG logging of actor lifecycle changes
      lifecycle = off

      # enable DEBUG logging of all LoggingFSMs for events, transitions and timers
      fsm = off

      # enable DEBUG logging of subscription changes on the eventStream
      event-stream = off

      # enable DEBUG logging of unhandled messages
      unhandled = off

      # enable WARN logging of misconfigured routers
      router-misconfiguration = off
    }
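
    # For example, to trace message handling during development, an
    # application.conf might enable (these log at DEBUG, so loglevel must be
    # "DEBUG" for the output to show up):
    #   akka.actor.debug {
    #     receive = on
    #     autoreceive = on
    #     lifecycle = on
    #   }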

    # SECURITY BEST-PRACTICE is to disable java serialization for its multiple
    # known attack surfaces.
    #
    # This setting is a short-cut to
    # - using DisabledJavaSerializer instead of JavaSerializer
    #
    # Completely disable the use of `akka.serialization.JavaSerializer` by the
    # Akka Serialization extension; instead DisabledJavaSerializer will
    # be inserted, which will fail explicitly if attempts to use java serialization
    # are made.
    #
    # The log messages emitted by such a serializer SHOULD be treated as potential
    # attacks which the serializer prevented, as they MAY indicate an external
    # operator attempting to send malicious messages intending to use java
    # serialization as an attack vector. The attempts are logged with the
    # SECURITY marker.
    #
    # Please note that this option does not stop you from manually invoking
    # java serialization.
    #
    allow-java-serialization = off

    # Log warnings when Java serialization is used to serialize messages.
    # Java serialization is not very performant and should not be used in
    # production environments unless you don't care about performance and
    # security. In that case you can turn this off.
    warn-about-java-serializer-usage = on

    # To be used with the above warn-about-java-serializer-usage.
    # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off,
    # warnings are suppressed for classes extending NoSerializationVerificationNeeded
    # to reduce noise.
    warn-on-no-serialization-verification = on

    # Entries for pluggable serializers and their bindings.
    serializers {
      java = "akka.serialization.JavaSerializer"
      bytes = "akka.serialization.ByteArraySerializer"
      primitive-long = "akka.serialization.LongSerializer"
      primitive-int = "akka.serialization.IntSerializer"
      primitive-string = "akka.serialization.StringSerializer"
      primitive-bytestring = "akka.serialization.ByteStringSerializer"
      primitive-boolean = "akka.serialization.BooleanSerializer"
    }

    # Class to Serializer binding. You only need to specify the name of an
    # interface or abstract base class of the messages. In case of ambiguity
    # the most specific configured class is used, or a warning is given and
    # the "first" one is chosen.
    #
    # To disable one of the default serializers, assign its class to "none", like
    # "java.io.Serializable" = none
    serialization-bindings {
      "[B" = bytes
      "java.io.Serializable" = java
      "java.lang.String" = primitive-string
      "akka.util.ByteString$ByteString1C" = primitive-bytestring
      "akka.util.ByteString$ByteString1" = primitive-bytestring
      "akka.util.ByteString$ByteStrings" = primitive-bytestring
      "java.lang.Long" = primitive-long
      "scala.Long" = primitive-long
      "java.lang.Integer" = primitive-int
      "scala.Int" = primitive-int
      "java.lang.Boolean" = primitive-boolean
      "scala.Boolean" = primitive-boolean
    }

    # Configuration namespace of serialization identifiers.
    # Each serializer implementation must have an entry in the following format:
    # `akka.actor.serialization-identifiers."FQCN" = ID`
    # where `FQCN` is the fully qualified class name of the serializer implementation
    # and `ID` is a globally unique serializer identifier number.
    # Identifier values from 0 to 40 are reserved for Akka internal usage.
    serialization-identifiers {
      "akka.serialization.JavaSerializer" = 1
      "akka.serialization.ByteArraySerializer" = 4
      primitive-long = 18
      primitive-int = 19
      primitive-string = 20
      primitive-bytestring = 21
      primitive-boolean = 35
    }
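
    # Putting the three sections above together, a sketch of registering a
    # custom serializer in application.conf (the class name, message type and
    # id are made up; ids 0 to 40 are reserved for Akka):
    #   akka.actor {
    #     serializers {
    #       my-serializer = "com.example.MySerializer"
    #     }
    #     serialization-bindings {
    #       "com.example.MyMessage" = my-serializer
    #     }
    #     serialization-identifiers {
    #       "com.example.MySerializer" = 1000
    #     }
    #   }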
  }

  serialization.protobuf {
    # deprecated, use `allowed-classes` instead
    whitelist-class = [
      "com.google.protobuf.GeneratedMessage",
      "com.google.protobuf.GeneratedMessageV3",
      "scalapb.GeneratedMessageCompanion",
      "akka.protobuf.GeneratedMessage",
      "akka.protobufv3.internal.GeneratedMessageV3"
    ]

    # Additional classes that are allowed even if they are not defined in `serialization-bindings`.
    # It can be an exact class name, or the name of a super class or interface (one level).
    # This is useful when a class is not used for serialization any more and therefore removed
    # from `serialization-bindings`, but should still be possible to deserialize.
    allowed-classes = ${akka.serialization.protobuf.whitelist-class}
  }

  # Used to set the behavior of the scheduler.
  # Changing the default values may change the system behavior drastically so make
  # sure you know what you're doing! See the Scheduler section of the Akka
  # Documentation for more details.
  scheduler {
    # The LightArrayRevolverScheduler is used as the default scheduler in the
    # system. It does not execute the scheduled tasks at the exact time, but on
    # every tick it will run everything that is (over)due. You can increase or
    # decrease the accuracy of the execution timing by specifying a smaller or
    # larger tick duration. If you are scheduling a lot of tasks you should
    # consider increasing the ticks per wheel.
    # Note that it might take up to 1 tick to stop the Timer, so setting the
    # tick-duration to a high value will make shutting down the actor system
    # take longer.
    tick-duration = 10ms

    # The timer uses a circular wheel of buckets to store the timer tasks.
    # This should be set such that the majority of scheduled timeouts (for high
    # scheduling frequency) will be shorter than one rotation of the wheel
    # (ticks-per-wheel * tick-duration).
    # THIS MUST BE A POWER OF TWO!
    ticks-per-wheel = 512

    # This setting selects the timer implementation which shall be loaded at
    # system start-up.
    # The class given here must implement the akka.actor.Scheduler interface
    # and offer a public constructor which takes three arguments:
    #   1) com.typesafe.config.Config
    #   2) akka.event.LoggingAdapter
    #   3) java.util.concurrent.ThreadFactory
    implementation = akka.actor.LightArrayRevolverScheduler

    # When shutting down the scheduler, there will typically be a thread which
    # needs to be stopped, and this timeout determines how long to wait for
    # that to happen. In case of timeout the shutdown of the actor system will
    # proceed without running possibly still enqueued tasks.
    shutdown-timeout = 5s
  }
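
  # For example, an application with many short timer tasks could trade some
  # CPU for finer timer resolution (the values are illustrative, not defaults):
  #   akka.scheduler {
  #     tick-duration = 5ms
  #     ticks-per-wheel = 1024  # must remain a power of two
  #   }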

  io {

    # By default the select loops run on dedicated threads, hence using a
    # PinnedDispatcher
    pinned-dispatcher {
      type = "PinnedDispatcher"
      executor = "thread-pool-executor"
      thread-pool-executor.allow-core-timeout = off
    }

    tcp {
      # The number of selectors to stripe the served channels over; each of
      # these will use one select loop on the selector-dispatcher.
      nr-of-selectors = 1

      # Maximum number of open channels supported by this TCP module; there is
      # no intrinsic general limit, this setting is meant to enable DoS
      # protection by limiting the number of concurrently connected clients.
      # Also note that this is a "soft" limit; in certain cases the implementation
      # will accept a few connections more or a few less than the number configured
      # here. Must be an integer > 0 or "unlimited".
      max-channels = 256000

      # When trying to assign a new connection to a selector and the chosen
      # selector is at full capacity, retry selector choosing and assignment
      # this many times before giving up
      selector-association-retries = 10

      # The maximum number of connections that are accepted in one go;
      # higher numbers decrease latency, lower numbers increase fairness on
      # the worker-dispatcher
      batch-accept-limit = 10

      # The number of bytes per direct buffer in the pool used to read or write
      # network data from the kernel.
      direct-buffer-size = 128 KiB

      # The maximal number of direct buffers kept in the direct buffer pool for
      # reuse.
      direct-buffer-pool-limit = 1000

      # The duration a connection actor waits for a `Register` message from
      # its commander before aborting the connection.
      register-timeout = 5s

      # The maximum number of bytes delivered by a `Received` message. Before
      # more data is read from the network the connection actor will try to
      # do other work.
      # The purpose of this setting is to impose a smaller limit than the
      # configured receive buffer size. When using value 'unlimited' it will
      # try to read all from the receive buffer.
      max-received-message-size = unlimited

      # Enable fine grained logging of what goes on inside the implementation.
      # Be aware that this may log more than once per message sent to the actors
      # of the tcp implementation.
      trace-logging = off

      # Fully qualified config path which holds the dispatcher configuration
      # to be used for running the select() calls in the selectors
      selector-dispatcher = "akka.io.pinned-dispatcher"

      # Fully qualified config path which holds the dispatcher configuration
      # for the read/write worker actors
      worker-dispatcher = "akka.actor.internal-dispatcher"

      # Fully qualified config path which holds the dispatcher configuration
      # for the selector management actors
      management-dispatcher = "akka.actor.internal-dispatcher"

      # Fully qualified config path which holds the dispatcher configuration
      # on which file IO tasks are scheduled
      file-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"

      # The maximum number of bytes (or "unlimited") to transfer in one batch
      # when using the `WriteFile` command, which uses `FileChannel.transferTo` to
      # pipe files to a TCP socket. On some OSes, like Linux, `FileChannel.transferTo`
      # may block for a long time when network IO is faster than file IO.
      # Decreasing the value may improve fairness while increasing may improve
      # throughput.
      file-io-transferTo-limit = 512 KiB

      # The number of times to retry the `finishConnect` call after being notified about
      # OP_CONNECT. Retries are needed if the OP_CONNECT notification doesn't imply that
      # `finishConnect` will succeed, which is the case on Android.
      finish-connect-retries = 5

      # On Windows connection aborts are not reliably detected unless an OP_READ is
      # registered on the selector _after_ the connection has been reset. This
      # workaround enables an OP_CONNECT which forces the abort to be visible on Windows.
      # Enabling this setting on other platforms than Windows will cause various failures
      # and undefined behavior.
      # Possible values of this key are on, off and auto where auto will enable the
      # workaround if Windows is detected automatically.
      windows-connection-abort-workaround-enabled = off
    }

    udp {
      # The number of selectors to stripe the served channels over; each of
      # these will use one select loop on the selector-dispatcher.
      nr-of-selectors = 1

      # Maximum number of open channels supported by this UDP module.
      # Generally UDP does not require a large number of channels, therefore it
      # is recommended to keep this setting low.
      max-channels = 4096

      # The select loop can be used in two modes:
      # - setting "infinite" will select without a timeout, hogging a thread
      # - setting a positive timeout will do a bounded select call,
      #   enabling sharing of a single thread between multiple selectors
      #   (in this case you will have to use a different configuration for the
      #   selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
      # - setting it to zero means polling, i.e. calling selectNow()
      select-timeout = infinite

      # When trying to assign a new connection to a selector and the chosen
      # selector is at full capacity, retry selector choosing and assignment
      # this many times before giving up
      selector-association-retries = 10

      # The maximum number of datagrams that are read in one go,
      # higher numbers decrease latency, lower numbers increase fairness on
      # the worker-dispatcher
      receive-throughput = 3

      # The number of bytes per direct buffer in the pool used to read or write
      # network data from the kernel.
      direct-buffer-size = 128 KiB

      # The maximal number of direct buffers kept in the direct buffer pool for
      # reuse.
      direct-buffer-pool-limit = 1000

      # Enable fine grained logging of what goes on inside the implementation.
      # Be aware that this may log more than once per message sent to the actors
      # of the udp implementation.
      trace-logging = off

      # Fully qualified config path which holds the dispatcher configuration
      # to be used for running the select() calls in the selectors
      selector-dispatcher = "akka.io.pinned-dispatcher"

      # Fully qualified config path which holds the dispatcher configuration
      # for the read/write worker actors
      worker-dispatcher = "akka.actor.internal-dispatcher"

      # Fully qualified config path which holds the dispatcher configuration
      # for the selector management actors
      management-dispatcher = "akka.actor.internal-dispatcher"
    }

    udp-connected {
      # The number of selectors to stripe the served channels over; each of
      # these will use one select loop on the selector-dispatcher.
      nr-of-selectors = 1

      # Maximum number of open channels supported by this UDP module.
      # Generally UDP does not require a large number of channels, therefore it
      # is recommended to keep this setting low.
      max-channels = 4096

      # The select loop can be used in two modes:
      # - setting "infinite" will select without a timeout, hogging a thread
      # - setting a positive timeout will do a bounded select call,
      #   enabling sharing of a single thread between multiple selectors
      #   (in this case you will have to use a different configuration for the
      #   selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
      # - setting it to zero means polling, i.e. calling selectNow()
      select-timeout = infinite

      # When trying to assign a new connection to a selector and the chosen
      # selector is at full capacity, retry selector choosing and assignment
      # this many times before giving up
      selector-association-retries = 10

      # The maximum number of datagrams that are read in one go,
      # higher numbers decrease latency, lower numbers increase fairness on
      # the worker-dispatcher
      receive-throughput = 3

      # The number of bytes per direct buffer in the pool used to read or write
      # network data from the kernel.
      direct-buffer-size = 128 KiB

      # The maximal number of direct buffers kept in the direct buffer pool for
      # reuse.
      direct-buffer-pool-limit = 1000

      # Enable fine grained logging of what goes on inside the implementation.
      # Be aware that this may log more than once per message sent to the actors
      # of the udp implementation.
      trace-logging = off

      # Fully qualified config path which holds the dispatcher configuration
      # to be used for running the select() calls in the selectors
      selector-dispatcher = "akka.io.pinned-dispatcher"

      # Fully qualified config path which holds the dispatcher configuration
      # for the read/write worker actors
      worker-dispatcher = "akka.actor.internal-dispatcher"

      # Fully qualified config path which holds the dispatcher configuration
      # for the selector management actors
      management-dispatcher = "akka.actor.internal-dispatcher"
    }

    dns {
      # Fully qualified config path which holds the dispatcher configuration
      # for the manager and resolver router actors.
      # For actual router configuration see akka.actor.deployment./IO-DNS/*
      dispatcher = "akka.actor.internal-dispatcher"

      # Name of the subconfig at path akka.io.dns, see inet-address below
      #
      # Change to `async-dns` to use the new "native" DNS resolver,
      # which is also capable of resolving SRV records.
      resolver = "inet-address"
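
      # For example, to switch to the native resolver and point it at specific
      # nameservers (the addresses shown are Google's public DNS, used here
      # purely as an illustration):
      #   akka.io.dns {
      #     resolver = "async-dns"
      #     async-dns.nameservers = ["8.8.8.8", "8.8.4.4"]
      #   }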

      # To-be-deprecated DNS resolver implementation which uses the Java
      # InetAddress to resolve DNS records. To be replaced by `akka.io.dns.async`,
      # which implements the DNS protocol natively and without blocking (unlike
      # InetAddress).
      inet-address {
        # Must implement akka.io.DnsProvider
        provider-object = "akka.io.InetAddressDnsProvider"

        # To set the time to cache name resolutions
        # Possible values:
        # default: sun.net.InetAddressCachePolicy.get() and getNegative()
        # forever: cache forever
        # never: no caching
        # n [time unit]: positive timeout with unit, for example 30s
        positive-ttl = default
        negative-ttl = default

        # How often to sweep out expired cache entries.
        # Note that this interval has nothing to do with TTLs
        cache-cleanup-interval = 120s
      }

      async-dns {
        provider-object = "akka.io.dns.internal.AsyncDnsProvider"

        # Set an upper bound for caching successfully resolved DNS entries;
        # if the DNS record has a smaller TTL value than the setting, the
        # record's TTL will be used. Default is to use the record TTL with no cap.
        # Possible values:
        # forever: always use the minimum TTL from the found records
        # never: never cache
        # n [time unit] = cap the caching to this value
        positive-ttl = forever

        # Set how long the fact that a DNS record could not be found is
        # cached. If a new resolution is done while the fact is cached it will
        # be failed and not result in an actual DNS resolution. Default is
        # to never cache.
        # Possible values:
        # never: never cache
        # forever: cache a missing DNS record forever (you probably will not want to do this)
        # n [time unit] = cache for this long
        negative-ttl = never

        # Configures nameservers to query during DNS resolution.
        # Defaults to the nameservers that would be used by the JVM by default.
        # Set to a list of IPs to override the servers, e.g. [ "8.8.8.8", "8.8.4.4" ] for Google's servers
        # If multiple are defined then they are tried in order until one responds
        nameservers = default

        # The time that a request is allowed to live before being discarded
        # given no reply. The lower bound of this should always be the amount
        # of time to reasonably expect a DNS server to reply within.
        # If multiple name servers are provided then each gets this long to respond
        # before trying the next one
        resolve-timeout = 5s

        # How often to sweep out expired cache entries.
        # Note that this interval has nothing to do with TTLs
        cache-cleanup-interval = 120s

        # Configures the list of search domains.
        # Defaults to a system dependent lookup (on Unix like OSes, will attempt to
        # parse /etc/resolv.conf; on other platforms, will not make any attempt to
        # lookup the search domains). Set to a single domain, or a list of domains,
        # e.g. [ "example.com", "example.net" ].
        search-domains = default

        # Any hosts that have fewer dots than this will not be looked up directly;
        # instead, a search on the search domains will be tried first. This
        # corresponds to the ndots option in /etc/resolv.conf, see
        # https://linux.die.net/man/5/resolver for more info.
        # Defaults to a system dependent lookup (on Unix like OSes, will attempt to
        # parse /etc/resolv.conf; on other platforms, will default to 1).
        ndots = default
      }
    }
  }

  # CoordinatedShutdown is an extension that will perform registered
  # tasks in the order that is defined by the phases. It is started
  # by calling CoordinatedShutdown(system).run(). This can be triggered
  # by different things, for example:
  # - JVM shutdown hook will by default run CoordinatedShutdown
  # - Cluster node will automatically run CoordinatedShutdown when it
  #   sees itself as Exiting
  # - A management console or other application specific command can
  #   run CoordinatedShutdown
  coordinated-shutdown {
    # The timeout that will be used for a phase if not specified with
    # 'timeout' in the phase
    default-phase-timeout = 5 s

    # Terminate the ActorSystem in the last phase actor-system-terminate.
    terminate-actor-system = on

    # Exit the JVM (System.exit(0)) in the last phase actor-system-terminate
    # if this is set to 'on'. It is done after termination of the
    # ActorSystem if terminate-actor-system=on, otherwise it is done
    # immediately when the last phase is reached.
    exit-jvm = off

    # Exit status to use on System.exit(int) when 'exit-jvm' is 'on'.
    exit-code = 0

    # Run the coordinated shutdown when the JVM process exits, e.g.
    # via a kill SIGTERM signal (SIGINT, i.e. Ctrl-C, doesn't work).
    # This property is related to `akka.jvm-shutdown-hooks` above.
    run-by-jvm-shutdown-hook = on

    # Run the coordinated shutdown when ActorSystem.terminate is called.
    # Enabling this and disabling terminate-actor-system is not a supported
    # combination (will throw ConfigurationException at startup).
    run-by-actor-system-terminate = on

    # When Coordinated Shutdown is triggered an instance of `Reason` is
    # required. That value can be used to override the default settings.
    # Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be
    # overridden depending on the reason.
    reason-overrides {
      # Overrides are applied using the `reason.getClass.getName`.
      # Overrides the `exit-code` when the `Reason` is a cluster
      # Downing or a Cluster Join Unsuccessful event
      "akka.actor.CoordinatedShutdown$ClusterDowningReason$" {
        exit-code = -1
      }
      "akka.actor.CoordinatedShutdown$ClusterJoinUnsuccessfulReason$" {
        exit-code = -1
      }
    }

    #//#coordinated-shutdown-phases
    # CoordinatedShutdown is enabled by default and will run the tasks that
    # are added to these phases by individual Akka modules and user logic.
    #
    # The phases are ordered as a DAG by defining the dependencies between the phases
    # to make sure shutdown tasks are run in the right order.
    #
    # In general user tasks belong in the first few phases, but there may be use
    # cases where you would want to hook in new phases or register tasks later in
    # the DAG.
    #
    # Each phase is defined as a named config section with the
    # following optional properties:
    # - timeout=15s: Override the default-phase-timeout for this phase.
    # - recover=off: If the phase fails the shutdown is aborted
    #   and dependent phases will not be executed.
    # - enabled=off: Skip all tasks registered in this phase. DO NOT use
    #   this to disable phases unless you are absolutely sure what the
    #   consequences are. Many of the built in tasks depend on other tasks
    #   having been executed in earlier phases and may break if those are disabled.
    # - depends-on=[]: Run the phase after the given phases
    phases {

      # The first pre-defined phase that applications can add tasks to.
      # Note that more phases can be added in the application's
      # configuration by overriding this phase with an additional
      # depends-on.
      before-service-unbind {
      }
# This is where you can register tasks that makes a server stop accepting new connections. Already | |
# established connections should be allowed to continue and complete if possible. | |
service-unbind { | |
depends-on = [before-service-unbind] | |
} | |

      # Wait for requests that are in progress to be completed.
      # This is where you register tasks that will wait for already established connections to complete, potentially
      # also first telling them that it is time to close down.
      service-requests-done {
        depends-on = [service-unbind]
      }

      # Final shutdown of service endpoints.
      # This is where you would add tasks that forcefully kill connections that are still around.
      service-stop {
        depends-on = [service-requests-done]
      }

      # Phase for custom application tasks that are to be run
      # after service shutdown and before cluster shutdown.
      before-cluster-shutdown {
        depends-on = [service-stop]
      }

      # Graceful shutdown of the Cluster Sharding regions.
      # This phase is not meant for users to add tasks to.
      cluster-sharding-shutdown-region {
        timeout = 10 s
        depends-on = [before-cluster-shutdown]
      }

      # Emit the leave command for the node that is shutting down.
      # This phase is not meant for users to add tasks to.
      cluster-leave {
        depends-on = [cluster-sharding-shutdown-region]
      }

      # Shutdown cluster singletons
      # This is done as late as possible to allow the shard region shutdown triggered in
      # the "cluster-sharding-shutdown-region" phase to complete before the shard coordinator is shut down.
      # This phase is not meant for users to add tasks to.
      cluster-exiting {
        timeout = 10 s
        depends-on = [cluster-leave]
      }

      # Wait until exiting has been completed
      # This phase is not meant for users to add tasks to.
      cluster-exiting-done {
        depends-on = [cluster-exiting]
      }

      # Shutdown the cluster extension
      # This phase is not meant for users to add tasks to.
      cluster-shutdown {
        depends-on = [cluster-exiting-done]
      }

      # Phase for custom application tasks that are to be run
      # after cluster shutdown and before ActorSystem termination.
      before-actor-system-terminate {
        depends-on = [cluster-shutdown]
      }

      # Last phase. See terminate-actor-system and exit-jvm above.
      # Don't add phases that depend on this phase because the
      # dispatcher and scheduler of the ActorSystem have been shut down.
      # This phase is not meant for users to add tasks to.
      actor-system-terminate {
        timeout = 10 s
        depends-on = [before-actor-system-terminate]
      }
    }
    #//#coordinated-shutdown-phases
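
    # As a sketch (the phase name "my-phase" is made up), an application can
    # splice its own phase into the DAG from application.conf and then register
    # tasks to it with CoordinatedShutdown(system).addTask:
    #   akka.coordinated-shutdown.phases {
    #     my-phase {
    #       depends-on = [service-stop]
    #     }
    #     before-cluster-shutdown {
    #       depends-on = [service-stop, my-phase]
    #     }
    #   }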
  }
}