From b92eb9310039eb59d47964f0cb766cc516dff1e4 Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 11:49:26 +0100 Subject: [PATCH 01/10] fix restate config viewer --- docs/references/server-config.mdx | 1062 +++++++-------------- package-lock.json | 44 + package.json | 1 + scripts/generate-restate-config-viewer.js | 181 ++-- 4 files changed, 495 insertions(+), 793 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index 816fbd8e..ee22011f 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -8,7 +8,7 @@ import Intro from "/snippets/common/default-configuration.mdx" - + Worker options @@ -20,11 +20,11 @@ import Intro from "/snippets/common/default-configuration.mdx" The number of timers in memory limit is used to bound the amount of timers loaded in memory. If this limit is set, when exceeding it, the timers farther in the future will be spilled to disk. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + In order to clean up completed invocations, that is invocations invoked with an idempotency id, or workflows, Restate periodically scans among the completed invocations to check whether they need to be removed or not. This interval sets the scan interval of the cleanup procedure. Default: 1 hour. - + Storage options @@ -34,20 +34,11 @@ import Intro from "/snippets/common/default-configuration.mdx" By default this uses the value defined in `default-num-partitions` in the common section of the config. - + The memory budget for rocksdb memtables in bytes The total is divided evenly across partitions. The divisor is defined in `num-partitions-to-share-memory-budget`. If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes @@ -80,76 +71,23 @@ Default: False (statistics enabled) Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" - - - - - - Disable all metrics - - - - Disable timer stats, and skip histogram stats - - - - Skip timer stats - - - - Collect all stats except time inside mutex lock AND time spent on compression. - - - - Collect all stats except the counters requiring to get time inside the mutex lock. - - - - Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - - - - - - - - - + Verbosity of the LOG. Default: \"error\" - - - - Verbosity of the LOG. - - - - - - + Verbosity of the LOG. 
@@ -158,50 +96,34 @@ Default: \"error\" Default: 1 - + Max size of info LOG file Default: 64MB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + Invoker options - - Definition of a retry policy + + This is **deprecated** and will be removed in the next Restate releases. + +Please refer to `default-retry-policy` for the new configuration options. - + No retry strategy. @@ -211,7 +133,7 @@ Default: 64KiB - + Retry with a fixed delay strategy. @@ -219,7 +141,9 @@ Default: 64KiB - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -229,7 +153,7 @@ Default: 64KiB - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -237,7 +161,9 @@ Default: 64KiB - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -248,20 +174,11 @@ Default: 64KiB Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -270,30 +187,21 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero human-readable bytes - + Threshold to fail the invocation in case protocol messages coming from a service are larger than the specified amount. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes @@ -308,59 +216,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Number of concurrent invocations that can be processed by the invoker. - + Configures throttling for service invocations at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which invocations can be processed, helping to prevent resource exhaustion and maintain system stability under high load. The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and invocations are processed without throttling. 
- - - Throttling options per invoker. - - - - The rate at which the tokens are replenished. - -Syntax: `/` where `` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - - - - The maximum number of tokens the bucket can hold. Default to the rate value if not specified. - + + + The rate at which the tokens are replenished. - +Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - + + The maximum number of tokens the bucket can hold. Default to the rate value if not specified. - + Configures rate limiting for service actions at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which actions can be processed, helping to prevent resource exhaustion and maintain system stability under high load. The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and actions are processed without throttling. - - - Throttling options per invoker. - - - - The rate at which the tokens are replenished. - -Syntax: `/` where `` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - - - - The maximum number of tokens the bucket can hold. Default to the rate value if not specified. - + + + The rate at which the tokens are replenished. - +Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - + + The maximum number of tokens the bucket can hold. Default to the rate value if not specified. @@ -373,8 +261,8 @@ Syntax: `/` where `` is `s|sec|second`, `m|min|minute`, or `h| The maximum number of commands a partition processor will apply in a batch. The larger this value is, the higher the throughput and latency are. - - Partition store snapshotting settings. At a minimum, set `destination` and `snapshot-interval-num-records` to enable snapshotting. For a complete example, see [Snapshots](https://docs.restate.dev/operate/snapshots). + + Snapshots provide a mechanism for safely trimming the log and efficient bootstrapping of new worker nodes. @@ -393,11 +281,11 @@ This setting does not influence explicitly requested snapshots triggered using ` Default: `None` - automatic snapshots are disabled - - Definition of a retry policy + + A retry policy for dealing with retryable object store errors. - + No retry strategy. @@ -407,7 +295,7 @@ Default: `None` - automatic snapshots are disabled - + Retry with a fixed delay strategy. @@ -415,7 +303,9 @@ Default: `None` - automatic snapshots are disabled - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -425,7 +315,7 @@ Default: `None` - automatic snapshots are disabled - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -433,7 +323,9 @@ Default: `None` - automatic snapshots are disabled - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. 
+ Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -444,20 +336,11 @@ Default: `None` - automatic snapshots are disabled Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -500,7 +383,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Admin server options @@ -528,11 +411,11 @@ These will be used during deployment creation to distinguish between an already Concurrency limit for the Admin APIs. Default is unlimited. - + Storage query engine options - + Non-zero human-readable bytes @@ -547,11 +430,11 @@ These will be used during deployment creation to distinguish between an already - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Controls the interval at which cluster controller polls nodes of the cluster. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -565,7 +448,7 @@ These will be used during deployment creation to distinguish between an already - + Ingress options @@ -611,19 +494,19 @@ These will be used during deployment creation to distinguish between an already - + Bifrost options - - An enum with the list of supported loglet providers. + + Default: Replicated - + A local rocksdb-backed loglet. - + Replicated loglets are restate's native log replication system. This requires `log-server` role to run on enough nodes in the cluster. @@ -634,7 +517,8 @@ These will be used during deployment creation to distinguish between an already Configuration of local loglet provider - + + Configuration of replicated loglet provider @@ -645,11 +529,13 @@ Once this maximum is hit, sequencer will induce back pressure on clients. This c Note that this will be increased to fit the biggest batch of records being enqueued. - - Definition of a retry policy + + Sequencer retry policy + +Backoff introduced when sequencer fail to find a suitable spread of log servers - + No retry strategy. @@ -659,7 +545,7 @@ Note that this will be increased to fit the biggest batch of records being enque - + Retry with a fixed delay strategy. @@ -667,7 +553,9 @@ Note that this will be increased to fit the biggest batch of records being enque - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -677,7 +565,7 @@ Note that this will be increased to fit the biggest batch of records being enque - + Retry with an exponential strategy. 
The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -685,7 +573,9 @@ Note that this will be increased to fit the biggest batch of records being enque - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -696,20 +586,11 @@ Note that this will be increased to fit the biggest batch of records being enque Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -718,19 +599,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Sequencer inactivity timeout + +The sequencer is allowed to consider itself quiescent if it did not commit records for this period of time. It may use this to sends pre-emptive release/seal check requests to log-servers. + +The sequencer is also allowed to use this value as interval to send seal/release checks even if it's not quiescent. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Log Server RPC timeout + +Timeout waiting on log server response - - Definition of a retry policy + + Log Server RPC retry policy + +Retry policy for log server RPCs - + No retry strategy. @@ -740,7 +629,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Retry with a fixed delay strategy. @@ -748,7 +637,9 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -758,7 +649,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -766,7 +657,9 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. 
@@ -777,20 +670,11 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -805,7 +689,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ The number of records bifrost will attempt to prefetch from replicated loglet's log-servers for every loglet reader (e.g. partition processor). Note that this mainly impacts readers that are not co-located with the loglet sequencer (i.e. partition processor followers). - + Non-zero human-readable bytes @@ -824,11 +708,11 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - - Definition of a retry policy + + Retry policy to use when bifrost waits for reconfiguration to complete during read operations - + No retry strategy. @@ -838,7 +722,7 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - + Retry with a fixed delay strategy. @@ -846,7 +730,9 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -856,7 +742,7 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -864,7 +750,9 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -875,20 +763,11 @@ Value must be between 0 and 1. It will be clamped at `1.0`. Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -897,24 +776,24 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. 
+ + Interval to wait between retries of loglet seal failures - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Time interval after which bifrost's auto-recovery mechanism will kick in. This is triggered in scenarios where the control plane took too long to complete loglet reconfigurations. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Minimum retry duration used by the exponential backoff mechanism for bifrost appends. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Maximum retry duration used by the exponential backoff mechanism for bifrost appends. - - Human-readable bytes + + Optional size of record cache in bytes. If set to 0, record cache will be disabled. Defaults: 250MB @@ -926,7 +805,7 @@ This allows the log to pick up replication property changes, apply better placem - + Metadata store options @@ -936,20 +815,11 @@ This allows the log to pick up replication property changes, apply better placem Number of in-flight metadata store requests. - + The memory budget for rocksdb memtables in bytes If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes @@ -988,76 +858,23 @@ Default: False (statistics enabled) Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" - - - - - - Disable all metrics - - - - Disable timer stats, and skip histogram stats - - - - Skip timer stats - - - - Collect all stats except time inside mutex lock AND time spent on compression. - - - - Collect all stats except the counters requiring to get time inside the mutex lock. - - - - Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - - - - - - - - - + Verbosity of the LOG. Default: \"error\" - - - - Verbosity of the LOG. - - - - - - + Verbosity of the LOG. @@ -1066,36 +883,18 @@ Default: \"error\" Default: 1 - + Max size of info LOG file Default: 64MB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes @@ -1110,12 +909,16 @@ The number of ticks before triggering an election. The value must be larger than A leader sends heartbeat messages to maintain its leadership every heartbeat ticks. Decrease this value to send heartbeats more often. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + The raft tick interval + +The interval at which the raft node will tick. 
Decrease this value in order to let the Raft node react more quickly to changes. Note, that every tick comes with an overhead. Moreover, the tick interval directly affects the election timeout. If the election timeout becomes too small, then this can cause cluster instabilities due to frequent leader changes. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + The status update interval + +The interval at which the raft node will update its status. Decrease this value in order to see more recent status updates. @@ -1125,19 +928,19 @@ A leader sends heartbeat messages to maintain its leadership every heartbeat tic - + Common network configuration options for communicating with Restate cluster nodes. Note that similar keys are present in other config sections, such as in Service Client options. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + TCP connection timeout for Restate cluster node-to-node network connections. - - Definition of a retry policy + + Retry policy to use for internal node-to-node networking. - + No retry strategy. @@ -1147,7 +950,7 @@ A leader sends heartbeat messages to maintain its leadership every heartbeat tic - + Retry with a fixed delay strategy. @@ -1155,7 +958,9 @@ A leader sends heartbeat messages to maintain its leadership every heartbeat tic - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1165,7 +970,7 @@ A leader sends heartbeat messages to maintain its leadership every heartbeat tic - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -1173,7 +978,9 @@ A leader sends heartbeat messages to maintain its leadership every heartbeat tic - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1184,20 +991,11 @@ A leader sends heartbeat messages to maintain its leadership every heartbeat tic Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1206,15 +1004,15 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - - Non-zero duration string in either jiff human friendly or ISO8601 format. 
Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Timeout for receiving a handshake response from Restate cluster peers. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1226,31 +1024,22 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Disables Zstd compression for internal gRPC network connections - + Non-zero human-readable bytes - + Configuration is only used on nodes running with `log-server` role. - + The memory budget for rocksdb memtables in bytes If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes @@ -1274,7 +1063,13 @@ Default is 0 which maps to floor(number of CPU cores / 2) - Human-readable bytes + The size limit of all WAL files + +Use this to limit the size of WAL files. If the size of all WAL files exceeds this limit, the oldest WAL file will be deleted and if needed, memtable flush will be triggered. + +Note: RocksDB internally counts the uncompressed bytes to determine the WAL size, and since the WAL is compressed, the actual size on disk will be significantly smaller than this value (~1/4 depending on the compression ratio). For instance, if this is set to \"1 MiB\", then rocksdb might decide to flush if the total WAL (on disk) reached ~260 KiB (compressed). + +Default is `0` which translates into 6 times the memory allocated for membtables for this database. @@ -1311,76 +1106,23 @@ Default: False (statistics enabled) Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" - - - - - - Disable all metrics - - - - Disable timer stats, and skip histogram stats - - - - Skip timer stats - - - - Collect all stats except time inside mutex lock AND time spent on compression. - - - - Collect all stats except the counters requiring to get time inside the mutex lock. - - - - Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - - - - - - - - - + Verbosity of the LOG. Default: \"error\" - - - - Verbosity of the LOG. - - - - - - + Verbosity of the LOG. 
@@ -1389,36 +1131,18 @@ Default: \"error\" Default: 1 - + Max size of info LOG file Default: 64MB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes @@ -1431,23 +1155,23 @@ Default: 64KiB - + A worker runs partition processor (journal, state, and drives invocations) - + Admin runs cluster controller and user-facing admin APIs - + Serves the metadata store - + Serves a log-server for replicated loglets - + Serves HTTP ingress requests @@ -1497,27 +1221,27 @@ Default: true The working directory which this Restate node should use for relative paths. The default is `restate-data` under the current working directory. - + The metadata client type to store metadata - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + TCP connection timeout for connecting to the metadata store. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - Definition of a retry policy + + Backoff policy used by the metadata client when it encounters concurrent modifications. - + No retry strategy. @@ -1527,7 +1251,7 @@ Default: true - + Retry with a fixed delay strategy. @@ -1535,7 +1259,9 @@ Default: true - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1545,7 +1271,7 @@ Default: true - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -1553,7 +1279,9 @@ Default: true - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1564,20 +1292,11 @@ Default: true Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1589,7 +1308,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Store metadata on the replicated metadata store that runs on nodes with the metadata-server role. 
@@ -1609,7 +1328,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Store metadata on an external etcd cluster. The addresses are formatted as `host:port` @@ -1625,7 +1344,7 @@ The addresses are formatted as `host:port` - + Store metadata on an external object store. @@ -1638,11 +1357,11 @@ The addresses are formatted as `host:port` Example: `s3://bucket/prefix` - + Definition of a retry policy - + No retry strategy. @@ -1652,7 +1371,7 @@ Example: `s3://bucket/prefix` - + Retry with a fixed delay strategy. @@ -1660,7 +1379,9 @@ Example: `s3://bucket/prefix` - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1670,7 +1391,7 @@ Example: `s3://bucket/prefix` - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -1678,7 +1399,9 @@ Example: `s3://bucket/prefix` - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1689,20 +1412,11 @@ Example: `s3://bucket/prefix` Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1773,8 +1487,8 @@ Note that this value only impacts the cluster initial provisioning and will not To update existing clusters use the `restatectl` utility. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + This timeout is used when shutting down the various Restate components to drain all the internal queues. @@ -1785,19 +1499,19 @@ To update existing clusters use the `restatectl` utility. Log filter configuration. Can be overridden by the `RUST_LOG` environment variable. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. - - Log format + + Format to use when logging. - + Enables verbose logging. Not recommended in production. - + Enables compact logging. - + Enables json logging. You can use a json log collector to ingest these logs and further process them. @@ -1828,7 +1542,7 @@ This configures the restate-managed storage thread pool for performing high-prio This configures the restate-managed storage thread pool for performing low-priority or latency-insensitive storage tasks. 
- + Non-zero human-readable bytes @@ -1844,53 +1558,54 @@ This configures the restate-managed storage thread pool for performing low-prior The number of threads to reserve to high priority Rocksdb background tasks. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + This defines the duration after which a write is to be considered in \"stall\" state. For every write that meets this threshold, the system will increment the `restate.rocksdb_stall_flare` gauge, if the write is unstalled, the guage will be updated accordingly. Note if automatic memory budgeting is enabled, it should be safe to allow rocksdb to stall if it hits the limit. However, if rocksdb stall kicked in, it's unlikely that the system will recover from this without intervention. - + + Defines the level of PerfContext used internally by rocksdb. Default is `enable-count` which should be sufficient for most users. Note that higher levels incur a CPU cost and might slow down the critical path. - + Disable perf stats - + Enables only count stats - + Count stats and enable time stats except for mutexes - + Other than time, also measure CPU time counters. Still don't measure time (neither wall time nor CPU time) for mutexes - + Enables count and time stats - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + The idle time after which the node will check for metadata updates from metadata store. This helps the node detect if it has been operating with stale metadata for extended period of time, primarily because it didn't interact with other peers in the cluster during that period. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + When a node detects that a new metadata version exists, it'll attempt to fetch it from its peers. After this timeout duration has passed, the node will attempt to fetch the metadata from metadata store as well. This is to ensure that the nodes converge quickly while reducing the load on the metadata store. - - Definition of a retry policy + + The retry policy for network related errors - + No retry strategy. @@ -1900,7 +1615,7 @@ This configures the restate-managed storage thread pool for performing low-prior - + Retry with a fixed delay strategy. @@ -1908,7 +1623,9 @@ This configures the restate-managed storage thread pool for performing low-prior - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1918,7 +1635,7 @@ This configures the restate-managed storage thread pool for performing low-prior - + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. @@ -1926,7 +1643,9 @@ This configures the restate-managed storage thread pool for performing low-prior - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Initial interval for the first retry attempt. 
+ +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1937,20 +1656,11 @@ This configures the restate-managed storage thread pool for performing low-prior Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1959,8 +1669,8 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + The timeout until the node gives up joining a cluster and initializing itself. @@ -2016,21 +1726,23 @@ This file is currently only read on client creation, but this may change in futu - Headers that should be applied to all outgoing requests (HTTP and Lambda). Defaults to `x-restate-cluster-name: `. + Headers that should be applied to all outgoing requests (HTTP and Lambda). Defaults to `x-restate-cluster-name: <cluster name>`. - - Configuration for the HTTP/2 keep-alive mechanism, using PING frames. - -Please note: most gateways don't propagate the HTTP/2 keep-alive between downstream and upstream hosts. In those environments, you need to make sure the gateway can detect a broken connection to the upstream deployment(s). + + Configuration for the HTTP/2 keep-alive mechanism, using PING frames. If unset, HTTP/2 keep-alive are disabled. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Sets an interval for HTTP/2 PING frames should be sent to keep a connection alive. + +You should set this timeout with a value lower than the `abort_timeout`. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Sets a timeout for receiving an acknowledgement of the keep-alive ping. + +If the ping is not acknowledged within the timeout, the connection will be closed. @@ -2050,8 +1762,8 @@ Please note: most gateways don't propagate the HTTP/2 keep-alive between downstr - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + How long to wait for a TCP connection to be established before considering it a failed attempt. @@ -2072,20 +1784,11 @@ Default: None An external ID to apply to any AssumeRole operations taken by this client. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html Can be overridden by the `AWS_EXTERNAL_ID` environment variable. - + Request minimum size to enable compression. The request size includes the total of the journal replay and its framing using Restate service protocol, without accounting for the json envelope and the base 64 encoding. 
Default: 4MB (The default AWS Lambda Limit is 6MB, 4MB roughly accounts for +33% of Base64 and the json envelope). - - - - Human-readable bytes - - - - - - + Human-readable bytes @@ -2112,76 +1815,23 @@ Default: False (statistics enabled) Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" - - - - - - Disable all metrics - - - - Disable timer stats, and skip histogram stats - - - - Skip timer stats - - - - Collect all stats except time inside mutex lock AND time spent on compression. - - - - Collect all stats except the counters requiring to get time inside the mutex lock. - - - - Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - - - - - - - - - + Verbosity of the LOG. Default: \"error\" - - - - Verbosity of the LOG. - - - - - - + Verbosity of the LOG. @@ -2190,40 +1840,22 @@ Default: \"error\" Default: 1 - + Max size of info LOG file Default: 64MB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB - - - - Non-zero human-readable bytes - - - - - - + Non-zero human-readable bytes - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + The interval at which the failure detector will tick. Decrease this value for faster reaction to node failures. Note, that every tick comes with an overhead. @@ -2238,7 +1870,7 @@ Default: 64KiB Gossips before failure detector is stable - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -2254,89 +1886,73 @@ Note: this threshold does not apply to a cluster that's configured with a single In addition to basic health/liveness information, the gossip protocol is used to exchange extra information about the roles hosted by this node. For instance, which partitions are currently running, their configuration versions, and the durable LSN of the corresponding partition databases. This information is sent every Nth gossip message. This setting controls the frequency of this exchange. For instance, `10` means that every 10th gossip message will contain the extra information about. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + The time skew is the maximum acceptable time difference between the local node and the time reported by peers via gossip messages. The time skew is also used to ignore gossip messages that are too old. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Maximum journal retention duration that can be configured. 
When discovering a service deployment, or when modifying the journal retention using the Admin API, the given value will be clamped. Unset means no limit. - - - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + The default retry policy to use for invocations. The retry policy can be customized on a service/handler basis, using the respective SDK APIs. - - - - - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + + + Initial interval for the first retry attempt. + + + + The factor to use to compute the next retry attempt. Default: `2.0`. + - - The factor to use to compute the next retry attempt. Default: `2.0`. + + Number of maximum attempts (including the initial) before giving up. No retries if set to 1. + + + + Unlimited retries. - + + Bounded number of retries. - - - Unlimited retries. - - - - Bounded number of retries. - - - - - - + + - - - - - Pause the invocation when max attempts are reached. - - - - Kill the invocation when max attempts are reached. - + + - + + Behavior when max attempts are reached. + + + + Pause the invocation when max attempts are reached. - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Kill the invocation when max attempts are reached. - + + Maximum interval between retries. diff --git a/package-lock.json b/package-lock.json index 70e68529..732226e4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,12 +9,31 @@ "version": "1.0.0", "license": "ISC", "dependencies": { + "@apidevtools/json-schema-ref-parser": "^14.2.1", "@restatedev/restate": "^1.4.4", "chokidar": "^4.0.3", "concurrently": "^9.2.0", "yoga-wasm-web": "^0.3.3" } }, + "node_modules/@apidevtools/json-schema-ref-parser": { + "version": "14.2.1", + "resolved": "https://registry.npmjs.org/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-14.2.1.tgz", + "integrity": "sha512-HmdFw9CDYqM6B25pqGBpNeLCKvGPlIx1EbLrVL0zPvj50CJQUHyBNBw45Muk0kEIkogo1VZvOKHajdMuAzSxRg==", + "license": "MIT", + "dependencies": { + "js-yaml": "^4.1.0" + }, + "engines": { + "node": ">= 20" + }, + "funding": { + "url": "https://github.com/sponsors/philsturgeon" + }, + "peerDependencies": { + "@types/json-schema": "^7.0.15" + } + }, "node_modules/@restatedev/restate": { "version": "1.4.4", "resolved": "https://registry.npmjs.org/@restatedev/restate/-/restate-1.4.4.tgz", @@ -86,6 +105,13 @@ "integrity": "sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==", "hasInstallScript": true }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "license": "MIT", + "peer": true + }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -110,6 +136,12 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/argparse": { + "version": "2.0.1", + 
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -252,6 +284,18 @@ "node": ">=8" } }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, "node_modules/lodash": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", diff --git a/package.json b/package.json index d23e4e19..1dec1381 100644 --- a/package.json +++ b/package.json @@ -10,6 +10,7 @@ "author": "", "license": "ISC", "dependencies": { + "@apidevtools/json-schema-ref-parser": "^14.2.1", "@restatedev/restate": "^1.4.4", "chokidar": "^4.0.3", "concurrently": "^9.2.0", diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index 1f50a8dc..9c84cb0a 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -2,23 +2,42 @@ const fs = require('fs'); const path = require('path'); +const $RefParser = require("@apidevtools/json-schema-ref-parser"); const schemaPath = "docs/schemas/restate-server-configuration-schema.json"; const outputPath = "docs/references/server-config.mdx"; -function parseJsonSchema(schemaPath) { - const schemaContent = fs.readFileSync(schemaPath, 'utf8'); - const schema = JSON.parse(schemaContent); - return schema; +async function parseJsonSchema(schemaPath) { + try { + // Use $RefParser directly to dereference all $ref pointers + return await $RefParser.dereference(schemaPath, { + mutateInputSchema: false, + continueOnError: false, + dereference: { + circular: "ignore" + } + }); + } catch (error) { + console.error('Error parsing JSON schema:', error); + throw error; + } } function formatDescription(description) { if (!description) return ''; - // Convert markdown links to proper format and escape quotes + // Escape HTML-like syntax in code blocks and regular text return description .replace(/\n\n/g, '\n\n') - .replace(/`([^`]+)`/g, '`$1`') + // Preserve code blocks with backticks but escape any HTML-like content within + .replace(/`([^`]+)`/g, (match, code) => { + return '`' + code.replace(//g, '>') + '`'; + }) + // Escape standalone HTML-like tags that aren't in code blocks + .replace(/<(?!\/?\w+[^>]*>)/g, '<') + .replace(/(?]*)>/g, '>') + // Convert markdown links to proper format .replace(/\[(.*?)\]\((.*?)\)/g, '[$1]($2)') + // Escape quotes for JSX attributes .replace(/"/g, '\\"'); } @@ -56,7 +75,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = const indent = ' '.repeat(level); const { type, optional } = getTypeFromSchema(propSchema); const required = isRequired && !optional ? 
' required' : ''; - const description = formatDescription(propSchema.description || propSchema.title || ''); + const description = formatDescription(propSchema.description|| propSchema.title || ''); // Format default value properly for the attribute let defaultAttr = ''; @@ -106,17 +125,82 @@ function generateResponseField(propName, propSchema, isRequired = false, level = output += `${indent} \n`; } - // Handle oneOf/anyOf - if (propSchema.oneOf || propSchema.anyOf) { - const variants = propSchema.oneOf || propSchema.anyOf; + // Handle anyOf + if (propSchema.anyOf) { + const variants = propSchema.anyOf; + + // Handle the optional type case of [T, null] + if (variants.length === 2 && variants.some(variant => variant.type === "null")) { + console.log(variants); + let optionalVariant = variants.find(variant => variant.type !== "null") + + const optionalType = getTypeFromSchema(optionalVariant); + output = `${indent}\n`; + + if (description) { + output += `${indent} ${description}\n`; + } + if (optionalType.type === 'object' && optionalVariant.properties) { + const requiredProps = optionalVariant.required || []; + output += `${indent} \n`; + output += `${indent} \n`; + + Object.entries(optionalVariant.properties).forEach(([subPropName, subPropSchema]) => { + output += generateResponseField( + subPropName, + subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + + output += `${indent} \n`; + } else { + output += `${indent} ${formatDescription(optionalVariant.description)}\n` + } + } else { + output += `${indent} \n`; + output += `${indent} \n`; + + variants.forEach((variant, index) => { + let variantName; + if (variant.enum && variant.enum.length === 1) { + variantName = `${variant.enum[0]}`; + } else if (variant.title) { + variantName = `Option ${index + 1}: ${variant.title}`; + } else if (variant.const !== undefined) { + variantName = `"${variant.const}"`; + } else { + variantName = `Option ${index + 1}`; + } + output += generateResponseField(variantName, variant, false, level + 2); + }); + + output += `${indent} \n`; + } + } + + // Handle oneOf + if (propSchema.oneOf) { + const variants = propSchema.oneOf + output += `${indent} \n`; output += `${indent} \n`; - + variants.forEach((variant, index) => { - const variantName = variant.title || `option-${index + 1}`; + let variantName; + if (variant.enum && variant.enum.length === 1) { + variantName = `${variant.enum[0]}`; + } else if (variant.title) { + variantName = `Option ${index + 1}: ${variant.title}`; + } else if (variant.const !== undefined) { + variantName = `"${variant.const}"`; + } else { + variantName = `Option ${index + 1}`; + } output += generateResponseField(variantName, variant, false, level + 2); }); - + output += `${indent} \n`; } @@ -124,46 +208,6 @@ function generateResponseField(propName, propSchema, isRequired = false, level = return output; } -function expandDefinitions(schema, definitions) { - // Helper function to resolve $ref definitions - function resolveRef(propSchema) { - if (propSchema.$ref && propSchema.$ref.startsWith('#/definitions/')) { - const defName = propSchema.$ref.split('/').pop(); - if (definitions[defName]) { - return { ...definitions[defName], _refName: defName }; - } - } - return propSchema; - } - - // Recursively expand references in the schema - function expandRefs(obj) { - if (typeof obj !== 'object' || obj === null) return obj; - - if (obj.$ref) { - return expandRefs(resolveRef(obj)); - } - - const result = {}; - for (const [key, value] of Object.entries(obj)) { - if (key === 
'properties' && typeof value === 'object') { - result[key] = {}; - for (const [propName, propSchema] of Object.entries(value)) { - result[key][propName] = expandRefs(propSchema); - } - } else if (Array.isArray(value)) { - result[key] = value.map(item => expandRefs(item)); - } else if (typeof value === 'object') { - result[key] = expandRefs(value); - } else { - result[key] = value; - } - } - return result; - } - - return expandRefs(schema); -} function generateRestateConfigViewer(schema) { let output = `---\ntitle: "Restate Server Configuration"\ndescription: "Reference of the configuration options for Restate Server."\nmode: "wide"\n---\n\n` + @@ -171,35 +215,32 @@ function generateRestateConfigViewer(schema) { '\n' + '' + '\n\n'; - - // Expand definitions into the main schema - const definitions = schema.definitions || {}; - const expandedSchema = expandDefinitions(schema, definitions); - - if (expandedSchema.properties) { - const requiredProps = expandedSchema.required || []; - - Object.entries(expandedSchema.properties).forEach(([propName, propSchema]) => { + + if (schema.properties) { + const requiredProps = schema.required || []; + + Object.entries(schema.properties).forEach(([propName, propSchema]) => { output += generateResponseField( - propName, - propSchema, - requiredProps.includes(propName), + propName, + propSchema, + requiredProps.includes(propName), 0 ); }); } - + return output; } -function main() { + +async function generate() { if (!fs.existsSync(schemaPath)) { console.error(`Schema file not found: ${schemaPath}`); process.exit(1); } try { - const schema = parseJsonSchema(schemaPath); + const schema = await parseJsonSchema(schemaPath); const mdxContent = generateRestateConfigViewer(schema); if (outputPath) { @@ -215,7 +256,7 @@ function main() { } if (require.main === module) { - main(); + generate(); } -module.exports = { generateSchemaViewer: generateRestateConfigViewer }; \ No newline at end of file +module.exports = { generateSchemaViewer: generate }; \ No newline at end of file From 8ef00e052b08e88afbff87dede36e9be212286eb Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 13:10:44 +0100 Subject: [PATCH 02/10] Improve rendering of options --- docs/references/server-config.mdx | 1111 ++++++++++----------- scripts/generate-restate-config-viewer.js | 90 +- 2 files changed, 622 insertions(+), 579 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index ee22011f..b84a8d8e 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -10,34 +10,41 @@ import Intro from "/snippets/common/default-configuration.mdx" Worker options + Internal queue for partition processor communication + The number of timers in memory limit is used to bound the amount of timers loaded in memory. If this limit is set, when exceeding it, the timers farther in the future will be spilled to disk. + In order to clean up completed invocations, that is invocations invoked with an idempotency id, or workflows, Restate periodically scans among the completed invocations to check whether they need to be removed or not. This interval sets the scan interval of the cleanup procedure. Default: 1 hour. + Storage options + How many partitions to divide memory across? By default this uses the value defined in `default-num-partitions` in the common section of the config. + The memory budget for rocksdb memtables in bytes The total is divided evenly across partitions. 
The divisor is defined in `num-partitions-to-share-memory-budget`. If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. + Non-zero human-readable bytes @@ -45,34 +52,41 @@ The total is divided evenly across partitions. The divisor is defined in `num-pa The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to memtables (See `rocksdb-total-memtables-ratio` in common). The budget is then divided evenly across partitions. The divisor is defined in `num-partitions-to-share-memory-budget` + Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. + Use O_DIRECT for writes in background flush and compactions. + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) + Disable rocksdb statistics collection Default: False (statistics enabled) + Default: the number of CPU cores on this node. + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. + Non-zero human-readable bytes @@ -80,6 +94,7 @@ Default: False (statistics enabled) StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" + @@ -87,6 +102,7 @@ Default: \"except-detailed-timers\" Verbosity of the LOG. Default: \"error\" + Verbosity of the LOG. @@ -94,12 +110,14 @@ Default: \"error\" Number of info LOG files to keep Default: 1 + Max size of info LOG file Default: 64MB + Non-zero human-readable bytes @@ -107,6 +125,7 @@ Default: 64MB Uncompressed block size Default: 64KiB + Non-zero human-readable bytes @@ -115,73 +134,74 @@ Default: 64KiB Invoker options + This is **deprecated** and will be removed in the next Restate releases. Please refer to `default-retry-policy` for the new configuration options. + - - - No retry strategy. - - - - + + + + No retry strategy. - + - - Retry with a fixed delay strategy. - - - - + + + + Retry with a fixed delay strategy. - - Interval between retries. + + + + + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - Number of maximum attempts before giving up. Infinite retries if unset. - + + + + Number of maximum attempts before giving up. Infinite retries if unset. - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - Initial interval for the first retry attempt. + + + + + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - The factor to use to compute the next retry attempt. - + - - Number of maximum attempts before giving up. Infinite retries if unset. - + + The factor to use to compute the next retry attempt. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + - - Maximum interval between retries. 
+ + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -189,47 +209,57 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Non-zero human-readable bytes + Threshold to fail the invocation in case protocol messages coming from a service are larger than the specified amount. + Non-zero human-readable bytes Temporary directory to use for the invoker temporary files. If empty, the system temporary directory will be used instead. + Defines the threshold after which queues invocations will spill to disk at the path defined in `tmp-dir`. In other words, this is the number of invocations that can be kept in memory before spilling to disk. This is a per-partition limit. + Number of concurrent invocations that can be processed by the invoker. + Configures throttling for service invocations at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which invocations can be processed, helping to prevent resource exhaustion and maintain system stability under high load. The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and invocations are processed without throttling. + The rate at which the tokens are replenished. Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. + The maximum number of tokens the bucket can hold. Default to the rate value if not specified. + @@ -239,16 +269,19 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m Configures rate limiting for service actions at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which actions can be processed, helping to prevent resource exhaustion and maintain system stability under high load. The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and actions are processed without throttling. + The rate at which the tokens are replenished. Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. + The maximum number of tokens the bucket can hold. Default to the rate value if not specified. + @@ -259,16 +292,19 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m The maximum number of commands a partition processor will apply in a batch. The larger this value is, the higher the throughput and latency are. 
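As a hedged illustration of the token-bucket throttling described above for invocations and actions: tokens are replenished at the configured rate, capped by the burst capacity, and a request only proceeds when a token is available. This sketch is an assumption for explanation only, not Restate's actual implementation, and the class and field names are hypothetical.

```js
// Illustrative token-bucket sketch (hypothetical names, not Restate's code).
class TokenBucket {
  constructor(ratePerSecond, burstCapacity = ratePerSecond) {
    this.rate = ratePerSecond;     // tokens replenished per second, e.g. "50/sec"
    this.capacity = burstCapacity; // maximum number of tokens the bucket can hold
    this.tokens = burstCapacity;
    this.lastRefill = Date.now();
  }

  tryAcquire(n = 1) {
    const elapsedSeconds = (Date.now() - this.lastRefill) / 1000;
    this.tokens = Math.min(this.capacity, this.tokens + elapsedSeconds * this.rate);
    this.lastRefill = Date.now();
    if (this.tokens >= n) {
      this.tokens -= n; // admit the invocation/action
      return true;
    }
    return false; // throttled: caller should retry or back off
  }
}

// A limit equivalent to "50/sec" with the burst defaulting to the rate:
const limiter = new TokenBucket(50);
console.log(limiter.tryAcquire()); // true while tokens remain
```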
+ Snapshots provide a mechanism for safely trimming the log and efficient bootstrapping of new worker nodes. + Base URL for cluster snapshots. Supports `s3://` and `file://` protocol scheme. S3-compatible object stores must support ETag-based conditional writes. Default: `None` + @@ -279,71 +315,72 @@ As snapshots are created asynchronously, the actual number of new records that w This setting does not influence explicitly requested snapshots triggered using `restatectl`. Default: `None` - automatic snapshots are disabled + A retry policy for dealing with retryable object store errors. + - - - No retry strategy. - - - - + + + + No retry strategy. - + - - Retry with a fixed delay strategy. - - - - + + + + Retry with a fixed delay strategy. + + + - - Interval between retries. + + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - Number of maximum attempts before giving up. Infinite retries if unset. - + + + + Number of maximum attempts before giving up. Infinite retries if unset. - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. + + + - - Initial interval for the first retry attempt. + + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - The factor to use to compute the next retry attempt. - + - - Number of maximum attempts before giving up. Infinite retries if unset. - + + The factor to use to compute the next retry attempt. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + - - Maximum interval between retries. + + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -351,30 +388,37 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ The AWS configuration profile to use for S3 object store destinations. If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. + AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. + Username for Minio, or consult the service documentation for other S3-compatible stores. + Password for Minio, or consult the service documentation for other S3-compatible stores. + This is only needed with short-term STS session credentials. + When you use Amazon S3, this is typically inferred from the region and there is no need to set it. 
With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. + Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. + @@ -385,20 +429,24 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Admin server options + Address to bind for the Admin APIs. + Optional advertised Admin API endpoint. + List of header names considered routing headers. These will be used during deployment creation to distinguish between an already existing deployment and a new deployment. + @@ -409,22 +457,27 @@ These will be used during deployment creation to distinguish between an already Concurrency limit for the Admin APIs. Default is unlimited. + Storage query engine options + Non-zero human-readable bytes + The path to spill to + The degree of parallelism to use for query execution (Defaults to the number of available cores). + @@ -432,14 +485,17 @@ These will be used during deployment creation to distinguish between an already Controls the interval at which cluster controller polls nodes of the cluster. + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Disable serving the Restate Web UI on the admin port. Default is `false`. + @@ -450,14 +506,17 @@ These will be used during deployment creation to distinguish between an already Ingress options + The address to bind for the ingress. + Local concurrency limit to use to limit the amount of concurrent requests. If exceeded, the ingress will reply immediately with an appropriate status code. Default is unlimited. + @@ -465,14 +524,17 @@ These will be used during deployment creation to distinguish between an already Configuration options to connect to a Kafka cluster. + Cluster name (Used to identify subscriptions). + Initial list of brokers (host or host:port). + @@ -489,6 +551,7 @@ These will be used during deployment creation to distinguish between an already Ingress endpoint that the Web UI should use to interact with. + @@ -496,29 +559,26 @@ These will be used during deployment creation to distinguish between an already Bifrost options + Default: Replicated - - - - A local rocksdb-backed loglet. - - - - Replicated loglets are restate's native log replication system. This requires `log-server` role to run on enough nodes in the cluster. - - + + + - `local` : A local rocksdb-backed loglet. + - `replicated` : Replicated loglets are restate's native log replication system. This requires `log-server` role to run on enough nodes in the cluster. Configuration of local loglet provider + Configuration of replicated loglet provider + @@ -527,73 +587,74 @@ These will be used during deployment creation to distinguish between an already Once this maximum is hit, sequencer will induce back pressure on clients. This controls the total number of records regardless of how many batches. Note that this will be increased to fit the biggest batch of records being enqueued. + Sequencer retry policy Backoff introduced when sequencer fail to find a suitable spread of log servers + - - - No retry strategy. - - - - + + + + No retry strategy. - + - - Retry with a fixed delay strategy. - - - - + + + + Retry with a fixed delay strategy. + + + - - Interval between retries. + + Interval between retries. 
Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - Number of maximum attempts before giving up. Infinite retries if unset. - + + + + Number of maximum attempts before giving up. Infinite retries if unset. - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. + + + - - Initial interval for the first retry attempt. + + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - The factor to use to compute the next retry attempt. - + - - Number of maximum attempts before giving up. Infinite retries if unset. - + + The factor to use to compute the next retry attempt. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + - - Maximum interval between retries. + + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -605,79 +666,81 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ The sequencer is allowed to consider itself quiescent if it did not commit records for this period of time. It may use this to sends pre-emptive release/seal check requests to log-servers. The sequencer is also allowed to use this value as interval to send seal/release checks even if it's not quiescent. + Log Server RPC timeout Timeout waiting on log server response + Log Server RPC retry policy Retry policy for log server RPCs + - - - No retry strategy. - - - - + + + + No retry strategy. - + - - Retry with a fixed delay strategy. - - - - + + + + Retry with a fixed delay strategy. - - Interval between retries. + + -Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + + Interval between retries. - - Number of maximum attempts before giving up. Infinite retries if unset. - +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - + + Number of maximum attempts before giving up. Infinite retries if unset. - - Initial interval for the first retry attempt. + -Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - The factor to use to compute the next retry attempt. - + + - - Number of maximum attempts before giving up. Infinite retries if unset. - + + Initial interval for the first retry attempt. 
+ +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + + + + The factor to use to compute the next retry attempt. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + - - Maximum interval between retries. + + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -687,10 +750,12 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Maximum number of records to prefetch from log servers The number of records bifrost will attempt to prefetch from replicated loglet's log-servers for every loglet reader (e.g. partition processor). Note that this mainly impacts readers that are not co-located with the loglet sequencer (i.e. partition processor followers). + Non-zero human-readable bytes + @@ -703,6 +768,7 @@ The higher the value is, the longer bifrost will wait before it triggers the nex To illustrate, if readahead-records is set to 100 and readahead-trigger-ratio is 1.0. Then bifrost will prefetch up to 100 records from log-servers and will not trigger the next prefetch unless the consumer consumes 100% of this buffer. This means that bifrost will read in batches but will not do while the consumer is still reading the previous batch. Value must be between 0 and 1. It will be clamped at `1.0`. + @@ -710,67 +776,67 @@ Value must be between 0 and 1. It will be clamped at `1.0`. Retry policy to use when bifrost waits for reconfiguration to complete during read operations + - - - No retry strategy. - - - - + + + + No retry strategy. - + - - Retry with a fixed delay strategy. - - - - + + + + Retry with a fixed delay strategy. - - Interval between retries. + + + + + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - Number of maximum attempts before giving up. Infinite retries if unset. - + + + + Number of maximum attempts before giving up. Infinite retries if unset. - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - Initial interval for the first retry attempt. + + + + + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - The factor to use to compute the next retry attempt. - + - - Number of maximum attempts before giving up. Infinite retries if unset. - + + The factor to use to compute the next retry attempt. - - Maximum interval between retries. + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + Maximum interval between retries. 
Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -778,28 +844,34 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Interval to wait between retries of loglet seal failures + Time interval after which bifrost's auto-recovery mechanism will kick in. This is triggered in scenarios where the control plane took too long to complete loglet reconfigurations. + Minimum retry duration used by the exponential backoff mechanism for bifrost appends. + Maximum retry duration used by the exponential backoff mechanism for bifrost appends. + Optional size of record cache in bytes. If set to 0, record cache will be disabled. Defaults: 250MB + When enabled, automatic improvement periodically checks with the loglet provider if the loglet configuration can be improved by performing a reconfiguration. This allows the log to pick up replication property changes, apply better placement of replicas, or for other reasons. + @@ -807,18 +879,21 @@ This allows the log to pick up replication property changes, apply better placem Metadata store options + Limit number of in-flight requests Number of in-flight metadata store requests. + The memory budget for rocksdb memtables in bytes If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. + Non-zero human-readable bytes @@ -826,40 +901,48 @@ If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to memtables (See `rocksdb-total-memtables-ratio` in common). + Auto join the metadata cluster when being started Defines whether this node should auto join the metadata store cluster when being started for the first time. + Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. + Use O_DIRECT for writes in background flush and compactions. + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) + Disable rocksdb statistics collection Default: False (statistics enabled) + Default: the number of CPU cores on this node. + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. + Non-zero human-readable bytes @@ -867,6 +950,7 @@ Default: False (statistics enabled) StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" + @@ -874,6 +958,7 @@ Default: \"except-detailed-timers\" Verbosity of the LOG. Default: \"error\" + Verbosity of the LOG. 
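The single-field optional entries rendered throughout these hunks (duration strings, byte sizes, and so on) come from the `anyOf: [T, null]` special case added to the generator. A minimal sketch of that collapse follows; the helper name `collapseOptional` is assumed for illustration and is not part of the script.

```js
// Sketch of the `anyOf: [T, null]` special case: keep the non-null variant and
// treat the whole field as optional (assumed helper, illustration only).
function collapseOptional(propSchema) {
  const variants = propSchema.anyOf || [];
  if (variants.length === 2 && variants.some((v) => v.type === "null")) {
    const inner = variants.find((v) => v.type !== "null");
    return {
      ...inner,
      optional: true,
      description: propSchema.description || inner.description,
    };
  }
  return propSchema;
}

// Example: an optional duration field, as rendered in the hunks above.
console.log(
  collapseOptional({
    description: "Maximum interval between retries.",
    anyOf: [{ type: "string", description: "Duration string" }, { type: "null" }],
  })
);
// -> { type: 'string', description: 'Maximum interval between retries.', optional: true }
```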
@@ -881,12 +966,14 @@ Default: \"error\" Number of info LOG files to keep Default: 1 + Max size of info LOG file Default: 64MB + Non-zero human-readable bytes @@ -894,6 +981,7 @@ Default: 64MB Uncompressed block size Default: 64KiB + Non-zero human-readable bytes @@ -901,28 +989,33 @@ Default: 64KiB The number of ticks before triggering an election The number of ticks before triggering an election. The value must be larger than `raft_heartbeat_tick`. It's recommended to set `raft_election_tick = 10 * raft_heartbeat_tick`. Decrease this value if you want to react faster to failed leaders. Note, decreasing this value too much can lead to cluster instabilities due to falsely detecting dead leaders. + The number of ticks before sending a heartbeat A leader sends heartbeat messages to maintain its leadership every heartbeat ticks. Decrease this value to send heartbeats more often. + The raft tick interval The interval at which the raft node will tick. Decrease this value in order to let the Raft node react more quickly to changes. Note, that every tick comes with an overhead. Moreover, the tick interval directly affects the election timeout. If the election timeout becomes too small, then this can cause cluster instabilities due to frequent leader changes. + The status update interval The interval at which the raft node will update its status. Decrease this value in order to see more recent status updates. + The threshold for trimming the raft log. The log will be trimmed if the number of apply entries exceeds this threshold. The default value is `1000`. + @@ -930,75 +1023,77 @@ The interval at which the raft node will update its status. Decrease this value Common network configuration options for communicating with Restate cluster nodes. Note that similar keys are present in other config sections, such as in Service Client options. + TCP connection timeout for Restate cluster node-to-node network connections. + Retry policy to use for internal node-to-node networking. + - - - No retry strategy. - - - - + + + + No retry strategy. - + - - Retry with a fixed delay strategy. - - - - + + + + Retry with a fixed delay strategy. - - Interval between retries. + + + + + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - Number of maximum attempts before giving up. Infinite retries if unset. - + + + + Number of maximum attempts before giving up. Infinite retries if unset. - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - Initial interval for the first retry attempt. + + + + + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - The factor to use to compute the next retry attempt. - + - - Number of maximum attempts before giving up. Infinite retries if unset. - + + The factor to use to compute the next retry attempt. - - Maximum interval between retries. + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + Maximum interval between retries. 
Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1006,26 +1101,32 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Timeout for receiving a handshake response from Restate cluster peers. + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + HTTP/2 Adaptive Window + Disables Zstd compression for internal gRPC network connections + Non-zero human-readable bytes + @@ -1033,12 +1134,14 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Configuration is only used on nodes running with `log-server` role. + The memory budget for rocksdb memtables in bytes If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. + Non-zero human-readable bytes @@ -1048,10 +1151,12 @@ If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. This defines the total memory for rocksdb as a ratio of all memory available to the log-server. (See `rocksdb-total-memtables-ratio` in common). + Disable fsync of WAL on every batch + @@ -1060,6 +1165,7 @@ This defines the total memory for rocksdb as a ratio of all memory available to Setting this to 1 means no sub-compactions are allowed (i.e. only 1 thread will do the compaction). Default is 0 which maps to floor(number of CPU cores / 2) + @@ -1070,44 +1176,53 @@ Use this to limit the size of WAL files. If the size of all WAL files exceeds th Note: RocksDB internally counts the uncompressed bytes to determine the WAL size, and since the WAL is compressed, the actual size on disk will be significantly smaller than this value (~1/4 depending on the compression ratio). For instance, if this is set to \"1 MiB\", then rocksdb might decide to flush if the total WAL (on disk) reached ~260 KiB (compressed). Default is `0` which translates into 6 times the memory allocated for membtables for this database. + Trigger a commit when the batch size exceeds this threshold. Set to 0 or 1 to commit the write batch on every command. + The number of messages that can queue up on input network stream while request processor is busy. + Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. + Use O_DIRECT for writes in background flush and compactions. + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) + Disable rocksdb statistics collection Default: False (statistics enabled) + Default: the number of CPU cores on this node. + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. 
That way RocksDB's compaction is doing sequential instead of random reads. + Non-zero human-readable bytes @@ -1115,6 +1230,7 @@ Default: False (statistics enabled) StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" + @@ -1122,6 +1238,7 @@ Default: \"except-detailed-timers\" Verbosity of the LOG. Default: \"error\" + Verbosity of the LOG. @@ -1129,12 +1246,14 @@ Default: \"error\" Number of info LOG files to keep Default: 1 + Max size of info LOG file Default: 64MB + Non-zero human-readable bytes @@ -1142,6 +1261,7 @@ Default: 64MB Uncompressed block size Default: 64KiB + Non-zero human-readable bytes @@ -1150,32 +1270,17 @@ Default: 64KiB Defines the roles which this Restate node should run, by default the node starts with all roles. + - - - A worker runs partition processor (journal, state, and drives invocations) - - - - Admin runs cluster controller and user-facing admin APIs - - - - Serves the metadata store - - - - Serves a log-server for replicated loglets - - - - Serves HTTP ingress requests - - - + + - `worker` : A worker runs partition processor (journal, state, and drives invocations) + - `admin` : Admin runs cluster controller and user-facing admin APIs + - `metadata-server` : Serves the metadata store + - `log-server` : Serves a log-server for replicated loglets + - `http-ingress` : Serves HTTP ingress requests @@ -1183,6 +1288,7 @@ Default: 64KiB Unique name for this node in the cluster. The node must not change unless it's started with empty local store. It defaults to the node's hostname. + @@ -1195,14 +1301,17 @@ NOTE: It's _strongly_ recommended to not change the node's location string after When this value is not set, the node is considered to be in the _default_ location. The _default_ location means that the node is not assigned to any specific region or zone. ## Examples - `us-west` -- the node is in the `us-west` region. - `us-west.a1` -- the node is in the `us-west` region and in the `a1` zone. - `` -- [default] the node is in the default location + If set, the node insists on acquiring this node ID. + A unique identifier for the cluster. All nodes in the same cluster should have the same. + @@ -1215,242 +1324,97 @@ Use `restatectl` to provision the cluster/node if automatic provisioning is disa This can also be explicitly disabled by setting this value to false. Default: true + The working directory which this Restate node should use for relative paths. The default is `restate-data` under the current working directory. + The metadata client type to store metadata + TCP connection timeout for connecting to the metadata store. + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Backoff policy used by the metadata client when it encounters concurrent modifications. + - - - No retry strategy. - - - - + + + + No retry strategy. - - - - - Retry with a fixed delay strategy. - - - - - - - Interval between retries. - -Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Number of maximum attempts before giving up. Infinite retries if unset. 
- - - - - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - - - - Initial interval for the first retry attempt. - -Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - The factor to use to compute the next retry attempt. - - - - Number of maximum attempts before giving up. Infinite retries if unset. - - - - Maximum interval between retries. - -Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - + - - - - - - - Store metadata on the replicated metadata store that runs on nodes with the metadata-server role. - - - - - - Restate metadata server address list - - - - + + Retry with a fixed delay strategy. - + - - + + Interval between retries. - - Store metadata on an external etcd cluster. +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. -The addresses are formatted as `host:port` - - - - - Etcd cluster node address list + + Number of maximum attempts before giving up. Infinite retries if unset. + - - - - Store metadata on an external object store. - - - - - - This location will be used to persist cluster metadata. Takes the form of a URL with `s3://` as the protocol and bucket name as the authority, plus an optional prefix specified as the path component. + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. -Example: `s3://bucket/prefix` + - - Definition of a retry policy - - - - No retry strategy. - - - - - - - - - - Retry with a fixed delay strategy. - - - - - - - Interval between retries. - -Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - Number of maximum attempts before giving up. Infinite retries if unset. - - - - - - - Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - - - - Initial interval for the first retry attempt. - -Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - - The factor to use to compute the next retry attempt. - - - - Number of maximum attempts before giving up. Infinite retries if unset. - - - - Maximum interval between retries. + + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - - - - - - The AWS configuration profile to use for S3 object store destinations. If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. - + + The factor to use to compute the next retry attempt. 
- - AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. - - Username for Minio, or consult the service documentation for other S3-compatible stores. - + + Number of maximum attempts before giving up. Infinite retries if unset. - - Password for Minio, or consult the service documentation for other S3-compatible stores. - - This is only needed with short-term STS session credentials. - + + Maximum interval between retries. - - When you use Amazon S3, this is typically inferred from the region and there is no need to set it. With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. - +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1461,10 +1425,12 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Address to bind for the Node server. Derived from the advertised address, defaulting to `0.0.0.0:$PORT` (where the port will be inferred from the URL scheme). + Address that other nodes will use to connect to this node. Default is `http://127.0.0.1:5122/` + @@ -1477,6 +1443,7 @@ NOTE 1: This config entry only impacts the initial number of partitions, the val NOTE 2: This will be renamed to `default-num-partitions` by default as of v1.3+ Default: 24 + @@ -1485,185 +1452,178 @@ Default: 24 Note that this value only impacts the cluster initial provisioning and will not be respected after the cluster has been provisioned. To update existing clusters use the `restatectl` utility. + This timeout is used when shutting down the various Restate components to drain all the internal queues. + Size of the default thread pool used to perform internal tasks. If not set, it defaults to the number of CPU cores. + Log filter configuration. Can be overridden by the `RUST_LOG` environment variable. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. + Format to use when logging. - - - - Enables verbose logging. Not recommended in production. - - - Enables compact logging. - - - - Enables json logging. You can use a json log collector to ingest these logs and further process them. - - - + + + - `pretty` : Enables verbose logging. Not recommended in production. + - `compact` : Enables compact logging. + - `json` : Enables json logging. You can use a json log collector to ingest these logs and further process them. Disable ANSI terminal codes for logs. This is useful when the log collector doesn't support processing ANSI terminal codes. + Address to bind for the tokio-console tracing subscriber. If unset and restate-server is built with tokio-console support, it'll listen on `0.0.0.0:6669`. + Disable prometheus metric recording and reporting. Default is `false`. 
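The inline bullet lists above (for example the log `format` and `rocksdb-perf-level` options) come from the new enum-variant branch of the generator. A small, runnable sketch of that rendering follows; the function name is assumed for illustration and the shape mirrors, rather than copies, the script's template strings.

```js
// Sketch of flattening enum-style variants into "- `value` : description" bullets.
function renderEnumVariants(variants, indent = "") {
  return variants
    .map((variant, index) => {
      const name =
        variant.enum && variant.enum.length === 1
          ? `${variant.enum[0]}`
          : variant.title || `Option ${index + 1}`;
      return `${indent}- \`${name}\` : ${variant.description || ""}`;
    })
    .join("\n");
}

console.log(
  renderEnumVariants([
    { enum: ["pretty"], description: "Enables verbose logging. Not recommended in production." },
    { enum: ["compact"], description: "Enables compact logging." },
    { enum: ["json"], description: "Enables json logging." },
  ])
);
// - `pretty` : Enables verbose logging. Not recommended in production.
// - `compact` : Enables compact logging.
// - `json` : Enables json logging.
```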
+ Storage high priority thread pool This configures the restate-managed storage thread pool for performing high-priority or latency-sensitive storage tasks when the IO operation cannot be performed on in-memory caches. + Storage low priority thread pool This configures the restate-managed storage thread pool for performing low-priority or latency-insensitive storage tasks. + Non-zero human-readable bytes + The memory size used across all memtables (ratio between 0 to 1.0). This limits how much memory memtables can eat up from the value in rocksdb-total-memory-limit. When set to 0, memtables can take all available memory up to the value specified in rocksdb-total-memory-limit. This value will be sanitized to 1.0 if outside the valid bounds. + The number of threads to reserve to Rocksdb background tasks. Defaults to the number of cores on the machine. + The number of threads to reserve to high priority Rocksdb background tasks. + This defines the duration after which a write is to be considered in \"stall\" state. For every write that meets this threshold, the system will increment the `restate.rocksdb_stall_flare` gauge, if the write is unstalled, the guage will be updated accordingly. + Note if automatic memory budgeting is enabled, it should be safe to allow rocksdb to stall if it hits the limit. However, if rocksdb stall kicked in, it's unlikely that the system will recover from this without intervention. + Defines the level of PerfContext used internally by rocksdb. Default is `enable-count` which should be sufficient for most users. Note that higher levels incur a CPU cost and might slow down the critical path. - - - - Disable perf stats - - - - Enables only count stats - - - - Count stats and enable time stats except for mutexes - - - Other than time, also measure CPU time counters. Still don't measure time (neither wall time nor CPU time) for mutexes - - - - Enables count and time stats - - - + + + - `disable` : Disable perf stats + - `enable-count` : Enables only count stats + - `enable-time-except-for-mutex` : Count stats and enable time stats except for mutexes + - `enable-time-and-c-p-u-time-except-for-mutex` : Other than time, also measure CPU time counters. Still don't measure time (neither wall time nor CPU time) for mutexes + - `enable-time` : Enables count and time stats The idle time after which the node will check for metadata updates from metadata store. This helps the node detect if it has been operating with stale metadata for extended period of time, primarily because it didn't interact with other peers in the cluster during that period. + When a node detects that a new metadata version exists, it'll attempt to fetch it from its peers. After this timeout duration has passed, the node will attempt to fetch the metadata from metadata store as well. This is to ensure that the nodes converge quickly while reducing the load on the metadata store. + The retry policy for network related errors + - - - No retry strategy. - - - - + + + + No retry strategy. - + - - Retry with a fixed delay strategy. - - - - + + + + Retry with a fixed delay strategy. - - Interval between retries. + + + + + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - Number of maximum attempts before giving up. Infinite retries if unset. - + + + + Number of maximum attempts before giving up. Infinite retries if unset. - - - Retry with an exponential strategy. 
The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - - - + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - - Initial interval for the first retry attempt. + + + + + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - - - The factor to use to compute the next retry attempt. - + - - Number of maximum attempts before giving up. Infinite retries if unset. - + + The factor to use to compute the next retry attempt. - - Maximum interval between retries. + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1671,10 +1631,12 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ The timeout until the node gives up joining a cluster and initializing itself. + Restate uses Scarf to collect anonymous usage data to help us understand how the software is being used. You can set this flag to true to disable this collection. It can also be set with the environment variable DO_NOT_TRACK=1. + @@ -1683,6 +1645,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Specify the tracing endpoint to send runtime traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). To configure the sampling, please refer to the [opentelemetry autoconfigure docs](https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk-extensions/autoconfigure/README.md#sampler). + @@ -1691,6 +1654,7 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs Specify the tracing endpoint to send runtime traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). To configure the sampling, please refer to the [opentelemetry autoconfigure docs](https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk-extensions/autoconfigure/README.md#sampler). + @@ -1699,6 +1663,7 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs Specify the tracing endpoint to send services traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). To configure the sampling, please refer to the [opentelemetry autoconfigure docs](https://github.com/open-telemetry/opentelemetry-java/blob/main/sdk-extensions/autoconfigure/README.md#sampler). + @@ -1709,40 +1674,48 @@ If unset, no traces will be written to file. It can be used to export traces in a structured format without configuring a Jaeger agent. 
To inspect the traces, open the Jaeger UI and use the Upload JSON feature to load and inspect them. + Distributed tracing exporter filter. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. + Specify additional headers you want the system to send to the tracing endpoint (e.g. authentication headers). + A path to a file, such as \"/var/secrets/key.pem\", which contains exactly one ed25519 private key in PEM format. Such a file can be generated with `openssl genpkey -algorithm ed25519`. If provided, this key will be used to attach JWTs to requests from this client which SDKs may optionally verify, proving that the caller is a particular Restate instance. This file is currently only read on client creation, but this may change in future. Parsed public keys will be logged at INFO level in the same format that SDKs expect. + Headers that should be applied to all outgoing requests (HTTP and Lambda). Defaults to `x-restate-cluster-name: <cluster name>`. + Configuration for the HTTP/2 keep-alive mechanism, using PING frames. If unset, HTTP/2 keep-alive are disabled. + Sets an interval for HTTP/2 PING frames should be sent to keep a connection alive. You should set this timeout with a value lower than the `abort_timeout`. + Sets a timeout for receiving an acknowledgement of the keep-alive ping. If the ping is not acknowledged within the timeout, the connection will be closed. + @@ -1750,10 +1723,12 @@ If the ping is not acknowledged within the timeout, the connection will be close A URI, such as `http://127.0.0.1:10001`, of a server to which all invocations should be sent, with the `Host` header set to the deployment URI. HTTPS proxy URIs are supported, but only HTTP endpoint traffic will be proxied currently. Can be overridden by the `HTTP_PROXY` environment variable. + HTTP authorities eg `localhost`, `restate.dev`, `127.0.0.1` that should not be proxied by the http_proxy. Ports are ignored. Subdomains are also matched. An entry “*” matches all hostnames. Can be overridden by the `NO_PROXY` environment variable, which supports comma separated values. + @@ -1764,6 +1739,7 @@ If the ping is not acknowledged within the timeout, the connection will be close How long to wait for a TCP connection to be established before considering it a failed attempt. + @@ -1774,49 +1750,59 @@ This value will be overwritten by the value included in the initial SETTINGS fra Default: None **NOTE**: Setting this value to None (default) users the default recommended value from HTTP2 specs + Name of the AWS profile to select. Defaults to 'AWS_PROFILE' env var, or otherwise the `default` profile. + An external ID to apply to any AssumeRole operations taken by this client. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html Can be overridden by the `AWS_EXTERNAL_ID` environment variable. + Request minimum size to enable compression. The request size includes the total of the journal replay and its framing using Restate service protocol, without accounting for the json envelope and the base 64 encoding. Default: 4MB (The default AWS Lambda Limit is 6MB, 4MB roughly accounts for +33% of Base64 and the json envelope). + Human-readable bytes Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. 
+ Use O_DIRECT for writes in background flush and compactions. + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) + Disable rocksdb statistics collection Default: False (statistics enabled) + Default: the number of CPU cores on this node. + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. + Non-zero human-readable bytes @@ -1824,6 +1810,7 @@ Default: False (statistics enabled) StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" + @@ -1831,6 +1818,7 @@ Default: \"except-detailed-timers\" Verbosity of the LOG. Default: \"error\" + Verbosity of the LOG. @@ -1838,12 +1826,14 @@ Default: \"error\" Number of info LOG files to keep Default: 1 + Max size of info LOG file Default: 64MB + Non-zero human-readable bytes @@ -1851,27 +1841,33 @@ Default: 64MB Uncompressed block size Default: 64KiB + Non-zero human-readable bytes The interval at which the failure detector will tick. Decrease this value for faster reaction to node failures. Note, that every tick comes with an overhead. + Specifies how many gossip intervals of inactivity need to pass before considering a node as dead. + On every gossip interval, how many peers each node attempts to gossip with. The default is optimized for small clusters (less than 5 nodes). On larger clusters, if gossip overhead is noticeable, consider reducing this value to 1. + Gossips before failure detector is stable + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + @@ -1880,24 +1876,29 @@ Default: 64KiB In this case, the node will advertise itself as dead in the gossip messages it sends out. Note: this threshold does not apply to a cluster that's configured with a single node. + In addition to basic health/liveness information, the gossip protocol is used to exchange extra information about the roles hosted by this node. For instance, which partitions are currently running, their configuration versions, and the durable LSN of the corresponding partition databases. This information is sent every Nth gossip message. This setting controls the frequency of this exchange. For instance, `10` means that every 10th gossip message will contain the extra information about. + The time skew is the maximum acceptable time difference between the local node and the time reported by peers via gossip messages. The time skew is also used to ignore gossip messages that are too old. + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Maximum journal retention duration that can be configured. When discovering a service deployment, or when modifying the journal retention using the Admin API, the given value will be clamped. Unset means no limit. + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1905,32 +1906,30 @@ Unset means no limit. The default retry policy to use for invocations. The retry policy can be customized on a service/handler basis, using the respective SDK APIs. 
+ Initial interval for the first retry attempt. + The factor to use to compute the next retry attempt. Default: `2.0`. + Number of maximum attempts (including the initial) before giving up. No retries if set to 1. - - - - Unlimited retries. - - - Bounded number of retries. - - - - + + + - `unlimited` : Unlimited retries. + + + Bounded number of retries. - + @@ -1938,21 +1937,16 @@ The retry policy can be customized on a service/handler basis, using the respect Behavior when max attempts are reached. - - - - Pause the invocation when max attempts are reached. - - - Kill the invocation when max attempts are reached. - - - + + + - `pause` : Pause the invocation when max attempts are reached. + - `kill` : Kill the invocation when max attempts are reached. Maximum interval between retries. + @@ -1962,5 +1956,6 @@ The retry policy can be customized on a service/handler basis, using the respect Maximum max attempts configurable in an invocation retry policy. When discovering a service deployment with configured retry policies, or when modifying the invocation retry policy using the Admin API, the given value will be clamped. `None` means no limit, that is infinite retries is enabled. + diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index 9c84cb0a..25321487 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -79,8 +79,8 @@ function generateResponseField(propName, propSchema, isRequired = false, level = // Format default value properly for the attribute let defaultAttr = ''; + let defaultStr = ''; if (propSchema.default !== undefined && propSchema.default !== null) { - let defaultStr = ''; if (typeof propSchema.default === 'string') { defaultStr = `"${propSchema.default}"`; } else if (typeof propSchema.default === 'object') { @@ -88,19 +88,32 @@ function generateResponseField(propName, propSchema, isRequired = false, level = } else { defaultStr = `${String(propSchema.default)}`; } - // Escape quotes for the attribute - // const escapedDefault = defaultStr.replace(/"/g, '"'); defaultAttr = ` default={${defaultStr}}`; + } else { + if (type === 'string' && propSchema.enum !== undefined && Array.isArray(propSchema.enum) && propSchema.enum.length > 0) { + // If enum of strings is defined, use the first enum value as default + let defaultValue = propSchema.enum[0]; + if (typeof defaultValue === 'string') { + defaultStr = `"${defaultValue}"`; + defaultAttr = ` default={${defaultStr}}`; + } else if (typeof defaultValue === 'object') { + defaultStr = JSON.stringify(defaultValue); + } else { + defaultStr = `${String(defaultValue)}`; + } + defaultAttr = ` default={${defaultStr}}`; + } } - + + let output = `${indent}\n`; if (description) { - output += `${indent} ${description}\n`; + output += `${indent} ${description}\n\n`; } // Handle object properties - if (propSchema.type === 'object' && propSchema.properties) { + if (type === 'object' && propSchema.properties) { const requiredProps = propSchema.required || []; output += `${indent} \n`; output += `${indent} \n`; @@ -118,7 +131,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = } // Handle array items - if (propSchema.type === 'array' && propSchema.items) { + if (type === 'array' && propSchema.items) { output += `${indent} \n`; output += `${indent} \n`; output += generateResponseField('item', propSchema.items, false, level + 2); @@ -126,21 +139,20 @@ function generateResponseField(propName, propSchema, isRequired = 
false, level = } // Handle anyOf - if (propSchema.anyOf) { + if (type === 'anyOf') { const variants = propSchema.anyOf; - + // Handle the optional type case of [T, null] if (variants.length === 2 && variants.some(variant => variant.type === "null")) { - console.log(variants); let optionalVariant = variants.find(variant => variant.type !== "null") const optionalType = getTypeFromSchema(optionalVariant); output = `${indent}\n`; if (description) { - output += `${indent} ${description}\n`; + output += `${indent} ${description}\n\n`; } - if (optionalType.type === 'object' && optionalVariant.properties) { + if ((['object', 'oneOf', 'array'].some(t => optionalType.type.includes(t))) && optionalVariant.properties) { const requiredProps = optionalVariant.required || []; output += `${indent} \n`; output += `${indent} \n`; @@ -160,10 +172,9 @@ function generateResponseField(propName, propSchema, isRequired = false, level = } } else { output += `${indent} \n`; - output += `${indent} \n`; variants.forEach((variant, index) => { - let variantName; + let variantName = ''; if (variant.enum && variant.enum.length === 1) { variantName = `${variant.enum[0]}`; } else if (variant.title) { @@ -173,22 +184,41 @@ function generateResponseField(propName, propSchema, isRequired = false, level = } else { variantName = `Option ${index + 1}`; } - output += generateResponseField(variantName, variant, false, level + 2); + if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { + const requiredProps = variant.required || []; + output += `${indent} \n`; + output += `${indent} \n`; + + Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { + output += generateResponseField( + subPropName, + subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + + output += `${indent} \n`; + } else { + output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` + } }); - output += `${indent} \n`; } } // Handle oneOf - if (propSchema.oneOf) { + if (type === 'oneOf') { const variants = propSchema.oneOf + console.log(variants); + + output += `${indent} \n`; + // output += `${indent} \n`; output += `${indent} \n`; - output += `${indent} \n`; variants.forEach((variant, index) => { - let variantName; + let variantName = ''; if (variant.enum && variant.enum.length === 1) { variantName = `${variant.enum[0]}`; } else if (variant.title) { @@ -198,10 +228,28 @@ function generateResponseField(propName, propSchema, isRequired = false, level = } else { variantName = `Option ${index + 1}`; } - output += generateResponseField(variantName, variant, false, level + 2); + if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { + const requiredProps = variant.required || []; + output += `${indent} \n`; + output += `${indent} \n`; + output += `${indent} ${formatDescription(variant.description)}\n\n`; + + Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { + output += generateResponseField( + subPropName, + subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + + output += `${indent} \n`; + } else { + output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` + } }); - output += `${indent} \n`; + // output += `${indent} \n`; } output += `${indent}\n\n`; From 89bcbce6a1aa129ea0bac61216803db269dd1fd7 Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 14:05:45 +0100 Subject: [PATCH 03/10] Improve rendering of 
options --- docs/references/server-config.mdx | 592 +++++++++--------- .../restate-server-configuration-schema.json | 2 +- scripts/generate-restate-config-viewer.js | 20 +- 3 files changed, 314 insertions(+), 300 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index b84a8d8e..2ba0a11a 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -8,39 +8,39 @@ import Intro from "/snippets/common/default-configuration.mdx" - + Worker options - + Internal queue for partition processor communication - + The number of timers in memory limit is used to bound the amount of timers loaded in memory. If this limit is set, when exceeding it, the timers farther in the future will be spilled to disk. - + In order to clean up completed invocations, that is invocations invoked with an idempotency id, or workflows, Restate periodically scans among the completed invocations to check whether they need to be removed or not. This interval sets the scan interval of the cleanup procedure. Default: 1 hour. - + Storage options - + How many partitions to divide memory across? By default this uses the value defined in `default-num-partitions` in the common section of the config. - + The memory budget for rocksdb memtables in bytes The total is divided evenly across partitions. The divisor is defined in `num-partitions-to-share-memory-budget`. If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. @@ -48,49 +48,49 @@ The total is divided evenly across partitions. The divisor is defined in `num-pa Non-zero human-readable bytes - + The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to memtables (See `rocksdb-total-memtables-ratio` in common). The budget is then divided evenly across partitions. The divisor is defined in `num-partitions-to-share-memory-budget` - + Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - + Use O_DIRECT for writes in background flush and compactions. - + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - + Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -98,7 +98,7 @@ Default: \"except-detailed-timers\" - + Verbosity of the LOG. Default: \"error\" @@ -106,14 +106,14 @@ Default: \"error\" Verbosity of the LOG. - + Number of info LOG files to keep Default: 1 - + Max size of info LOG file Default: 64MB @@ -121,7 +121,7 @@ Default: 64MB Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB @@ -132,12 +132,12 @@ Default: 64KiB - + Invoker options - + This is **deprecated** and will be removed in the next Restate releases. Please refer to `default-retry-policy` for the new configuration options. 
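As a rough illustration of the (deprecated) retry policy shape described above, here is a sketch using the fixed-delay variant. The table path and key names are assumptions based on the parameter descriptions, so treat this as a shape, not an authoritative snippet:

```toml
# Sketch only: table path and key names are assumptions based on this reference.
[worker.invoker.retry-policy]   # deprecated -- prefer default-retry-policy, as noted above
type = "fixed-delay"
interval = "2s"        # jiff-friendly format; the ISO8601 form "PT2S" is equivalent
max-attempts = 10      # leave unset to retry indefinitely
```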
@@ -148,7 +148,7 @@ Please refer to `default-retry-policy` for the new configuration options. No retry strategy. - + @@ -156,17 +156,17 @@ Please refer to `default-retry-policy` for the new configuration options. Retry with a fixed delay strategy. - + - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -176,27 +176,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -207,57 +207,57 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero human-readable bytes - + Threshold to fail the invocation in case protocol messages coming from a service are larger than the specified amount. Non-zero human-readable bytes - + Temporary directory to use for the invoker temporary files. If empty, the system temporary directory will be used instead. - + Defines the threshold after which queues invocations will spill to disk at the path defined in `tmp-dir`. In other words, this is the number of invocations that can be kept in memory before spilling to disk. This is a per-partition limit. - + Number of concurrent invocations that can be processed by the invoker. - + Configures throttling for service invocations at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which invocations can be processed, helping to prevent resource exhaustion and maintain system stability under high load. The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and invocations are processed without throttling. - + The rate at which the tokens are replenished. Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - + The maximum number of tokens the bucket can hold. Default to the rate value if not specified. @@ -265,21 +265,21 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m - + Configures rate limiting for service actions at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which actions can be processed, helping to prevent resource exhaustion and maintain system stability under high load. 
The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and actions are processed without throttling. - + The rate at which the tokens are replenished. Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - + The maximum number of tokens the bucket can hold. Default to the rate value if not specified. @@ -290,24 +290,24 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m - + The maximum number of commands a partition processor will apply in a batch. The larger this value is, the higher the throughput and latency are. - + Snapshots provide a mechanism for safely trimming the log and efficient bootstrapping of new worker nodes. - + Base URL for cluster snapshots. Supports `s3://` and `file://` protocol scheme. S3-compatible object stores must support ETag-based conditional writes. Default: `None` - + Number of log records that trigger a snapshot to be created. As snapshots are created asynchronously, the actual number of new records that will trigger a snapshot will vary. The counter for the subsequent snapshot begins from the LSN at which the previous snapshot export was initiated. Only leader Partition Processors will take snapshots for a given partition. @@ -318,7 +318,7 @@ Default: `None` - automatic snapshots are disabled - + A retry policy for dealing with retryable object store errors. @@ -327,7 +327,7 @@ Default: `None` - automatic snapshots are disabled No retry strategy. - + @@ -335,17 +335,17 @@ Default: `None` - automatic snapshots are disabled Retry with a fixed delay strategy. - + - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -355,27 +355,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -386,37 +386,37 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + The AWS configuration profile to use for S3 object store destinations. If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. - + AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. 
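Putting the snapshot options described above together, a minimal sketch could look like the following. The exact key names are assumptions and should be checked against the generated reference:

```toml
# Sketch of the snapshot options described above; key names are assumptions.
[worker.snapshots]
destination = "s3://my-bucket/cluster-snapshots"   # s3:// and file:// schemes are supported
snapshot-interval-num-records = 10000              # unset => automatic snapshots disabled
aws-region = "us-east-1"                           # any non-empty value works for Minio
```

With a shared object-store destination like this, newly added worker nodes can bootstrap from the latest snapshot instead of replaying the full log.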
- + Username for Minio, or consult the service documentation for other S3-compatible stores. - + Password for Minio, or consult the service documentation for other S3-compatible stores. - + This is only needed with short-term STS session credentials. - + When you use Amazon S3, this is typically inferred from the region and there is no need to set it. With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. - + Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. @@ -427,55 +427,55 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Admin server options - + Address to bind for the Admin APIs. - + Optional advertised Admin API endpoint. - + List of header names considered routing headers. These will be used during deployment creation to distinguish between an already existing deployment and a new deployment. - + - + Concurrency limit for the Admin APIs. Default is unlimited. - + Storage query engine options - + Non-zero human-readable bytes - + The path to spill to - + The degree of parallelism to use for query execution (Defaults to the number of available cores). @@ -483,61 +483,61 @@ These will be used during deployment creation to distinguish between an already - + Controls the interval at which cluster controller polls nodes of the cluster. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Disable serving the Restate Web UI on the admin port. Default is `false`. - + - + Ingress options - + The address to bind for the ingress. - + Local concurrency limit to use to limit the amount of concurrent requests. If exceeded, the ingress will reply immediately with an appropriate status code. Default is unlimited. - + - + Configuration options to connect to a Kafka cluster. - + Cluster name (Used to identify subscriptions). - + Initial list of brokers (host or host:port). - + @@ -549,7 +549,7 @@ These will be used during deployment creation to distinguish between an already - + Ingress endpoint that the Web UI should use to interact with. @@ -557,12 +557,12 @@ These will be used during deployment creation to distinguish between an already - + Bifrost options - + Default: Replicated @@ -571,17 +571,17 @@ These will be used during deployment creation to distinguish between an already - `replicated` : Replicated loglets are restate's native log replication system. This requires `log-server` role to run on enough nodes in the cluster. - + Configuration of local loglet provider - + Configuration of replicated loglet provider - + Maximum number of inflight records sequencer can accept Once this maximum is hit, sequencer will induce back pressure on clients. This controls the total number of records regardless of how many batches. @@ -590,7 +590,7 @@ Note that this will be increased to fit the biggest batch of records being enque - + Sequencer retry policy Backoff introduced when sequencer fail to find a suitable spread of log servers @@ -601,7 +601,7 @@ Backoff introduced when sequencer fail to find a suitable spread of log servers No retry strategy. - + @@ -609,17 +609,17 @@ Backoff introduced when sequencer fail to find a suitable spread of log servers Retry with a fixed delay strategy. - + - + Interval between retries. 
Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -629,27 +629,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -660,7 +660,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Sequencer inactivity timeout The sequencer is allowed to consider itself quiescent if it did not commit records for this period of time. It may use this to sends pre-emptive release/seal check requests to log-servers. @@ -669,14 +669,14 @@ The sequencer is also allowed to use this value as interval to send seal/release - + Log Server RPC timeout Timeout waiting on log server response - + Log Server RPC retry policy Retry policy for log server RPCs @@ -687,7 +687,7 @@ Retry policy for log server RPCs No retry strategy. - + @@ -695,17 +695,17 @@ Retry policy for log server RPCs Retry with a fixed delay strategy. - + - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -715,27 +715,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -746,19 +746,19 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Maximum number of records to prefetch from log servers The number of records bifrost will attempt to prefetch from replicated loglet's log-servers for every loglet reader (e.g. partition processor). Note that this mainly impacts readers that are not co-located with the loglet sequencer (i.e. partition processor followers). - + Non-zero human-readable bytes - + Trigger to prefetch more records When read-ahead is used (readahead-records), this value (percentage in float) will determine when readers should trigger a prefetch for another batch to fill up the buffer. For instance, if this value is 0.3, then bifrost will trigger a prefetch when 30% or more of the read-ahead slots become available (e.g. 
partition processor consumed records and freed up enough slots). @@ -774,7 +774,7 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - + Retry policy to use when bifrost waits for reconfiguration to complete during read operations @@ -783,7 +783,7 @@ Value must be between 0 and 1. It will be clamped at `1.0`. No retry strategy. - + @@ -791,17 +791,17 @@ Value must be between 0 and 1. It will be clamped at `1.0`. Retry with a fixed delay strategy. - + - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -811,27 +811,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -842,32 +842,32 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Interval to wait between retries of loglet seal failures - + Time interval after which bifrost's auto-recovery mechanism will kick in. This is triggered in scenarios where the control plane took too long to complete loglet reconfigurations. - + Minimum retry duration used by the exponential backoff mechanism for bifrost appends. - + Maximum retry duration used by the exponential backoff mechanism for bifrost appends. - + Optional size of record cache in bytes. If set to 0, record cache will be disabled. Defaults: 250MB - + When enabled, automatic improvement periodically checks with the loglet provider if the loglet configuration can be improved by performing a reconfiguration. This allows the log to pick up replication property changes, apply better placement of replicas, or for other reasons. @@ -877,19 +877,19 @@ This allows the log to pick up replication property changes, apply better placem - + Metadata store options - + Limit number of in-flight requests Number of in-flight metadata store requests. - + The memory budget for rocksdb memtables in bytes If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. @@ -897,56 +897,56 @@ If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. Non-zero human-readable bytes - + The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to memtables (See `rocksdb-total-memtables-ratio` in common). - + Auto join the metadata cluster when being started Defines whether this node should auto join the metadata store cluster when being started for the first time. - + Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - + Use O_DIRECT for writes in background flush and compactions. 
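As a hedged sketch of the metadata-server memory options mentioned a few entries above: only `rocksdb-memory-ratio` is named explicitly in the descriptions, so the budget key name and section path below are guesses for illustration only:

```toml
# Sketch only: key names are assumptions based on the descriptions above.
[metadata-server]
rocksdb-memory-budget = "256 MB"   # an explicit budget overrides rocksdb-memory-ratio
# rocksdb-memory-ratio = 0.01      # used only when no explicit budget is set
```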
- + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - + Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -954,7 +954,7 @@ Default: \"except-detailed-timers\" - + Verbosity of the LOG. Default: \"error\" @@ -962,14 +962,14 @@ Default: \"error\" Verbosity of the LOG. - + Number of info LOG files to keep Default: 1 - + Max size of info LOG file Default: 64MB @@ -977,7 +977,7 @@ Default: 64MB Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB @@ -985,35 +985,35 @@ Default: 64KiB Non-zero human-readable bytes - + The number of ticks before triggering an election The number of ticks before triggering an election. The value must be larger than `raft_heartbeat_tick`. It's recommended to set `raft_election_tick = 10 * raft_heartbeat_tick`. Decrease this value if you want to react faster to failed leaders. Note, decreasing this value too much can lead to cluster instabilities due to falsely detecting dead leaders. - + The number of ticks before sending a heartbeat A leader sends heartbeat messages to maintain its leadership every heartbeat ticks. Decrease this value to send heartbeats more often. - + The raft tick interval The interval at which the raft node will tick. Decrease this value in order to let the Raft node react more quickly to changes. Note, that every tick comes with an overhead. Moreover, the tick interval directly affects the election timeout. If the election timeout becomes too small, then this can cause cluster instabilities due to frequent leader changes. - + The status update interval The interval at which the raft node will update its status. Decrease this value in order to see more recent status updates. - + The threshold for trimming the raft log. The log will be trimmed if the number of apply entries exceeds this threshold. The default value is `1000`. @@ -1021,17 +1021,17 @@ The interval at which the raft node will update its status. Decrease this value - + Common network configuration options for communicating with Restate cluster nodes. Note that similar keys are present in other config sections, such as in Service Client options. - + TCP connection timeout for Restate cluster node-to-node network connections. - + Retry policy to use for internal node-to-node networking. @@ -1040,7 +1040,7 @@ The interval at which the raft node will update its status. Decrease this value No retry strategy. - + @@ -1048,17 +1048,17 @@ The interval at which the raft node will update its status. Decrease this value Retry with a fixed delay strategy. - + - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -1068,27 +1068,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. 
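To make the formula concrete, here is an illustrative exponential policy together with the delay sequence it produces; the key names are assumptions based on the parameter descriptions that follow:

```toml
# Illustration of min(last_retry_interval * factor, max_interval); key names are assumptions.
type = "exponential"
initial-interval = "100ms"
factor = 2.0
max-interval = "5s"
# Successive delays: 100ms, 200ms, 400ms, 800ms, 1.6s, 3.2s, 5s, 5s, ... (capped at max-interval)
```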
- + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1099,32 +1099,32 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Timeout for receiving a handshake response from Restate cluster peers. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + HTTP/2 Adaptive Window - + Disables Zstd compression for internal gRPC network connections - + Non-zero human-readable bytes @@ -1132,12 +1132,12 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Configuration is only used on nodes running with `log-server` role. - + The memory budget for rocksdb memtables in bytes If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. @@ -1145,7 +1145,7 @@ If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. Non-zero human-readable bytes - + The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to the log-server. @@ -1154,12 +1154,12 @@ This defines the total memory for rocksdb as a ratio of all memory available to - + Disable fsync of WAL on every batch - + The maximum number of subcompactions to run in parallel. Setting this to 1 means no sub-compactions are allowed (i.e. only 1 thread will do the compaction). @@ -1168,7 +1168,7 @@ Default is 0 which maps to floor(number of CPU cores / 2) - + The size limit of all WAL files Use this to limit the size of WAL files. If the size of all WAL files exceeds this limit, the oldest WAL file will be deleted and if needed, memtable flush will be triggered. @@ -1179,54 +1179,54 @@ Default is `0` which translates into 6 times the memory allocated for membtables - + Trigger a commit when the batch size exceeds this threshold. Set to 0 or 1 to commit the write batch on every command. - + The number of messages that can queue up on input network stream while request processor is busy. - + Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - + Use O_DIRECT for writes in background flush and compactions. - + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - + Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. 
Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -1234,7 +1234,7 @@ Default: \"except-detailed-timers\" - + Verbosity of the LOG. Default: \"error\" @@ -1242,14 +1242,14 @@ Default: \"error\" Verbosity of the LOG. - + Number of info LOG files to keep Default: 1 - + Max size of info LOG file Default: 64MB @@ -1257,7 +1257,7 @@ Default: 64MB Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB @@ -1268,12 +1268,12 @@ Default: 64KiB - + Defines the roles which this Restate node should run, by default the node starts with all roles. - + - `worker` : A worker runs partition processor (journal, state, and drives invocations) @@ -1286,12 +1286,12 @@ Default: 64KiB - + Unique name for this node in the cluster. The node must not change unless it's started with empty local store. It defaults to the node's hostname. - + [PREVIEW FEATURE] Setting the location allows Restate to form a tree-like cluster topology. The value is written in the format of \"region[.zone]\" to assign this node to a specific region, or to a zone within a region. The value of region and zone is arbitrary but whitespace and `.` are disallowed. @@ -1300,21 +1300,21 @@ NOTE: It's _strongly_ recommended to not change the node's location string after When this value is not set, the node is considered to be in the _default_ location. The _default_ location means that the node is not assigned to any specific region or zone. -## Examples - `us-west` -- the node is in the `us-west` region. - `us-west.a1` -- the node is in the `us-west` region and in the `a1` zone. - `` -- [default] the node is in the default location +Examples - `us-west` -- the node is in the `us-west` region. - `us-west.a1` -- the node is in the `us-west` region and in the `a1` zone. - `` -- [default] the node is in the default location - + If set, the node insists on acquiring this node ID. - + A unique identifier for the cluster. All nodes in the same cluster should have the same. - + If true, then this node is allowed to automatically provision as a new cluster. This node *must* have an admin role and a new nodes configuration will be created that includes this node. auto-provision is allowed by default in development mode and is disabled if restate-server runs with `--production` flag to prevent cluster nodes from forming their own clusters, rather than forming a single cluster. @@ -1327,32 +1327,32 @@ Default: true - + The working directory which this Restate node should use for relative paths. The default is `restate-data` under the current working directory. - + The metadata client type to store metadata - + TCP connection timeout for connecting to the metadata store. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Backoff policy used by the metadata client when it encounters concurrent modifications. @@ -1361,7 +1361,7 @@ Default: true No retry strategy. - + @@ -1369,17 +1369,17 @@ Default: true Retry with a fixed delay strategy. - + - + Interval between retries. 
Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -1389,27 +1389,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1423,17 +1423,17 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Address to bind for the Node server. Derived from the advertised address, defaulting to `0.0.0.0:$PORT` (where the port will be inferred from the URL scheme). - + Address that other nodes will use to connect to this node. Default is `http://127.0.0.1:5122/` - + Number of partitions that will be provisioned during initial cluster provisioning. partitions are the logical shards used to process messages. Cannot be higher than `65535` (You should almost never need as many partitions anyway) @@ -1446,7 +1446,7 @@ Default: 24 - + Configures the global default replication factor to be used by the the system. Note that this value only impacts the cluster initial provisioning and will not be respected after the cluster has been provisioned. @@ -1455,22 +1455,22 @@ To update existing clusters use the `restatectl` utility. - + This timeout is used when shutting down the various Restate components to drain all the internal queues. - + Size of the default thread pool used to perform internal tasks. If not set, it defaults to the number of CPU cores. - + Log filter configuration. Can be overridden by the `RUST_LOG` environment variable. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. - + Format to use when logging. @@ -1480,66 +1480,66 @@ To update existing clusters use the `restatectl` utility. - `json` : Enables json logging. You can use a json log collector to ingest these logs and further process them. - + Disable ANSI terminal codes for logs. This is useful when the log collector doesn't support processing ANSI terminal codes. - + Address to bind for the tokio-console tracing subscriber. If unset and restate-server is built with tokio-console support, it'll listen on `0.0.0.0:6669`. - + Disable prometheus metric recording and reporting. Default is `false`. - + Storage high priority thread pool This configures the restate-managed storage thread pool for performing high-priority or latency-sensitive storage tasks when the IO operation cannot be performed on in-memory caches. - + Storage low priority thread pool This configures the restate-managed storage thread pool for performing low-priority or latency-insensitive storage tasks. - + Non-zero human-readable bytes - + The memory size used across all memtables (ratio between 0 to 1.0). 
This limits how much memory memtables can eat up from the value in rocksdb-total-memory-limit. When set to 0, memtables can take all available memory up to the value specified in rocksdb-total-memory-limit. This value will be sanitized to 1.0 if outside the valid bounds. - + The number of threads to reserve to Rocksdb background tasks. Defaults to the number of cores on the machine. - + The number of threads to reserve to high priority Rocksdb background tasks. - + This defines the duration after which a write is to be considered in \"stall\" state. For every write that meets this threshold, the system will increment the `restate.rocksdb_stall_flare` gauge, if the write is unstalled, the guage will be updated accordingly. - + Note if automatic memory budgeting is enabled, it should be safe to allow rocksdb to stall if it hits the limit. However, if rocksdb stall kicked in, it's unlikely that the system will recover from this without intervention. - + Defines the level of PerfContext used internally by rocksdb. Default is `enable-count` which should be sufficient for most users. Note that higher levels incur a CPU cost and might slow down the critical path. @@ -1551,17 +1551,17 @@ This configures the restate-managed storage thread pool for performing low-prior - `enable-time` : Enables count and time stats - + The idle time after which the node will check for metadata updates from metadata store. This helps the node detect if it has been operating with stale metadata for extended period of time, primarily because it didn't interact with other peers in the cluster during that period. - + When a node detects that a new metadata version exists, it'll attempt to fetch it from its peers. After this timeout duration has passed, the node will attempt to fetch the metadata from metadata store as well. This is to ensure that the nodes converge quickly while reducing the load on the metadata store. - + The retry policy for network related errors @@ -1570,7 +1570,7 @@ This configures the restate-managed storage thread pool for performing low-prior No retry strategy. - + @@ -1578,17 +1578,17 @@ This configures the restate-managed storage thread pool for performing low-prior Retry with a fixed delay strategy. - + - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + Number of maximum attempts before giving up. Infinite retries if unset. @@ -1598,27 +1598,27 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - + The factor to use to compute the next retry attempt. - + Number of maximum attempts before giving up. Infinite retries if unset. - + Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1629,17 +1629,17 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + The timeout until the node gives up joining a cluster and initializing itself. - + Restate uses Scarf to collect anonymous usage data to help us understand how the software is being used. 
You can set this flag to true to disable this collection. It can also be set with the environment variable DO_NOT_TRACK=1. - + This is a shortcut to set both [`Self::tracing_runtime_endpoint`], and [`Self::tracing_services_endpoint`]. Specify the tracing endpoint to send runtime traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). @@ -1648,7 +1648,7 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs - + Overrides [`Self::tracing_endpoint`] for runtime traces Specify the tracing endpoint to send runtime traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). @@ -1657,7 +1657,7 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs - + Overrides [`Self::tracing_endpoint`] for services traces Specify the tracing endpoint to send services traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). @@ -1666,7 +1666,7 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs - + If set, an exporter will be configured to write traces to files using the Jaeger JSON format. Each trace file will start with the `trace` prefix. If unset, no traces will be written to file. @@ -1677,41 +1677,41 @@ To inspect the traces, open the Jaeger UI and use the Upload JSON feature to loa - + Distributed tracing exporter filter. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. - + Specify additional headers you want the system to send to the tracing endpoint (e.g. authentication headers). - + A path to a file, such as \"/var/secrets/key.pem\", which contains exactly one ed25519 private key in PEM format. Such a file can be generated with `openssl genpkey -algorithm ed25519`. If provided, this key will be used to attach JWTs to requests from this client which SDKs may optionally verify, proving that the caller is a particular Restate instance. This file is currently only read on client creation, but this may change in future. Parsed public keys will be logged at INFO level in the same format that SDKs expect. - + Headers that should be applied to all outgoing requests (HTTP and Lambda). Defaults to `x-restate-cluster-name: <cluster name>`. - + Configuration for the HTTP/2 keep-alive mechanism, using PING frames. If unset, HTTP/2 keep-alive are disabled. - + Sets an interval for HTTP/2 PING frames should be sent to keep a connection alive. You should set this timeout with a value lower than the `abort_timeout`. - + Sets a timeout for receiving an acknowledgement of the keep-alive ping. If the ping is not acknowledged within the timeout, the connection will be closed. @@ -1721,28 +1721,28 @@ If the ping is not acknowledged within the timeout, the connection will be close - + A URI, such as `http://127.0.0.1:10001`, of a server to which all invocations should be sent, with the `Host` header set to the deployment URI. HTTPS proxy URIs are supported, but only HTTP endpoint traffic will be proxied currently. Can be overridden by the `HTTP_PROXY` environment variable. 
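Stepping back from the per-client options, several of the node-level settings documented earlier in this section combine into a minimal configuration along these lines. All key names are assumptions based on their descriptions and should be checked against the generated reference:

```toml
# Sketch pulling together node-level options described in this reference; key names are assumptions.
cluster-name = "my-cluster"          # all nodes in the same cluster share this value
node-name = "node-1"                 # defaults to the hostname; avoid changing it after first start
roles = ["worker", "admin", "metadata-server", "log-server", "http-ingress"]
location = "us-west.a1"              # preview feature, "region[.zone]" format
auto-provision = true                # disabled when running with --production
advertised-address = "http://127.0.0.1:5122/"
default-num-partitions = 24          # only applied during initial cluster provisioning
```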
- + HTTP authorities eg `localhost`, `restate.dev`, `127.0.0.1` that should not be proxied by the http_proxy. Ports are ignored. Subdomains are also matched. An entry “*” matches all hostnames. Can be overridden by the `NO_PROXY` environment variable, which supports comma separated values. - + - + How long to wait for a TCP connection to be established before considering it a failed attempt. - + Sets the initial maximum of locally initiated (send) streams. This value will be overwritten by the value included in the initial SETTINGS frame received from the peer as part of a [connection preface]. @@ -1753,17 +1753,17 @@ Default: None - + Name of the AWS profile to select. Defaults to 'AWS_PROFILE' env var, or otherwise the `default` profile. - + An external ID to apply to any AssumeRole operations taken by this client. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html Can be overridden by the `AWS_EXTERNAL_ID` environment variable. - + Request minimum size to enable compression. The request size includes the total of the journal replay and its framing using Restate service protocol, without accounting for the json envelope and the base 64 encoding. Default: 4MB (The default AWS Lambda Limit is 6MB, 4MB roughly accounts for +33% of Base64 and the json envelope). @@ -1771,42 +1771,42 @@ Default: 4MB (The default AWS Lambda Limit is 6MB, 4MB roughly accounts for +33% Human-readable bytes - + Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - + Use O_DIRECT for writes in background flush and compactions. - + The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - + Default: the number of CPU cores on this node. - + If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. Non-zero human-readable bytes - + StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -1814,7 +1814,7 @@ Default: \"except-detailed-timers\" - + Verbosity of the LOG. Default: \"error\" @@ -1822,14 +1822,14 @@ Default: \"error\" Verbosity of the LOG. - + Number of info LOG files to keep Default: 1 - + Max size of info LOG file Default: 64MB @@ -1837,7 +1837,7 @@ Default: 64MB Non-zero human-readable bytes - + Uncompressed block size Default: 64KiB @@ -1845,32 +1845,32 @@ Default: 64KiB Non-zero human-readable bytes - + The interval at which the failure detector will tick. Decrease this value for faster reaction to node failures. Note, that every tick comes with an overhead. - + Specifies how many gossip intervals of inactivity need to pass before considering a node as dead. - + On every gossip interval, how many peers each node attempts to gossip with. The default is optimized for small clusters (less than 5 nodes). On larger clusters, if gossip overhead is noticeable, consider reducing this value to 1. - + Gossips before failure detector is stable - + Duration string in either jiff human friendly or ISO8601 format. 
Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + How many intervals need to pass without receiving any gossip messages before considering this node as potentially isolated/dead. This threshold is used in the case where the node can still send gossip messages but did not receive any. This can rarely happen in asymmetric network partitions. In this case, the node will advertise itself as dead in the gossip messages it sends out. @@ -1879,22 +1879,22 @@ Note: this threshold does not apply to a cluster that's configured with a single - + In addition to basic health/liveness information, the gossip protocol is used to exchange extra information about the roles hosted by this node. For instance, which partitions are currently running, their configuration versions, and the durable LSN of the corresponding partition databases. This information is sent every Nth gossip message. This setting controls the frequency of this exchange. For instance, `10` means that every 10th gossip message will contain the extra information about. - + The time skew is the maximum acceptable time difference between the local node and the time reported by peers via gossip messages. The time skew is also used to ignore gossip messages that are too old. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Maximum journal retention duration that can be configured. When discovering a service deployment, or when modifying the journal retention using the Admin API, the given value will be clamped. Unset means no limit. @@ -1902,24 +1902,24 @@ Unset means no limit. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + The default retry policy to use for invocations. The retry policy can be customized on a service/handler basis, using the respective SDK APIs. - + Initial interval for the first retry attempt. - + The factor to use to compute the next retry attempt. Default: `2.0`. - + Number of maximum attempts (including the initial) before giving up. No retries if set to 1. @@ -1929,13 +1929,13 @@ The retry policy can be customized on a service/handler basis, using the respect Bounded number of retries. - + - + Behavior when max attempts are reached. @@ -1944,7 +1944,7 @@ The retry policy can be customized on a service/handler basis, using the respect - `kill` : Kill the invocation when max attempts are reached. - + Maximum interval between retries. @@ -1952,7 +1952,7 @@ The retry policy can be customized on a service/handler basis, using the respect - + Maximum max attempts configurable in an invocation retry policy. When discovering a service deployment with configured retry policies, or when modifying the invocation retry policy using the Admin API, the given value will be clamped. `None` means no limit, that is infinite retries is enabled. diff --git a/docs/schemas/restate-server-configuration-schema.json b/docs/schemas/restate-server-configuration-schema.json index 1f04ebda..5f340389 100644 --- a/docs/schemas/restate-server-configuration-schema.json +++ b/docs/schemas/restate-server-configuration-schema.json @@ -205,7 +205,7 @@ }, "location": { "title": "Node Location", - "description": "[PREVIEW FEATURE] Setting the location allows Restate to form a tree-like cluster topology. 
The value is written in the format of \"region[.zone]\" to assign this node to a specific region, or to a zone within a region.\n\nThe value of region and zone is arbitrary but whitespace and `.` are disallowed.\n\nNOTE: It's _strongly_ recommended to not change the node's location string after its initial registration. Changing the location may result in data loss or data inconsistency if `log-server` is enabled on this node.\n\nWhen this value is not set, the node is considered to be in the _default_ location. The _default_ location means that the node is not assigned to any specific region or zone.\n\n## Examples - `us-west` -- the node is in the `us-west` region. - `us-west.a1` -- the node is in the `us-west` region and in the `a1` zone. - `` -- [default] the node is in the default location", + "description": "[PREVIEW FEATURE] Setting the location allows Restate to form a tree-like cluster topology. The value is written in the format of \"region[.zone]\" to assign this node to a specific region, or to a zone within a region.\n\nThe value of region and zone is arbitrary but whitespace and `.` are disallowed.\n\nNOTE: It's _strongly_ recommended to not change the node's location string after its initial registration. Changing the location may result in data loss or data inconsistency if `log-server` is enabled on this node.\n\nWhen this value is not set, the node is considered to be in the _default_ location. The _default_ location means that the node is not assigned to any specific region or zone.\n\nExamples - `us-west` -- the node is in the `us-west` region. - `us-west.a1` -- the node is in the `us-west` region and in the `a1` zone. - `` -- [default] the node is in the default location", "type": "string" }, "force-node-id": { diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index 25321487..14eeafef 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -75,7 +75,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = const indent = ' '.repeat(level); const { type, optional } = getTypeFromSchema(propSchema); const required = isRequired && !optional ? ' required' : ''; - const description = formatDescription(propSchema.description|| propSchema.title || ''); + let description = formatDescription(propSchema.description|| propSchema.title || ''); // Format default value properly for the attribute let defaultAttr = ''; @@ -105,8 +105,22 @@ function generateResponseField(propName, propSchema, isRequired = false, level = } } + let postTags = [] + if (propSchema.format) { + postTags.push(`\'format: ${propSchema.format}\'`); + } + if (propSchema.enum) { + postTags.push(`\'enum: ${propSchema.enum.map(v => (typeof v === 'string' ? 
`"${v}"` : v)).join(', ')}\'`); + } + if (propSchema.minimum) { + postTags.push(`\'minimum: ${propSchema.minimum}\'`); + } + if (propSchema.maximum) { + postTags.push(`\'maximum: ${propSchema.maximum}\'`); + } + const postAttr = ` post={[${postTags.join(",")}]}`; - let output = `${indent}\n`; + let output = `${indent}\n`; if (description) { output += `${indent} ${description}\n\n`; @@ -147,7 +161,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = let optionalVariant = variants.find(variant => variant.type !== "null") const optionalType = getTypeFromSchema(optionalVariant); - output = `${indent}\n`; + output = `${indent}\n`; if (description) { output += `${indent} ${description}\n\n`; From eef01c61f8e796466a33b61b63d5c546f38c08e2 Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 14:43:25 +0100 Subject: [PATCH 04/10] Improve rendering of options --- docs/references/server-config.mdx | 104 ++++++++++++---------- scripts/generate-restate-config-viewer.js | 70 +++++++++++++-- 2 files changed, 119 insertions(+), 55 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index 2ba0a11a..29ccf594 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -96,6 +96,12 @@ Default: False (statistics enabled) Default: \"except-detailed-timers\" + - `"disable-all"` : Disable all metrics + - `"except-histogram-or-timers"` : Disable timer stats, and skip histogram stats + - `"except-timers"` : Skip timer stats + - `"except-detailed-timers"` : Collect all stats except time inside mutex lock AND time spent on compression. + - `"except-time-for-mutex"` : Collect all stats except the counters requiring to get time inside the mutex lock. + - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. @@ -137,14 +143,13 @@ Default: 64KiB - + This is **deprecated** and will be removed in the next Restate releases. Please refer to `default-retry-policy` for the new configuration options. - No retry strategy. @@ -248,6 +253,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and invocations are processed without throttling. + Throttling options per invoker. @@ -270,6 +276,7 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and actions are processed without throttling. + Throttling options per invoker. @@ -318,12 +325,11 @@ Default: `None` - automatic snapshots are disabled - + A retry policy for dealing with retryable object store errors. - No retry strategy. @@ -562,13 +568,12 @@ These will be used during deployment creation to distinguish between an already - + Default: Replicated - - - `local` : A local rocksdb-backed loglet. - - `replicated` : Replicated loglets are restate's native log replication system. This requires `log-server` role to run on enough nodes in the cluster. + - `"local"` : A local rocksdb-backed loglet. + - `"replicated"` : Replicated loglets are restate's native log replication system. 
This requires `log-server` role to run on enough nodes in the cluster. @@ -590,14 +595,13 @@ Note that this will be increased to fit the biggest batch of records being enque - + Sequencer retry policy Backoff introduced when sequencer fail to find a suitable spread of log servers - No retry strategy. @@ -676,14 +680,13 @@ Timeout waiting on log server response - + Log Server RPC retry policy Retry policy for log server RPCs - No retry strategy. @@ -774,12 +777,11 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - + Retry policy to use when bifrost waits for reconfiguration to complete during read operations - No retry strategy. @@ -952,6 +954,12 @@ Default: False (statistics enabled) Default: \"except-detailed-timers\" + - `"disable-all"` : Disable all metrics + - `"except-histogram-or-timers"` : Disable timer stats, and skip histogram stats + - `"except-timers"` : Skip timer stats + - `"except-detailed-timers"` : Collect all stats except time inside mutex lock AND time spent on compression. + - `"except-time-for-mutex"` : Collect all stats except the counters requiring to get time inside the mutex lock. + - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. @@ -1031,12 +1039,11 @@ The interval at which the raft node will update its status. Decrease this value - + Retry policy to use for internal node-to-node networking. - No retry strategy. @@ -1232,6 +1239,12 @@ Default: False (statistics enabled) Default: \"except-detailed-timers\" + - `"disable-all"` : Disable all metrics + - `"except-histogram-or-timers"` : Disable timer stats, and skip histogram stats + - `"except-timers"` : Skip timer stats + - `"except-detailed-timers"` : Collect all stats except time inside mutex lock AND time spent on compression. + - `"except-time-for-mutex"` : Collect all stats except the counters requiring to get time inside the mutex lock. + - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. @@ -1273,14 +1286,13 @@ Default: 64KiB - + - - - `worker` : A worker runs partition processor (journal, state, and drives invocations) - - `admin` : Admin runs cluster controller and user-facing admin APIs - - `metadata-server` : Serves the metadata store - - `log-server` : Serves a log-server for replicated loglets - - `http-ingress` : Serves HTTP ingress requests + - `"worker"` : A worker runs partition processor (journal, state, and drives invocations) + - `"admin"` : Admin runs cluster controller and user-facing admin APIs + - `"metadata-server"` : Serves the metadata store + - `"log-server"` : Serves a log-server for replicated loglets + - `"http-ingress"` : Serves HTTP ingress requests @@ -1352,12 +1364,11 @@ Default: true - + Backoff policy used by the metadata client when it encounters concurrent modifications. - No retry strategy. @@ -1470,14 +1481,13 @@ To update existing clusters use the `restatectl` utility. - + Format to use when logging. - - - `pretty` : Enables verbose logging. Not recommended in production. - - `compact` : Enables compact logging. - - `json` : Enables json logging. You can use a json log collector to ingest these logs and further process them. + - `"pretty"` : Enables verbose logging. Not recommended in production. + - `"compact"` : Enables compact logging. 
+ - `"json"` : Enables json logging. You can use a json log collector to ingest these logs and further process them. @@ -1539,16 +1549,15 @@ This configures the restate-managed storage thread pool for performing low-prior - + Defines the level of PerfContext used internally by rocksdb. Default is `enable-count` which should be sufficient for most users. Note that higher levels incur a CPU cost and might slow down the critical path. - - - `disable` : Disable perf stats - - `enable-count` : Enables only count stats - - `enable-time-except-for-mutex` : Count stats and enable time stats except for mutexes - - `enable-time-and-c-p-u-time-except-for-mutex` : Other than time, also measure CPU time counters. Still don't measure time (neither wall time nor CPU time) for mutexes - - `enable-time` : Enables count and time stats + - `"disable"` : Disable perf stats + - `"enable-count"` : Enables only count stats + - `"enable-time-except-for-mutex"` : Count stats and enable time stats except for mutexes + - `"enable-time-and-c-p-u-time-except-for-mutex"` : Other than time, also measure CPU time counters. Still don't measure time (neither wall time nor CPU time) for mutexes + - `"enable-time"` : Enables count and time stats @@ -1561,12 +1570,11 @@ This configures the restate-managed storage thread pool for performing low-prior - + The retry policy for network related errors - No retry strategy. @@ -1812,6 +1820,12 @@ Default: False (statistics enabled) Default: \"except-detailed-timers\" + - `"disable-all"` : Disable all metrics + - `"except-histogram-or-timers"` : Disable timer stats, and skip histogram stats + - `"except-timers"` : Skip timer stats + - `"except-detailed-timers"` : Collect all stats except time inside mutex lock AND time spent on compression. + - `"except-time-for-mutex"` : Collect all stats except the counters requiring to get time inside the mutex lock. + - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. @@ -1919,12 +1933,11 @@ The retry policy can be customized on a service/handler basis, using the respect - + Number of maximum attempts (including the initial) before giving up. No retries if set to 1. - - - `unlimited` : Unlimited retries. + - `"unlimited"` : Unlimited retries. Bounded number of retries. @@ -1935,13 +1948,12 @@ The retry policy can be customized on a service/handler basis, using the respect - + Behavior when max attempts are reached. - - - `pause` : Pause the invocation when max attempts are reached. - - `kill` : Kill the invocation when max attempts are reached. + - `"pause"` : Pause the invocation when max attempts are reached. + - `"kill"` : Kill the invocation when max attempts are reached. 
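Putting the retry fields above together, here is a minimal sketch of what the new `default-retry-policy` block might look like in the server's TOML configuration. The kebab-case key names are assumed from the field titles shown in this reference, and the enclosing section should be verified against the generated default configuration.

```toml
# Sketch only: key names are assumed to mirror the field titles above,
# and the enclosing section may differ in the generated reference.
[default-retry-policy]
initial-interval = "50ms"    # first retry delay, jiff-friendly duration
factor = 2.0                 # exponential growth factor for the next attempt
max-interval = "10s"         # upper bound between attempts
max-attempts = 10            # or "unlimited" for infinite retries
on-max-attempts = "pause"    # alternative: "kill"
```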
diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index 14eeafef..89259598 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -162,11 +162,13 @@ function generateResponseField(propName, propSchema, isRequired = false, level = const optionalType = getTypeFromSchema(optionalVariant); output = `${indent}\n`; - if (description) { output += `${indent} ${description}\n\n`; } - if ((['object', 'oneOf', 'array'].some(t => optionalType.type.includes(t))) && optionalVariant.properties) { + if (optionalVariant.description) { + output += `${indent} ${formatDescription(optionalVariant.description)}\n` + } + if (optionalType.type === 'object' && optionalVariant.properties) { const requiredProps = optionalVariant.required || []; output += `${indent} \n`; output += `${indent} \n`; @@ -181,8 +183,43 @@ function generateResponseField(propName, propSchema, isRequired = false, level = }); output += `${indent} \n`; - } else { - output += `${indent} ${formatDescription(optionalVariant.description)}\n` + } else if (optionalType.type === 'oneOf') { + const oneOfVariants = optionalVariant.oneOf; + output += `${indent} \n`; + + oneOfVariants.forEach((variant, index) => { + let variantName = ''; + if (variant.enum && variant.enum.length === 1) { + let variantValue = variant.enum[0]; + if (typeof variantValue === 'string') { + variantName = `"${variantValue}"`; + } else if (typeof variantValue === 'object') { + variantName = JSON.stringify(variantValue); + } else { + variantName = `${String(variantValue)}`; + } + } else if (variant.title) { + variantName = `Option ${index + 1}: ${variant.title}`; + } else if (variant.const !== undefined) { + variantName = `"${variant.const}"`; + } else { + variantName = `Option ${index + 1}`; + } + if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { + const requiredProps = variant.required || []; + Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { + output += generateResponseField( + subPropName, + subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + + } else { + output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` + } + }); } } else { output += `${indent} \n`; @@ -190,7 +227,14 @@ function generateResponseField(propName, propSchema, isRequired = false, level = variants.forEach((variant, index) => { let variantName = ''; if (variant.enum && variant.enum.length === 1) { - variantName = `${variant.enum[0]}`; + let variantValue = variant.enum[0]; + if (typeof variantValue === 'string') { + variantName = `"${variantValue}"`; + } else if (typeof variantValue === 'object') { + variantName = JSON.stringify(variantValue); + } else { + variantName = `${String(variantValue)}`; + } } else if (variant.title) { variantName = `Option ${index + 1}: ${variant.title}`; } else if (variant.const !== undefined) { @@ -226,15 +270,24 @@ function generateResponseField(propName, propSchema, isRequired = false, level = const variants = propSchema.oneOf console.log(variants); - output += `${indent} \n`; - // output += `${indent} \n`; + output = `${indent}\n`; + if (description) { + output += `${indent} ${description}\n\n`; + } output += `${indent} \n`; variants.forEach((variant, index) => { let variantName = ''; if (variant.enum && variant.enum.length === 1) { - variantName = `${variant.enum[0]}`; + let variantValue = variant.enum[0]; + if (typeof variantValue === 'string') 
{ + variantName = `"${variantValue}"`; + } else if (typeof variantValue === 'object') { + variantName = JSON.stringify(variantValue); + } else { + variantName = `${String(variantValue)}`; + } } else if (variant.title) { variantName = `Option ${index + 1}: ${variant.title}`; } else if (variant.const !== undefined) { @@ -263,7 +316,6 @@ function generateResponseField(propName, propSchema, isRequired = false, level = } }); - // output += `${indent} \n`; } output += `${indent}\n\n`; From 95be9d570b1d49dfd4c024168045d86a6885c22c Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 14:52:26 +0100 Subject: [PATCH 05/10] add examples --- docs/references/server-config.mdx | 141 +++++++++++++--------- scripts/generate-restate-config-viewer.js | 23 +++- 2 files changed, 103 insertions(+), 61 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index 29ccf594..1cd1086e 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -23,7 +23,7 @@ import Intro from "/snippets/common/default-configuration.mdx" - + In order to clean up completed invocations, that is invocations invoked with an idempotency id, or workflows, Restate periodically scans among the completed invocations to check whether they need to be removed or not. This interval sets the scan interval of the cleanup procedure. Default: 1 hour. @@ -164,7 +164,7 @@ Please refer to `default-retry-policy` for the new configuration options. - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -184,7 +184,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -207,22 +207,25 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero human-readable bytes @@ -344,7 +347,7 @@ Default: `None` - automatic snapshots are disabled - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -364,7 +367,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. 
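Because this duration syntax recurs throughout the reference, one illustrative sketch of equivalent spellings may help. The option name used below is hypothetical; only the value format matters.

```toml
# Equivalent duration spellings accepted by the jiff-friendly / ISO 8601 parser.
# The key "initial-interval" is used purely for illustration.
initial-interval = "5 hours"   # human-friendly form
# initial-interval = "5h"      # compact friendly form
# initial-interval = "1h 4m"   # mixed units
# initial-interval = "PT5H"    # ISO 8601 equivalent
```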
@@ -387,6 +390,9 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -471,7 +477,7 @@ These will be used during deployment creation to distinguish between an already - + Non-zero human-readable bytes @@ -489,12 +495,12 @@ These will be used during deployment creation to distinguish between an already - + Controls the interval at which cluster controller polls nodes of the cluster. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -616,7 +622,7 @@ Backoff introduced when sequencer fail to find a suitable spread of log servers - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -636,7 +642,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -659,12 +665,15 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - + Sequencer inactivity timeout The sequencer is allowed to consider itself quiescent if it did not commit records for this period of time. It may use this to sends pre-emptive release/seal check requests to log-servers. @@ -673,7 +682,7 @@ The sequencer is also allowed to use this value as interval to send seal/release - + Log Server RPC timeout Timeout waiting on log server response @@ -701,7 +710,7 @@ Retry policy for log server RPCs - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -721,7 +730,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -744,6 +753,9 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. 
+ +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -756,7 +768,7 @@ The number of records bifrost will attempt to prefetch from replicated loglet's - + Non-zero human-readable bytes @@ -796,7 +808,7 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -816,7 +828,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -839,32 +851,35 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - + Interval to wait between retries of loglet seal failures - + Time interval after which bifrost's auto-recovery mechanism will kick in. This is triggered in scenarios where the control plane took too long to complete loglet reconfigurations. - + Minimum retry duration used by the exponential backoff mechanism for bifrost appends. - + Maximum retry duration used by the exponential backoff mechanism for bifrost appends. - + Optional size of record cache in bytes. If set to 0, record cache will be disabled. Defaults: 250MB @@ -1007,14 +1022,14 @@ A leader sends heartbeat messages to maintain its leadership every heartbeat tic - + The raft tick interval The interval at which the raft node will tick. Decrease this value in order to let the Raft node react more quickly to changes. Note, that every tick comes with an overhead. Moreover, the tick interval directly affects the election timeout. If the election timeout becomes too small, then this can cause cluster instabilities due to frequent leader changes. - + The status update interval The interval at which the raft node will update its status. Decrease this value in order to see more recent status updates. @@ -1034,7 +1049,7 @@ The interval at which the raft node will update its status. Decrease this value - + TCP connection timeout for Restate cluster node-to-node network connections. @@ -1058,7 +1073,7 @@ The interval at which the raft node will update its status. Decrease this value - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1078,7 +1093,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1101,22 +1116,25 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. 
Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - + Timeout for receiving a handshake response from Restate cluster peers. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1131,7 +1149,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Non-zero human-readable bytes @@ -1175,7 +1193,7 @@ Default is 0 which maps to floor(number of CPU cores / 2) - + The size limit of all WAL files Use this to limit the size of WAL files. If the size of all WAL files exceeds this limit, the oldest WAL file will be deleted and if needed, memtable flush will be triggered. @@ -1349,17 +1367,17 @@ Default: true - + TCP connection timeout for connecting to the metadata store. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1383,7 +1401,7 @@ Default: true - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1403,7 +1421,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1426,6 +1444,9 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1466,7 +1487,7 @@ To update existing clusters use the `restatectl` utility. - + This timeout is used when shutting down the various Restate components to drain all the internal queues. @@ -1519,7 +1540,7 @@ This configures the restate-managed storage thread pool for performing low-prior - + Non-zero human-readable bytes @@ -1539,7 +1560,7 @@ This configures the restate-managed storage thread pool for performing low-prior - + This defines the duration after which a write is to be considered in \"stall\" state. For every write that meets this threshold, the system will increment the `restate.rocksdb_stall_flare` gauge, if the write is unstalled, the guage will be updated accordingly. @@ -1560,12 +1581,12 @@ This configures the restate-managed storage thread pool for performing low-prior - `"enable-time"` : Enables count and time stats - + The idle time after which the node will check for metadata updates from metadata store. 
This helps the node detect if it has been operating with stale metadata for extended period of time, primarily because it didn't interact with other peers in the cluster during that period. - + When a node detects that a new metadata version exists, it'll attempt to fetch it from its peers. After this timeout duration has passed, the node will attempt to fetch the metadata from metadata store as well. This is to ensure that the nodes converge quickly while reducing the load on the metadata store. @@ -1589,7 +1610,7 @@ This configures the restate-managed storage thread pool for performing low-prior - + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1609,7 +1630,7 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ - + Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1632,12 +1653,15 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - + The timeout until the node gives up joining a cluster and initializing itself. @@ -1712,14 +1736,14 @@ This file is currently only read on client creation, but this may change in futu - + Sets an interval for HTTP/2 PING frames should be sent to keep a connection alive. You should set this timeout with a value lower than the `abort_timeout`. - + Sets a timeout for receiving an acknowledgement of the keep-alive ping. If the ping is not acknowledged within the timeout, the connection will be closed. @@ -1745,7 +1769,7 @@ If the ping is not acknowledged within the timeout, the connection will be close - + How long to wait for a TCP connection to be established before considering it a failed attempt. @@ -1859,7 +1883,7 @@ Default: 64KiB Non-zero human-readable bytes - + The interval at which the failure detector will tick. Decrease this value for faster reaction to node failures. Note, that every tick comes with an overhead. @@ -1879,7 +1903,7 @@ Default: 64KiB - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1898,12 +1922,12 @@ Note: this threshold does not apply to a cluster that's configured with a single - + The time skew is the maximum acceptable time difference between the local node and the time reported by peers via gossip messages. The time skew is also used to ignore gossip messages that are too old. - + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. @@ -1914,6 +1938,9 @@ Note: this threshold does not apply to a cluster that's configured with a single Unset means no limit. Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. 
+ +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1923,7 +1950,7 @@ The retry policy can be customized on a service/handler basis, using the respect - + Initial interval for the first retry attempt. @@ -1956,7 +1983,7 @@ The retry policy can be customized on a service/handler basis, using the respect - `"kill"` : Kill the invocation when max attempts are reached. - + Maximum interval between retries. diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index 89259598..6fa5052a 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -23,10 +23,10 @@ async function parseJsonSchema(schemaPath) { } } -function formatDescription(description) { +function formatDescription(description, examples) { if (!description) return ''; // Escape HTML-like syntax in code blocks and regular text - return description + const cleanDescription = description .replace(/\n\n/g, '\n\n') // Preserve code blocks with backticks but escape any HTML-like content within .replace(/`([^`]+)`/g, (match, code) => { @@ -38,7 +38,12 @@ function formatDescription(description) { // Convert markdown links to proper format .replace(/\[(.*?)\]\((.*?)\)/g, '[$1]($2)') // Escape quotes for JSX attributes - .replace(/"/g, '\\"'); + .replace(/"/g, '\\"') + + const exampleStr = examples && Array.isArray(examples) && examples.length > 0 + ? '\n\nExamples:\n' + examples.map(ex => `${JSON.stringify(ex, null, 2)}`).join(' or ') + : ''; + return cleanDescription + exampleStr; } function getTypeFromSchema(propSchema) { @@ -118,6 +123,16 @@ function generateResponseField(propName, propSchema, isRequired = false, level = if (propSchema.maximum) { postTags.push(`\'maximum: ${propSchema.maximum}\'`); } + if (propSchema.minLength) { + postTags.push(`\'minLength: ${propSchema.minLength}\'`); + } + if (propSchema.maxLength) { + postTags.push(`\'maxLength: ${propSchema.maxLength}\'`); + } + if (propSchema.pattern) { + postTags.push(`\'pattern: ${propSchema.pattern}\'`); + } + const postAttr = ` post={[${postTags.join(",")}]}`; let output = `${indent}\n`; @@ -166,7 +181,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = output += `${indent} ${description}\n\n`; } if (optionalVariant.description) { - output += `${indent} ${formatDescription(optionalVariant.description)}\n` + output += `${indent} ${formatDescription(optionalVariant.description, optionalVariant.examples)}\n` } if (optionalType.type === 'object' && optionalVariant.properties) { const requiredProps = optionalVariant.required || []; From f4d4bac64f61414c10e2ed8e9c4e398036729441 Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 14:53:04 +0100 Subject: [PATCH 06/10] cleanup --- scripts/generate-restate-config-viewer.js | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index 6fa5052a..d051b02d 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -129,9 +129,6 @@ function generateResponseField(propName, propSchema, isRequired = false, level = if (propSchema.maxLength) { postTags.push(`\'maxLength: ${propSchema.maxLength}\'`); } - if (propSchema.pattern) { - postTags.push(`\'pattern: ${propSchema.pattern}\'`); - } const postAttr = ` post={[${postTags.join(",")}]}`; From 1c28833819594510ad43686e874644dfc74d0d33 Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 
14:55:23 +0100 Subject: [PATCH 07/10] cleanup --- docs/references/server-config.mdx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index 1cd1086e..de1f9945 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -225,7 +225,7 @@ Examples: - + Non-zero human-readable bytes @@ -477,7 +477,7 @@ These will be used during deployment creation to distinguish between an already - + Non-zero human-readable bytes @@ -768,7 +768,7 @@ The number of records bifrost will attempt to prefetch from replicated loglet's - + Non-zero human-readable bytes @@ -879,7 +879,7 @@ Examples: - + Optional size of record cache in bytes. If set to 0, record cache will be disabled. Defaults: 250MB @@ -1149,7 +1149,7 @@ Examples: - + Non-zero human-readable bytes @@ -1193,7 +1193,7 @@ Default is 0 which maps to floor(number of CPU cores / 2) - + The size limit of all WAL files Use this to limit the size of WAL files. If the size of all WAL files exceeds this limit, the oldest WAL file will be deleted and if needed, memtable flush will be triggered. @@ -1540,7 +1540,7 @@ This configures the restate-managed storage thread pool for performing low-prior - + Non-zero human-readable bytes From 2374a75b4efb829dfa31cf8b15cbeb582f79eec0 Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Tue, 4 Nov 2025 17:17:03 +0100 Subject: [PATCH 08/10] make object with oneof and properties workf --- docs/references/server-config.mdx | 320 ++++++++++++++++++++++ scripts/generate-restate-config-viewer.js | 58 +++- 2 files changed, 366 insertions(+), 12 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index de1f9945..db4d0616 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -1366,7 +1366,326 @@ Default: true The metadata client type to store metadata + + + + + + + + Restate metadata server address list + + + + + + + + + + + TCP connection timeout for connecting to the metadata store. + + + + + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + + + + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + + + + Backoff policy used by the metadata client when it encounters concurrent modifications. + + + + + No retry strategy. + + + + + + + + Retry with a fixed delay strategy. + + + + + + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. + + + + + + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + + + + The factor to use to compute the next retry attempt. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + Maximum interval between retries. 
+ +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + + + + + + + + + + + + + + Etcd cluster node address list + + + + + TCP connection timeout for connecting to the metadata store. + + + + + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + + + + Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + + + + Backoff policy used by the metadata client when it encounters concurrent modifications. + + + + + No retry strategy. + + + + + + + + Retry with a fixed delay strategy. + + + + + + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. + + + + + + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + + + + The factor to use to compute the next retry attempt. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + Maximum interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + + + + + + + + + + + + + This location will be used to persist cluster metadata. Takes the form of a URL with `s3://` as the protocol and bucket name as the authority, plus an optional prefix specified as the path component. + +Example: `s3://bucket/prefix` + + + + + Definition of a retry policy + + + + + No retry strategy. + + + + + + + + Retry with a fixed delay strategy. + + + + + + Interval between retries. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + + + Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. + + + + + + Initial interval for the first retry attempt. + +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + + + + The factor to use to compute the next retry attempt. + + + + + Number of maximum attempts before giving up. Infinite retries if unset. + + + + + Maximum interval between retries. 
+ +Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. + + Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + + + + + + + The AWS configuration profile to use for S3 object store destinations. If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. + + + + + AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. + + + + + Username for Minio, or consult the service documentation for other S3-compatible stores. + + + + + Password for Minio, or consult the service documentation for other S3-compatible stores. + + + + + This is only needed with short-term STS session credentials. + + + + + When you use Amazon S3, this is typically inferred from the region and there is no need to set it. With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. + + + + + Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. + + + TCP connection timeout for connecting to the metadata store. @@ -1453,6 +1772,7 @@ Examples: + diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index d051b02d..cd276608 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -142,18 +142,52 @@ function generateResponseField(propName, propSchema, isRequired = false, level = if (type === 'object' && propSchema.properties) { const requiredProps = propSchema.required || []; output += `${indent} \n`; - output += `${indent} \n`; - - Object.entries(propSchema.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - - output += `${indent} \n`; + + if (propSchema.oneOf) { + const variants = propSchema.oneOf; + output += `${indent} \n`; + + variants.forEach((variant, index) => { + let variantName = ''; + + output += `${indent}\n`; + // add description + output += `${indent} \n`; + + Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { + output += generateResponseField( + subPropName, + subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + + Object.entries(propSchema.properties).forEach(([subPropName, subPropSchema]) => { + output += generateResponseField( + subPropName, + subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + output += `${indent} \n`; + output += `${indent} \n`; + }); + } else { + output += `${indent} \n`; + + Object.entries(propSchema.properties).forEach(([subPropName, subPropSchema]) => { + output += generateResponseField( + subPropName, + subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + + output += `${indent} \n`; + } } // Handle array items From 
e006e30377fcb663304a1187b344f5aa5fc5f8ef Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Wed, 5 Nov 2025 16:23:28 +0100 Subject: [PATCH 09/10] Fix rendering restate config --- docs/references/server-config.mdx | 1456 +++++++++++++-------- scripts/generate-restate-config-viewer.js | 281 ++-- 2 files changed, 968 insertions(+), 769 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index db4d0616..08055b92 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -8,32 +8,29 @@ import Intro from "/snippets/common/default-configuration.mdx" - - Worker options - + - - Internal queue for partition processor communication - + - - The number of timers in memory limit is used to bound the amount of timers loaded in memory. If this limit is set, when exceeding it, the timers farther in the future will be spilled to disk. + + Num timers in memory limit: The number of timers in memory limit is used to bound the amount of timers loaded in memory. If this limit is set, when exceeding it, the timers farther in the future will be spilled to disk. - - In order to clean up completed invocations, that is invocations invoked with an idempotency id, or workflows, Restate periodically scans among the completed invocations to check whether they need to be removed or not. This interval sets the scan interval of the cleanup procedure. Default: 1 hour. + + Cleanup interval: In order to clean up completed invocations, that is invocations invoked with an idempotency id, or workflows, Restate periodically scans among the completed invocations to check whether they need to be removed or not. This interval sets the scan interval of the cleanup procedure. Default: 1 hour. - +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Storage options + + - + How many partitions to divide memory across? By default this uses the value defined in `default-num-partitions` in the common section of the config. @@ -45,53 +42,53 @@ By default this uses the value defined in `default-num-partitions` in the common The total is divided evenly across partitions. The divisor is defined in `num-partitions-to-share-memory-budget`. If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - + The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to memtables (See `rocksdb-total-memtables-ratio` in common). The budget is then divided evenly across partitions. The divisor is defined in `num-partitions-to-share-memory-budget` - - Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. + + Disable Direct IO for reads: Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - - Use O_DIRECT for writes in background flush and compactions. + + Disable Direct IO for flush and compactions: Use O_DIRECT for writes in background flush and compactions. - - The default depends on the different rocksdb use-cases at Restate. + + Disable WAL: The default depends on the different rocksdb use-cases at Restate. 
Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - - Default: the number of CPU cores on this node. + + RocksDB max background jobs (flushes and compactions): Default: the number of CPU cores on this node. - If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. + RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. + RocksDB statistics level: StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -104,47 +101,45 @@ Default: \"except-detailed-timers\" - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - Verbosity of the LOG. + + RocksDB log level: Verbosity of the LOG. Default: \"error\" Verbosity of the LOG. - - Number of info LOG files to keep + + RocksDB log keep file num: Number of info LOG files to keep Default: 1 - - Max size of info LOG file + + RocksDB log max file size: Max size of info LOG file Default: 64MB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - Uncompressed block size + RocksDB block size: Uncompressed block size Default: 64KiB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - - Invoker options - + - - This is **deprecated** and will be removed in the next Restate releases. + + Retry policy: This is **deprecated** and will be removed in the next Restate releases. Please refer to `default-retry-policy` for the new configuration options. @@ -153,7 +148,11 @@ Please refer to `default-retry-policy` for the new configuration options. No retry strategy. - + + + +Set `type: "none"` + @@ -161,18 +160,25 @@ Please refer to `default-retry-policy` for the new configuration options. Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -181,32 +187,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. 
Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -215,82 +228,88 @@ Examples: - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - - Non-zero human-readable bytes + + Non-zero human-readable bytes: Non-zero human-readable bytes - - Threshold to fail the invocation in case protocol messages coming from a service are larger than the specified amount. + + Message size limit: Threshold to fail the invocation in case protocol messages coming from a service are larger than the specified amount. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - - Temporary directory to use for the invoker temporary files. If empty, the system temporary directory will be used instead. + + Temporary directory: Temporary directory to use for the invoker temporary files. If empty, the system temporary directory will be used instead. - - Defines the threshold after which queues invocations will spill to disk at the path defined in `tmp-dir`. In other words, this is the number of invocations that can be kept in memory before spilling to disk. This is a per-partition limit. + + Spill invocations to disk: Defines the threshold after which queues invocations will spill to disk at the path defined in `tmp-dir`. In other words, this is the number of invocations that can be kept in memory before spilling to disk. This is a per-partition limit. - - Number of concurrent invocations that can be processed by the invoker. + + Limit number of concurrent invocations from this node: Number of concurrent invocations that can be processed by the invoker. 
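The invoker limits documented above might be combined roughly as follows. The `[worker.invoker]` section name and the kebab-case keys are assumptions derived from the field titles, and the values are illustrative rather than recommended defaults.

```toml
# Sketch of the invoker limits above; section and key names are assumed
# (kebab-case of the field titles), values are purely illustrative.
[worker.invoker]
inactivity-timeout = "1m"              # suspend an invocation that makes no progress
abort-timeout = "1m"                   # then abort it if it still does not react
message-size-warning = "10.0 MB"       # warn when protocol messages grow this large
message-size-limit = "32.0 MB"         # fail the invocation above this size
in-memory-queue-length-limit = 66560   # spill queued invocations to tmp-dir beyond this
concurrent-invocations-limit = 1000    # per-node cap on in-flight invocations
```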
- - Configures throttling for service invocations at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which invocations can be processed, helping to prevent resource exhaustion and maintain system stability under high load. + + Invocation throttling: Configures throttling for service invocations at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which invocations can be processed, helping to prevent resource exhaustion and maintain system stability under high load. The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and invocations are processed without throttling. - Throttling options per invoker. + Throttling options: Throttling options per invoker. - The rate at which the tokens are replenished. + Refill rate: The rate at which the tokens are replenished. Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - - The maximum number of tokens the bucket can hold. Default to the rate value if not specified. + + Burst capacity: The maximum number of tokens the bucket can hold. Default to the rate value if not specified. - - Configures rate limiting for service actions at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which actions can be processed, helping to prevent resource exhaustion and maintain system stability under high load. + + Action throttling: Configures rate limiting for service actions at the node level. This throttling mechanism uses a token bucket algorithm to control the rate at which actions can be processed, helping to prevent resource exhaustion and maintain system stability under high load. The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and actions are processed without throttling. - Throttling options per invoker. + Throttling options: Throttling options per invoker. - The rate at which the tokens are replenished. + Refill rate: The rate at which the tokens are replenished. Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|min|minute`, or `h|hr|hour`. unit defaults to per second if not specified. - - The maximum number of tokens the bucket can hold. Default to the rate value if not specified. + + Burst capacity: The maximum number of tokens the bucket can hold. Default to the rate value if not specified. @@ -300,25 +319,25 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m - - The maximum number of commands a partition processor will apply in a batch. The larger this value is, the higher the throughput and latency are. + + Maximum command batch size for partition processors: The maximum number of commands a partition processor will apply in a batch. The larger this value is, the higher the throughput and latency are. - - Snapshots provide a mechanism for safely trimming the log and efficient bootstrapping of new worker nodes. + + Snapshots: Snapshots provide a mechanism for safely trimming the log and efficient bootstrapping of new worker nodes. - - Base URL for cluster snapshots. Supports `s3://` and `file://` protocol scheme. S3-compatible object stores must support ETag-based conditional writes. 
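For the snapshot options above, a small sketch of how a destination and creation frequency might be configured. The `[worker.snapshots]` section and key names are assumed from the field titles.

```toml
# Sketch only: section and key names are assumed, values are illustrative.
[worker.snapshots]
destination = "s3://my-bucket/cluster-snapshots"   # or a local "file://..." URL
snapshot-interval-num-records = 10000              # snapshot roughly every N new log records
```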
+ + Snapshot destination URL: Base URL for cluster snapshots. Supports `s3://` and `file://` protocol scheme. S3-compatible object stores must support ETag-based conditional writes. Default: `None` - - Number of log records that trigger a snapshot to be created. + + Automatic snapshot creation frequency: Number of log records that trigger a snapshot to be created. As snapshots are created asynchronously, the actual number of new records that will trigger a snapshot will vary. The counter for the subsequent snapshot begins from the LSN at which the previous snapshot export was initiated. Only leader Partition Processors will take snapshots for a given partition. @@ -328,15 +347,19 @@ Default: `None` - automatic snapshots are disabled - - A retry policy for dealing with retryable object store errors. + + Error retry policy: A retry policy for dealing with retryable object store errors. No retry strategy. - + + + +Set `type: "none"` + @@ -344,18 +367,25 @@ Default: `None` - automatic snapshots are disabled Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -364,32 +394,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -398,38 +435,38 @@ Examples: - - The AWS configuration profile to use for S3 object store destinations. If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. + + AWS profile: The AWS configuration profile to use for S3 object store destinations. 
If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. - - AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. + + AWS region: AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. - - Username for Minio, or consult the service documentation for other S3-compatible stores. + + AWS access key: Username for Minio, or consult the service documentation for other S3-compatible stores. - - Password for Minio, or consult the service documentation for other S3-compatible stores. + + AWS secret key: Password for Minio, or consult the service documentation for other S3-compatible stores. - - This is only needed with short-term STS session credentials. + + AWS session token: This is only needed with short-term STS session credentials. - - When you use Amazon S3, this is typically inferred from the region and there is no need to set it. With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. + + Object store API endpoint URL override: When you use Amazon S3, this is typically inferred from the region and there is no need to set it. With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. - - Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. + + Allow insecure HTTP: Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. @@ -439,23 +476,21 @@ Examples: - - Admin server options - + - - Address to bind for the Admin APIs. + + Endpoint address: Address to bind for the Admin APIs. - - Optional advertised Admin API endpoint. + + Advertised Admin endpoint: Optional advertised Admin API endpoint. - List of header names considered routing headers. + Deployment routing headers: List of header names considered routing headers. These will be used during deployment creation to distinguish between an already existing deployment and a new deployment. @@ -467,75 +502,77 @@ These will be used during deployment creation to distinguish between an already - - Concurrency limit for the Admin APIs. Default is unlimited. + + Concurrency limit: Concurrency limit for the Admin APIs. Default is unlimited. - - Storage query engine options - + - - Non-zero human-readable bytes + + Non-zero human-readable bytes: Non-zero human-readable bytes - - The path to spill to + + Temp folder to use for spill: The path to spill to - - The degree of parallelism to use for query execution (Defaults to the number of available cores). + + Default query parallelism: The degree of parallelism to use for query execution (Defaults to the number of available cores). - - Controls the interval at which cluster controller polls nodes of the cluster. 
+ + Controller heartbeats: Controls the interval at which cluster controller polls nodes of the cluster. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - + Disable serving the Restate Web UI on the admin port. Default is `false`. - + - - Ingress options - + - - The address to bind for the ingress. + + Bind address: The address to bind for the ingress. - - Local concurrency limit to use to limit the amount of concurrent requests. If exceeded, the ingress will reply immediately with an appropriate status code. Default is unlimited. + + Concurrency limit: Local concurrency limit to use to limit the amount of concurrent requests. If exceeded, the ingress will reply immediately with an appropriate status code. Default is unlimited. - + - Configuration options to connect to a Kafka cluster. + Kafka cluster options: Configuration options to connect to a Kafka cluster. @@ -545,7 +582,7 @@ These will be used during deployment creation to distinguish between an already - Initial list of brokers (host or host:port). + Servers: Initial list of brokers (host or host:port). @@ -561,38 +598,36 @@ These will be used during deployment creation to distinguish between an already - - Ingress endpoint that the Web UI should use to interact with. + + Ingress endpoint: Ingress endpoint that the Web UI should use to interact with. - - Bifrost options - + - - Default: Replicated + + The default kind of loglet to be used: Default: Replicated - `"local"` : A local rocksdb-backed loglet. - `"replicated"` : Replicated loglets are restate's native log replication system. This requires `log-server` role to run on enough nodes in the cluster. - + Configuration of local loglet provider - + Configuration of replicated loglet provider - + Maximum number of inflight records sequencer can accept Once this maximum is hit, sequencer will induce back pressure on clients. This controls the total number of records regardless of how many batches. @@ -601,8 +636,8 @@ Note that this will be increased to fit the biggest batch of records being enque - - Sequencer retry policy + + Retry policy: Sequencer retry policy Backoff introduced when sequencer fail to find a suitable spread of log servers @@ -611,7 +646,11 @@ Backoff introduced when sequencer fail to find a suitable spread of log servers No retry strategy. - + + + +Set `type: "none"` + @@ -619,18 +658,25 @@ Backoff introduced when sequencer fail to find a suitable spread of log servers Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -639,32 +685,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. 
The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -673,24 +726,30 @@ Examples: - - Sequencer inactivity timeout + + Non-zero human-readable duration: Sequencer inactivity timeout The sequencer is allowed to consider itself quiescent if it did not commit records for this period of time. It may use this to sends pre-emptive release/seal check requests to log-servers. The sequencer is also allowed to use this value as interval to send seal/release checks even if it's not quiescent. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" + - - Log Server RPC timeout + + Non-zero human-readable duration: Log Server RPC timeout Timeout waiting on log server response +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" + - - Log Server RPC retry policy + + Retry policy: Log Server RPC retry policy Retry policy for log server RPCs @@ -699,7 +758,11 @@ Retry policy for log server RPCs No retry strategy. - + + + +Set `type: "none"` + @@ -707,18 +770,25 @@ Retry policy for log server RPCs Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -727,32 +797,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. 
+ Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -761,19 +838,19 @@ Examples: - + Maximum number of records to prefetch from log servers The number of records bifrost will attempt to prefetch from replicated loglet's log-servers for every loglet reader (e.g. partition processor). Note that this mainly impacts readers that are not co-located with the loglet sequencer (i.e. partition processor followers). - - Non-zero human-readable bytes + + Non-zero human-readable bytes: Non-zero human-readable bytes - + Trigger to prefetch more records When read-ahead is used (readahead-records), this value (percentage in float) will determine when readers should trigger a prefetch for another batch to fill up the buffer. For instance, if this value is 0.3, then bifrost will trigger a prefetch when 30% or more of the read-ahead slots become available (e.g. partition processor consumed records and freed up enough slots). @@ -789,15 +866,19 @@ Value must be between 0 and 1. It will be clamped at `1.0`. - - Retry policy to use when bifrost waits for reconfiguration to complete during read operations + + Read retry policy: Retry policy to use when bifrost waits for reconfiguration to complete during read operations No retry strategy. - + + + +Set `type: "none"` + @@ -805,18 +886,25 @@ Value must be between 0 and 1. It will be clamped at `1.0`. Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -825,32 +913,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. 
+ + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -859,33 +954,45 @@ Examples: - - Interval to wait between retries of loglet seal failures + + Seal retry interval: Interval to wait between retries of loglet seal failures + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Time interval after which bifrost's auto-recovery mechanism will kick in. This is triggered in scenarios where the control plane took too long to complete loglet reconfigurations. + + Auto recovery threshold: Time interval after which bifrost's auto-recovery mechanism will kick in. This is triggered in scenarios where the control plane took too long to complete loglet reconfigurations. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Minimum retry duration used by the exponential backoff mechanism for bifrost appends. + + Append retry minimum interval: Minimum retry duration used by the exponential backoff mechanism for bifrost appends. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Maximum retry duration used by the exponential backoff mechanism for bifrost appends. + + Append retry maximum interval: Maximum retry duration used by the exponential backoff mechanism for bifrost appends. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Optional size of record cache in bytes. If set to 0, record cache will be disabled. Defaults: 250MB + + In-memory RecordCache memory limit: Optional size of record cache in bytes. If set to 0, record cache will be disabled. Defaults: 250MB - - When enabled, automatic improvement periodically checks with the loglet provider if the loglet configuration can be improved by performing a reconfiguration. + + Disable Automatic Improvement: When enabled, automatic improvement periodically checks with the loglet provider if the loglet configuration can be improved by performing a reconfiguration. This allows the log to pick up replication property changes, apply better placement of replicas, or for other reasons. @@ -894,12 +1001,10 @@ This allows the log to pick up replication property changes, apply better placem - - Metadata store options - + - + Limit number of in-flight requests Number of in-flight metadata store requests. @@ -911,60 +1016,60 @@ Number of in-flight metadata store requests. If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - + The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to memtables (See `rocksdb-total-memtables-ratio` in common). - + Auto join the metadata cluster when being started Defines whether this node should auto join the metadata store cluster when being started for the first time. 
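+
+As a rough illustration of the metadata store settings described above, a hedged TOML-style sketch follows. The key names here are assumptions that mirror the field descriptions; only `rocksdb-memory-ratio` is spelled as referenced in the surrounding text, and the values are arbitrary.
+
+```toml
+# Illustrative sketch -- key names are assumptions, not verbatim keys.
+[metadata-server]
+# Cap on in-flight metadata store requests.
+request-queue-length = 32
+# Fixed memtable budget; when set it overrides `rocksdb-memory-ratio`.
+rocksdb-memory-budget = "128 MB"
+# Otherwise memtables receive a proportional share of the available memory.
+rocksdb-memory-ratio = 0.5
+# Join the metadata store cluster automatically on first start.
+auto-join = true
+```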
- - Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. + + Disable Direct IO for reads: Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - - Use O_DIRECT for writes in background flush and compactions. + + Disable Direct IO for flush and compactions: Use O_DIRECT for writes in background flush and compactions. - - The default depends on the different rocksdb use-cases at Restate. + + Disable WAL: The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - - Default: the number of CPU cores on this node. + + RocksDB max background jobs (flushes and compactions): Default: the number of CPU cores on this node. - If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. + RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. + RocksDB statistics level: StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -977,92 +1082,105 @@ Default: \"except-detailed-timers\" - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - Verbosity of the LOG. + + RocksDB log level: Verbosity of the LOG. Default: \"error\" Verbosity of the LOG. - - Number of info LOG files to keep + + RocksDB log keep file num: Number of info LOG files to keep Default: 1 - - Max size of info LOG file + + RocksDB log max file size: Max size of info LOG file Default: 64MB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - Uncompressed block size + RocksDB block size: Uncompressed block size Default: 64KiB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - + The number of ticks before triggering an election The number of ticks before triggering an election. The value must be larger than `raft_heartbeat_tick`. It's recommended to set `raft_election_tick = 10 * raft_heartbeat_tick`. Decrease this value if you want to react faster to failed leaders. Note, decreasing this value too much can lead to cluster instabilities due to falsely detecting dead leaders. - + The number of ticks before sending a heartbeat A leader sends heartbeat messages to maintain its leadership every heartbeat ticks. Decrease this value to send heartbeats more often. 
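+
+To make the recommended relationship between the two tick settings concrete, here is a hedged sketch; the kebab-case spellings are assumptions, but the ratio follows the guidance above (election tick roughly ten times the heartbeat tick).
+
+```toml
+# Illustrative sketch -- key spellings are assumptions.
+[metadata-server]
+# The leader heartbeats every heartbeat tick; followers call an election
+# after raft-election-tick silent ticks (recommended: 10x the heartbeat tick).
+raft-heartbeat-tick = 2
+raft-election-tick = 20
+```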
- - The raft tick interval + + Non-zero human-readable duration: The raft tick interval The interval at which the raft node will tick. Decrease this value in order to let the Raft node react more quickly to changes. Note, that every tick comes with an overhead. Moreover, the tick interval directly affects the election timeout. If the election timeout becomes too small, then this can cause cluster instabilities due to frequent leader changes. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" + - - The status update interval + + Non-zero human-readable duration: The status update interval The interval at which the raft node will update its status. Decrease this value in order to see more recent status updates. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" + - - The threshold for trimming the raft log. The log will be trimmed if the number of apply entries exceeds this threshold. The default value is `1000`. + + The raft log trim threshold: The threshold for trimming the raft log. The log will be trimmed if the number of apply entries exceeds this threshold. The default value is `1000`. - - Common network configuration options for communicating with Restate cluster nodes. Note that similar keys are present in other config sections, such as in Service Client options. + + Networking options: Common network configuration options for communicating with Restate cluster nodes. Note that similar keys are present in other config sections, such as in Service Client options. - - TCP connection timeout for Restate cluster node-to-node network connections. + + Connect timeout: TCP connection timeout for Restate cluster node-to-node network connections. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Retry policy to use for internal node-to-node networking. + + Connect retry policy: Retry policy to use for internal node-to-node networking. No retry strategy. - + + + +Set `type: "none"` + @@ -1070,18 +1188,25 @@ The interval at which the raft node will update its status. Decrease this value Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -1090,32 +1215,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. 
+ Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1124,41 +1256,48 @@ Examples: - - Timeout for receiving a handshake response from Restate cluster peers. + + Handshake timeout: Timeout for receiving a handshake response from Restate cluster peers. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + HTTP/2 Keep Alive Interval: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + HTTP/2 Keep Alive Timeout: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. - +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - HTTP/2 Adaptive Window + + - - Disables Zstd compression for internal gRPC network connections + + Disable Compression: Disables Zstd compression for internal gRPC network connections - - Non-zero human-readable bytes + + Non-zero human-readable bytes: Non-zero human-readable bytes - - Configuration is only used on nodes running with `log-server` role. + + Log server options: Configuration is only used on nodes running with `log-server` role. @@ -1167,10 +1306,10 @@ Examples: If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - + The memory budget for rocksdb memtables as ratio This defines the total memory for rocksdb as a ratio of all memory available to the log-server. @@ -1179,12 +1318,12 @@ This defines the total memory for rocksdb as a ratio of all memory available to - + Disable fsync of WAL on every batch - + The maximum number of subcompactions to run in parallel. Setting this to 1 means no sub-compactions are allowed (i.e. only 1 thread will do the compaction). @@ -1194,7 +1333,7 @@ Default is 0 which maps to floor(number of CPU cores / 2) - The size limit of all WAL files + Human-readable bytes: The size limit of all WAL files Use this to limit the size of WAL files. If the size of all WAL files exceeds this limit, the oldest WAL file will be deleted and if needed, memtable flush will be triggered. @@ -1204,55 +1343,55 @@ Default is `0` which translates into 6 times the memory allocated for membtables - + Trigger a commit when the batch size exceeds this threshold. Set to 0 or 1 to commit the write batch on every command. 
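+
+For the log-server WAL and batching knobs just described, a hedged TOML-style sketch; the key names are assumptions that paraphrase the field titles and apply to nodes running the `log-server` role.
+
+```toml
+# Illustrative sketch -- key names are assumptions, not verbatim keys.
+[log-server]
+# Rotate WAL files / flush memtables once all WAL files exceed this size
+# (0 translates to roughly 6x the memory allocated for memtables).
+rocksdb-max-wal-size = "512 MB"
+# Skip fsync of the WAL on every write batch (throughput over durability).
+rocksdb-disable-wal-fsync = true
+# Commit the write batch once this many commands are queued
+# (0 or 1 commits on every command).
+writer-batch-commit-count = 500
+```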
- + The number of messages that can queue up on input network stream while request processor is busy. - - Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. + + Disable Direct IO for reads: Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - - Use O_DIRECT for writes in background flush and compactions. + + Disable Direct IO for flush and compactions: Use O_DIRECT for writes in background flush and compactions. - - The default depends on the different rocksdb use-cases at Restate. + + Disable WAL: The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - - Default: the number of CPU cores on this node. + + RocksDB max background jobs (flushes and compactions): Default: the number of CPU cores on this node. - If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. + RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. + RocksDB statistics level: StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -1265,41 +1404,41 @@ Default: \"except-detailed-timers\" - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - Verbosity of the LOG. + + RocksDB log level: Verbosity of the LOG. Default: \"error\" Verbosity of the LOG. - - Number of info LOG files to keep + + RocksDB log keep file num: Number of info LOG files to keep Default: 1 - - Max size of info LOG file + + RocksDB log max file size: Max size of info LOG file Default: 64MB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - Uncompressed block size + RocksDB block size: Uncompressed block size Default: 64KiB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - + Defines the roles which this Restate node should run, by default the node starts with all roles. @@ -1316,13 +1455,13 @@ Default: 64KiB - - Unique name for this node in the cluster. The node must not change unless it's started with empty local store. It defaults to the node's hostname. + + Node Name: Unique name for this node in the cluster. The node must not change unless it's started with empty local store. It defaults to the node's hostname. - [PREVIEW FEATURE] Setting the location allows Restate to form a tree-like cluster topology. 
The value is written in the format of \"region[.zone]\" to assign this node to a specific region, or to a zone within a region. + Node Location: [PREVIEW FEATURE] Setting the location allows Restate to form a tree-like cluster topology. The value is written in the format of \"region[.zone]\" to assign this node to a specific region, or to a zone within a region. The value of region and zone is arbitrary but whitespace and `.` are disallowed. @@ -1334,18 +1473,18 @@ Examples - `us-west` -- the node is in the `us-west` region. - `us-west.a1` -- t - + If set, the node insists on acquiring this node ID. - - A unique identifier for the cluster. All nodes in the same cluster should have the same. + + Cluster name: A unique identifier for the cluster. All nodes in the same cluster should have the same. - - If true, then this node is allowed to automatically provision as a new cluster. This node *must* have an admin role and a new nodes configuration will be created that includes this node. + + Auto cluster provisioning: If true, then this node is allowed to automatically provision as a new cluster. This node *must* have an admin role and a new nodes configuration will be created that includes this node. auto-provision is allowed by default in development mode and is disabled if restate-server runs with `--production` flag to prevent cluster nodes from forming their own clusters, rather than forming a single cluster. @@ -1357,24 +1496,25 @@ Default: true - + The working directory which this Restate node should use for relative paths. The default is `restate-data` under the current working directory. - - The metadata client type to store metadata + + Metadata client options: The metadata client type to store metadata - - + + + +Set `type: "replicated"` + - Restate metadata server address list - @@ -1383,30 +1523,43 @@ Default: true - - TCP connection timeout for connecting to the metadata store. + + Connect timeout: TCP connection timeout for connecting to the metadata store. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Metadata Store Keep Alive Interval: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Metadata Store Keep Alive Timeout: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Backoff policy used by the metadata client when it encounters concurrent modifications. + + Backoff policy used by the metadata client: Backoff policy used by the metadata client when it encounters concurrent modifications. No retry strategy. - + + + +Set `type: "none"` + @@ -1414,18 +1567,25 @@ Default: true Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. 
Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -1434,32 +1594,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1469,43 +1636,56 @@ Examples: - - - + + + +Set `type: "etcd"` + - Etcd cluster node address list - - - TCP connection timeout for connecting to the metadata store. + + Connect timeout: TCP connection timeout for connecting to the metadata store. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Metadata Store Keep Alive Interval: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Metadata Store Keep Alive Timeout: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Backoff policy used by the metadata client when it encounters concurrent modifications. + + Backoff policy used by the metadata client: Backoff policy used by the metadata client when it encounters concurrent modifications. No retry strategy. - + + + +Set `type: "none"` + @@ -1513,18 +1693,25 @@ The addresses are formatted as `host:port`"> Retry with a fixed delay strategy. 
- + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -1533,32 +1720,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1568,28 +1762,34 @@ Examples: - - - + + + +Set `type: "object-store"` + - This location will be used to persist cluster metadata. Takes the form of a URL with `s3://` as the protocol and bucket name as the authority, plus an optional prefix specified as the path component. + Object store path for metadata storage: This location will be used to persist cluster metadata. Takes the form of a URL with `s3://` as the protocol and bucket name as the authority, plus an optional prefix specified as the path component. Example: `s3://bucket/prefix` - - Definition of a retry policy + + Error retry policy: Definition of a retry policy No retry strategy. - + + + +Set `type: "none"` + @@ -1597,18 +1797,25 @@ Example: `s3://bucket/prefix` Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -1617,32 +1824,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. 
- + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1651,65 +1865,78 @@ Examples: - - The AWS configuration profile to use for S3 object store destinations. If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. + + AWS profile: The AWS configuration profile to use for S3 object store destinations. If you use named profiles in your AWS configuration, you can replace all the other settings with a single profile reference. See the [AWS documentation on profiles] (https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html) for more. - - AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. + + AWS region: AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. - - Username for Minio, or consult the service documentation for other S3-compatible stores. + + AWS access key: Username for Minio, or consult the service documentation for other S3-compatible stores. - - Password for Minio, or consult the service documentation for other S3-compatible stores. + + AWS secret key: Password for Minio, or consult the service documentation for other S3-compatible stores. - - This is only needed with short-term STS session credentials. + + AWS session token: This is only needed with short-term STS session credentials. - - When you use Amazon S3, this is typically inferred from the region and there is no need to set it. With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. + + Object store API endpoint URL override: When you use Amazon S3, this is typically inferred from the region and there is no need to set it. 
With other object stores, you will have to provide an appropriate HTTP(S) endpoint. If *not* using HTTPS, also set `aws-allow-http` to `true`. - - Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. + + Allow insecure HTTP: Allow plain HTTP to be used with the object store endpoint. Required when the endpoint URL that isn't using HTTPS. - - TCP connection timeout for connecting to the metadata store. + + Connect timeout: TCP connection timeout for connecting to the metadata store. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Metadata Store Keep Alive Interval: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Metadata Store Keep Alive Timeout: Non-zero duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Backoff policy used by the metadata client when it encounters concurrent modifications. + + Backoff policy used by the metadata client: Backoff policy used by the metadata client when it encounters concurrent modifications. No retry strategy. - + + + +Set `type: "none"` + @@ -1717,18 +1944,25 @@ Examples: Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -1737,32 +1971,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. 
Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1772,7 +2013,6 @@ Examples: - @@ -1780,13 +2020,13 @@ Examples: - + Address that other nodes will use to connect to this node. Default is `http://127.0.0.1:5122/` - - Number of partitions that will be provisioned during initial cluster provisioning. partitions are the logical shards used to process messages. + + Partitions: Number of partitions that will be provisioned during initial cluster provisioning. partitions are the logical shards used to process messages. Cannot be higher than `65535` (You should almost never need as many partitions anyway) @@ -1798,8 +2038,8 @@ Default: 24 - - Configures the global default replication factor to be used by the the system. + + Default replication factor: Configures the global default replication factor to be used by the the system. Note that this value only impacts the cluster initial provisioning and will not be respected after the cluster has been provisioned. @@ -1807,23 +2047,26 @@ To update existing clusters use the `restatectl` utility. - - This timeout is used when shutting down the various Restate components to drain all the internal queues. + + Shutdown grace timeout: This timeout is used when shutting down the various Restate components to drain all the internal queues. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Size of the default thread pool used to perform internal tasks. If not set, it defaults to the number of CPU cores. + + Default async runtime thread pool: Size of the default thread pool used to perform internal tasks. If not set, it defaults to the number of CPU cores. - - Log filter configuration. Can be overridden by the `RUST_LOG` environment variable. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. + + Logging Filter: Log filter configuration. Can be overridden by the `RUST_LOG` environment variable. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. - - Format to use when logging. + + Logging format: Format to use when logging. - `"pretty"` : Enables verbose logging. Not recommended in production. @@ -1831,8 +2074,8 @@ To update existing clusters use the `restatectl` utility. - `"json"` : Enables json logging. You can use a json log collector to ingest these logs and further process them. - - Disable ANSI terminal codes for logs. This is useful when the log collector doesn't support processing ANSI terminal codes. + + Disable ANSI in log output: Disable ANSI terminal codes for logs. This is useful when the log collector doesn't support processing ANSI terminal codes. @@ -1841,57 +2084,60 @@ To update existing clusters use the `restatectl` utility. - + Disable prometheus metric recording and reporting. Default is `false`. - + Storage high priority thread pool This configures the restate-managed storage thread pool for performing high-priority or latency-sensitive storage tasks when the IO operation cannot be performed on in-memory caches. 
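+
+Pulling a few of the node-wide options above together, a hedged sketch of how they might sit at the top level of the configuration file; all key spellings are assumptions and the values are arbitrary examples.
+
+```toml
+# Illustrative sketch -- key names are assumptions, not verbatim keys.
+# Provision 24 partitions and a replication factor of 2 at first start.
+default-num-partitions = 24
+default-replication = 2
+# Give internal queues up to one minute to drain on shutdown.
+shutdown-timeout = "1m"
+# Structured logs without ANSI escape codes, friendlier to log collectors.
+log-filter = "warn,restate=info"
+log-format = "json"
+log-disable-ansi-codes = true
+```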
- + Storage low priority thread pool This configures the restate-managed storage thread pool for performing low-priority or latency-insensitive storage tasks. - - Non-zero human-readable bytes + + Non-zero human-readable bytes: Non-zero human-readable bytes - - The memory size used across all memtables (ratio between 0 to 1.0). This limits how much memory memtables can eat up from the value in rocksdb-total-memory-limit. When set to 0, memtables can take all available memory up to the value specified in rocksdb-total-memory-limit. This value will be sanitized to 1.0 if outside the valid bounds. + + Rocksdb total memtable size ratio: The memory size used across all memtables (ratio between 0 to 1.0). This limits how much memory memtables can eat up from the value in rocksdb-total-memory-limit. When set to 0, memtables can take all available memory up to the value specified in rocksdb-total-memory-limit. This value will be sanitized to 1.0 if outside the valid bounds. - - The number of threads to reserve to Rocksdb background tasks. Defaults to the number of cores on the machine. + + Rocksdb Background Threads: The number of threads to reserve to Rocksdb background tasks. Defaults to the number of cores on the machine. - - The number of threads to reserve to high priority Rocksdb background tasks. + + Rocksdb High Priority Background Threads: The number of threads to reserve to high priority Rocksdb background tasks. - - This defines the duration after which a write is to be considered in \"stall\" state. For every write that meets this threshold, the system will increment the `restate.rocksdb_stall_flare` gauge, if the write is unstalled, the guage will be updated accordingly. + + Rocksdb stall detection threshold: This defines the duration after which a write is to be considered in \"stall\" state. For every write that meets this threshold, the system will increment the `restate.rocksdb_stall_flare` gauge, if the write is unstalled, the guage will be updated accordingly. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Note if automatic memory budgeting is enabled, it should be safe to allow rocksdb to stall if it hits the limit. However, if rocksdb stall kicked in, it's unlikely that the system will recover from this without intervention. + + Allow rocksdb writes to stall if memory limit is reached: Note if automatic memory budgeting is enabled, it should be safe to allow rocksdb to stall if it hits the limit. However, if rocksdb stall kicked in, it's unlikely that the system will recover from this without intervention. - - Defines the level of PerfContext used internally by rocksdb. Default is `enable-count` which should be sufficient for most users. Note that higher levels incur a CPU cost and might slow down the critical path. + + Rocksdb performance statistics level: Defines the level of PerfContext used internally by rocksdb. Default is `enable-count` which should be sufficient for most users. Note that higher levels incur a CPU cost and might slow down the critical path. - `"disable"` : Disable perf stats @@ -1901,25 +2147,35 @@ This configures the restate-managed storage thread pool for performing low-prior - `"enable-time"` : Enables count and time stats - - The idle time after which the node will check for metadata updates from metadata store. This helps the node detect if it has been operating with stale metadata for extended period of time, primarily because it didn't interact with other peers in the cluster during that period. 
+ + Metadata update interval: The idle time after which the node will check for metadata updates from metadata store. This helps the node detect if it has been operating with stale metadata for extended period of time, primarily because it didn't interact with other peers in the cluster during that period. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - When a node detects that a new metadata version exists, it'll attempt to fetch it from its peers. After this timeout duration has passed, the node will attempt to fetch the metadata from metadata store as well. This is to ensure that the nodes converge quickly while reducing the load on the metadata store. + + Timeout for metadata peer-to-peer fetching: When a node detects that a new metadata version exists, it'll attempt to fetch it from its peers. After this timeout duration has passed, the node will attempt to fetch the metadata from metadata store as well. This is to ensure that the nodes converge quickly while reducing the load on the metadata store. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - The retry policy for network related errors + + Network error retry policy: The retry policy for network related errors No retry strategy. - + + + +Set `type: "none"` + @@ -1927,18 +2183,25 @@ This configures the restate-managed storage thread pool for performing low-prior Retry with a fixed delay strategy. - + + + +Set `type: "fixed-delay"` + - Interval between retries. + Interval: Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. @@ -1947,32 +2210,39 @@ Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/ Retry with an exponential strategy. The next retry is computed as `min(last_retry_interval * factor, max_interval)`. - + + + +Set `type: "exponential"` + - Initial interval for the first retry attempt. + Initial Interval: Initial interval for the first retry attempt. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" + - The factor to use to compute the next retry attempt. + Factor: The factor to use to compute the next retry attempt. - - Number of maximum attempts before giving up. Infinite retries if unset. + + Max attempts: Number of maximum attempts before giving up. Infinite retries if unset. - Maximum interval between retries. + Max interval: Maximum interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. 
Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" @@ -1981,18 +2251,21 @@ Examples: - - The timeout until the node gives up joining a cluster and initializing itself. + + Initialization timeout: The timeout until the node gives up joining a cluster and initializing itself. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Restate uses Scarf to collect anonymous usage data to help us understand how the software is being used. You can set this flag to true to disable this collection. It can also be set with the environment variable DO_NOT_TRACK=1. + + Disable telemetry: Restate uses Scarf to collect anonymous usage data to help us understand how the software is being used. You can set this flag to true to disable this collection. It can also be set with the environment variable DO_NOT_TRACK=1. - - This is a shortcut to set both [`Self::tracing_runtime_endpoint`], and [`Self::tracing_services_endpoint`]. + + Tracing Endpoint: This is a shortcut to set both [`Self::tracing_runtime_endpoint`], and [`Self::tracing_services_endpoint`]. Specify the tracing endpoint to send runtime traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). @@ -2000,8 +2273,8 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs - - Overrides [`Self::tracing_endpoint`] for runtime traces + + Runtime Tracing Endpoint: Overrides [`Self::tracing_endpoint`] for runtime traces Specify the tracing endpoint to send runtime traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). @@ -2009,8 +2282,8 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs - - Overrides [`Self::tracing_endpoint`] for services traces + + Services Tracing Endpoint: Overrides [`Self::tracing_endpoint`] for services traces Specify the tracing endpoint to send services traces to. Traces will be exported using [OTLP gRPC](https://opentelemetry.io/docs/specs/otlp/#otlpgrpc) through [opentelemetry_otlp](https://docs.rs/opentelemetry-otlp/0.12.0/opentelemetry_otlp/). @@ -2018,8 +2291,8 @@ To configure the sampling, please refer to the [opentelemetry autoconfigure docs - - If set, an exporter will be configured to write traces to files using the Jaeger JSON format. Each trace file will start with the `trace` prefix. + + Distributed Tracing JSON Export Path: If set, an exporter will be configured to write traces to files using the Jaeger JSON format. Each trace file will start with the `trace` prefix. If unset, no traces will be written to file. @@ -2030,56 +2303,62 @@ To inspect the traces, open the Jaeger UI and use the Upload JSON feature to loa - Distributed tracing exporter filter. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. + Tracing Filter: Distributed tracing exporter filter. Check the [`RUST_LOG` documentation](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html) for more details how to configure it. - Specify additional headers you want the system to send to the tracing endpoint (e.g. authentication headers). 
+ Additional tracing headers: Specify additional headers you want the system to send to the tracing endpoint (e.g. authentication headers). - - A path to a file, such as \"/var/secrets/key.pem\", which contains exactly one ed25519 private key in PEM format. Such a file can be generated with `openssl genpkey -algorithm ed25519`. If provided, this key will be used to attach JWTs to requests from this client which SDKs may optionally verify, proving that the caller is a particular Restate instance. + + Request identity private key PEM file: A path to a file, such as \"/var/secrets/key.pem\", which contains exactly one ed25519 private key in PEM format. Such a file can be generated with `openssl genpkey -algorithm ed25519`. If provided, this key will be used to attach JWTs to requests from this client which SDKs may optionally verify, proving that the caller is a particular Restate instance. This file is currently only read on client creation, but this may change in future. Parsed public keys will be logged at INFO level in the same format that SDKs expect. - - Headers that should be applied to all outgoing requests (HTTP and Lambda). Defaults to `x-restate-cluster-name: <cluster name>`. + + Additional request headers: Headers that should be applied to all outgoing requests (HTTP and Lambda). Defaults to `x-restate-cluster-name: <cluster name>`. - - Configuration for the HTTP/2 keep-alive mechanism, using PING frames. If unset, HTTP/2 keep-alive are disabled. + + HTTP/2 Keep-alive: Configuration for the HTTP/2 keep-alive mechanism, using PING frames. If unset, HTTP/2 keep-alive are disabled. - - Sets an interval for HTTP/2 PING frames should be sent to keep a connection alive. + + HTTP/2 Keep-alive interval: Sets an interval for HTTP/2 PING frames should be sent to keep a connection alive. You should set this timeout with a value lower than the `abort_timeout`. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" + - - Sets a timeout for receiving an acknowledgement of the keep-alive ping. + + Timeout: Sets a timeout for receiving an acknowledgement of the keep-alive ping. If the ping is not acknowledged within the timeout, the connection will be closed. +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" + - - A URI, such as `http://127.0.0.1:10001`, of a server to which all invocations should be sent, with the `Host` header set to the deployment URI. HTTPS proxy URIs are supported, but only HTTP endpoint traffic will be proxied currently. Can be overridden by the `HTTP_PROXY` environment variable. + + Proxy URI: A URI, such as `http://127.0.0.1:10001`, of a server to which all invocations should be sent, with the `Host` header set to the deployment URI. HTTPS proxy URIs are supported, but only HTTP endpoint traffic will be proxied currently. Can be overridden by the `HTTP_PROXY` environment variable. - - HTTP authorities eg `localhost`, `restate.dev`, `127.0.0.1` that should not be proxied by the http_proxy. Ports are ignored. Subdomains are also matched. An entry “*” matches all hostnames. Can be overridden by the `NO_PROXY` environment variable, which supports comma separated values. + + No proxy: HTTP authorities eg `localhost`, `restate.dev`, `127.0.0.1` that should not be proxied by the http_proxy. Ports are ignored. Subdomains are also matched. An entry “*” matches all hostnames. Can be overridden by the `NO_PROXY` environment variable, which supports comma separated values. 
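Note: a hedged sketch of the client-side options documented in the hunk above. Only the `openssl genpkey -algorithm ed25519` command, the `/var/secrets/key.pem` example path, the proxy URI example, and the `HTTP_PROXY`/`NO_PROXY` environment variables come from the text; the key names, their nesting, and the keep-alive values are assumptions.

```toml
# Illustrative sketch only: key names and the enclosing section are assumptions.
request-identity-private-key-pem-file = "/var/secrets/key.pem"   # exactly one ed25519 key, e.g. from `openssl genpkey -algorithm ed25519`
http-keep-alive-options = { interval = "40s", timeout = "20s" }  # keep the interval below the abort timeout
http-proxy = "http://127.0.0.1:10001"                            # can be overridden by HTTP_PROXY
no-proxy = ["localhost", "127.0.0.1", "restate.dev"]             # can be overridden by NO_PROXY (comma separated)
```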
@@ -2089,13 +2368,16 @@ If the ping is not acknowledged within the timeout, the connection will be close - - How long to wait for a TCP connection to be established before considering it a failed attempt. + + Connect timeout: How long to wait for a TCP connection to be established before considering it a failed attempt. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Sets the initial maximum of locally initiated (send) streams. + + Initial Max Send Streams: Sets the initial maximum of locally initiated (send) streams. This value will be overwritten by the value included in the initial SETTINGS frame received from the peer as part of a [connection preface]. @@ -2105,61 +2387,61 @@ Default: None - - Name of the AWS profile to select. Defaults to 'AWS_PROFILE' env var, or otherwise the `default` profile. + + AWS Profile: Name of the AWS profile to select. Defaults to 'AWS_PROFILE' env var, or otherwise the `default` profile. - - An external ID to apply to any AssumeRole operations taken by this client. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html Can be overridden by the `AWS_EXTERNAL_ID` environment variable. + + AssumeRole external ID: An external ID to apply to any AssumeRole operations taken by this client. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html Can be overridden by the `AWS_EXTERNAL_ID` environment variable. - - Request minimum size to enable compression. The request size includes the total of the journal replay and its framing using Restate service protocol, without accounting for the json envelope and the base 64 encoding. + + Request Compression threshold: Request minimum size to enable compression. The request size includes the total of the journal replay and its framing using Restate service protocol, without accounting for the json envelope and the base 64 encoding. Default: 4MB (The default AWS Lambda Limit is 6MB, 4MB roughly accounts for +33% of Base64 and the json envelope). - Human-readable bytes + Human-readable bytes: Human-readable bytes - - Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. + + Disable Direct IO for reads: Files will be opened in \"direct I/O\" mode which means that data r/w from the disk will not be cached or buffered. The hardware buffer of the devices may however still be used. Memory mapped files are not impacted by these parameters. - - Use O_DIRECT for writes in background flush and compactions. + + Disable Direct IO for flush and compactions: Use O_DIRECT for writes in background flush and compactions. - - The default depends on the different rocksdb use-cases at Restate. + + Disable WAL: The default depends on the different rocksdb use-cases at Restate. Supports hot-reloading (Partial / Bifrost only) - + Disable rocksdb statistics collection Default: False (statistics enabled) - - Default: the number of CPU cores on this node. + + RocksDB max background jobs (flushes and compactions): Default: the number of CPU cores on this node. - If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. + RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. 
If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. + RocksDB statistics level: StatsLevel can be used to reduce statistics overhead by skipping certain types of stats in the stats collection process. Default: \"except-detailed-timers\" @@ -2172,64 +2454,68 @@ Default: \"except-detailed-timers\" - `"all"` : Collect all stats, including measuring duration of mutex operations. If getting time is expensive on the platform to run, it can reduce scalability to more threads, especially for writes. - - Verbosity of the LOG. + + RocksDB log level: Verbosity of the LOG. Default: \"error\" Verbosity of the LOG. - - Number of info LOG files to keep + + RocksDB log keep file num: Number of info LOG files to keep Default: 1 - - Max size of info LOG file + + RocksDB log max file size: Max size of info LOG file Default: 64MB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - Uncompressed block size + RocksDB block size: Uncompressed block size Default: 64KiB - Non-zero human-readable bytes + Non-zero human-readable bytes: Non-zero human-readable bytes - - The interval at which the failure detector will tick. Decrease this value for faster reaction to node failures. Note, that every tick comes with an overhead. + + Gossip tick interval: The interval at which the failure detector will tick. Decrease this value for faster reaction to node failures. Note, that every tick comes with an overhead. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Specifies how many gossip intervals of inactivity need to pass before considering a node as dead. + + Gossip failure threshold: Specifies how many gossip intervals of inactivity need to pass before considering a node as dead. - - On every gossip interval, how many peers each node attempts to gossip with. The default is optimized for small clusters (less than 5 nodes). On larger clusters, if gossip overhead is noticeable, consider reducing this value to 1. + + Number of peers to gossip: On every gossip interval, how many peers each node attempts to gossip with. The default is optimized for small clusters (less than 5 nodes). On larger clusters, if gossip overhead is noticeable, consider reducing this value to 1. - - Gossips before failure detector is stable - + - - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - - How many intervals need to pass without receiving any gossip messages before considering this node as potentially isolated/dead. This threshold is used in the case where the node can still send gossip messages but did not receive any. This can rarely happen in asymmetric network partitions. + + Gossip loneliness threshold: How many intervals need to pass without receiving any gossip messages before considering this node as potentially isolated/dead. 
This threshold is used in the case where the node can still send gossip messages but did not receive any. This can rarely happen in asymmetric network partitions. In this case, the node will advertise itself as dead in the gossip messages it sends out. @@ -2237,56 +2523,65 @@ Note: this threshold does not apply to a cluster that's configured with a single - - In addition to basic health/liveness information, the gossip protocol is used to exchange extra information about the roles hosted by this node. For instance, which partitions are currently running, their configuration versions, and the durable LSN of the corresponding partition databases. This information is sent every Nth gossip message. This setting controls the frequency of this exchange. For instance, `10` means that every 10th gossip message will contain the extra information about. + + Gossip extras exchange frequency: In addition to basic health/liveness information, the gossip protocol is used to exchange extra information about the roles hosted by this node. For instance, which partitions are currently running, their configuration versions, and the durable LSN of the corresponding partition databases. This information is sent every Nth gossip message. This setting controls the frequency of this exchange. For instance, `10` means that every 10th gossip message will contain the extra information about. - - The time skew is the maximum acceptable time difference between the local node and the time reported by peers via gossip messages. The time skew is also used to ignore gossip messages that are too old. + + Gossips time skew threshold: The time skew is the maximum acceptable time difference between the local node and the time reported by peers via gossip messages. The time skew is also used to ignore gossip messages that are too old. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - Maximum journal retention duration that can be configured. When discovering a service deployment, or when modifying the journal retention using the Admin API, the given value will be clamped. + Maximum journal retention duration: Maximum journal retention duration that can be configured. When discovering a service deployment, or when modifying the journal retention using the Admin API, the given value will be clamped. Unset means no limit. - Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. + Human-readable duration: Duration string in either jiff human friendly or ISO8601 format. Check https://docs.rs/jiff/latest/jiff/struct.Span.html#parsing-and-printing for more details. Examples: "10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" or "0" - The default retry policy to use for invocations. + Default retry policy: The default retry policy to use for invocations. The retry policy can be customized on a service/handler basis, using the respective SDK APIs. - - Initial interval for the first retry attempt. + + Initial Interval: Initial interval for the first retry attempt. 
+ +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - The factor to use to compute the next retry attempt. Default: `2.0`. + + Factor: The factor to use to compute the next retry attempt. Default: `2.0`. - - Number of maximum attempts (including the initial) before giving up. No retries if set to 1. + + Max attempts: Number of maximum attempts (including the initial) before giving up. No retries if set to 1. - `"unlimited"` : Unlimited retries. - + Bounded number of retries. @@ -2295,24 +2590,27 @@ The retry policy can be customized on a service/handler basis, using the respect - - Behavior when max attempts are reached. + + On max attempts: Behavior when max attempts are reached. - `"pause"` : Pause the invocation when max attempts are reached. - `"kill"` : Kill the invocation when max attempts are reached. - - Maximum interval between retries. + + Max interval: Maximum interval between retries. + +Examples: +"10 hours" or "5 days" or "5d" or "1h 4m" or "P40D" - - Maximum max attempts configurable in an invocation retry policy. When discovering a service deployment with configured retry policies, or when modifying the invocation retry policy using the Admin API, the given value will be clamped. + + Max configurable value for retry policy max attempts: Maximum max attempts configurable in an invocation retry policy. When discovering a service deployment with configured retry policies, or when modifying the invocation retry policy using the Admin API, the given value will be clamped. `None` means no limit, that is infinite retries is enabled. diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index cd276608..437a2f74 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -9,7 +9,6 @@ const outputPath = "docs/references/server-config.mdx"; async function parseJsonSchema(schemaPath) { try { - // Use $RefParser directly to dereference all $ref pointers return await $RefParser.dereference(schemaPath, { mutateInputSchema: false, continueOnError: false, @@ -23,7 +22,7 @@ async function parseJsonSchema(schemaPath) { } } -function formatDescription(description, examples) { +function formatDescription(description, title, examples) { if (!description) return ''; // Escape HTML-like syntax in code blocks and regular text const cleanDescription = description @@ -38,22 +37,22 @@ function formatDescription(description, examples) { // Convert markdown links to proper format .replace(/\[(.*?)\]\((.*?)\)/g, '[$1]($2)') // Escape quotes for JSX attributes - .replace(/"/g, '\\"') + .replace(/"/g, '\\"') || '' const exampleStr = examples && Array.isArray(examples) && examples.length > 0 ? '\n\nExamples:\n' + examples.map(ex => `${JSON.stringify(ex, null, 2)}`).join(' or ') : ''; - return cleanDescription + exampleStr; + const titleStr = title ? `${title}: ` : ''; + return `${titleStr}${cleanDescription}${exampleStr}`; } function getTypeFromSchema(propSchema) { if (propSchema.type) { if (Array.isArray(propSchema.type)) { // Handle union types like ["string", "null"] - const nonNullTypes = propSchema.type.filter(t => t !== 'null'); const isOptional = propSchema.type.includes('null'); return { - type: nonNullTypes.length === 1 ? 
nonNullTypes[0] : nonNullTypes.join(' | '), + type: propSchema.type.join(' | '), optional: isOptional }; } @@ -76,41 +75,23 @@ function getTypeFromSchema(propSchema) { return { type: 'unknown', optional: false }; } -function generateResponseField(propName, propSchema, isRequired = false, level = 0) { - const indent = ' '.repeat(level); - const { type, optional } = getTypeFromSchema(propSchema); - const required = isRequired && !optional ? ' required' : ''; - let description = formatDescription(propSchema.description|| propSchema.title || ''); - - // Format default value properly for the attribute - let defaultAttr = ''; - let defaultStr = ''; - if (propSchema.default !== undefined && propSchema.default !== null) { - if (typeof propSchema.default === 'string') { - defaultStr = `"${propSchema.default}"`; - } else if (typeof propSchema.default === 'object') { - defaultStr = JSON.stringify(propSchema.default); - } else { - defaultStr = `${String(propSchema.default)}`; - } - defaultAttr = ` default={${defaultStr}}`; - } else { - if (type === 'string' && propSchema.enum !== undefined && Array.isArray(propSchema.enum) && propSchema.enum.length > 0) { - // If enum of strings is defined, use the first enum value as default - let defaultValue = propSchema.enum[0]; - if (typeof defaultValue === 'string') { - defaultStr = `"${defaultValue}"`; - defaultAttr = ` default={${defaultStr}}`; - } else if (typeof defaultValue === 'object') { - defaultStr = JSON.stringify(defaultValue); - } else { - defaultStr = `${String(defaultValue)}`; - } - defaultAttr = ` default={${defaultStr}}`; - } - } +function getDefaultValueString(propSchema, type) { + let value = propSchema.default; + if (value === undefined) return null; + else if (value === null) return `default=null`; + else if (typeof value === 'string') return `default="${value}"`; + // needs to be checked before 'object' because typeof array is 'object' + else if (type === 'array') return `default=${JSON.stringify(value)}` + else if (typeof value === 'object') return null + else return `default=${String(value)}`; +} +function generatePostAttr(propSchema, type) { let postTags = [] + const defaultValue = getDefaultValueString(propSchema, type) + if (defaultValue) { + postTags.push(`\'${defaultValue}\'`); + } if (propSchema.format) { postTags.push(`\'format: ${propSchema.format}\'`); } @@ -130,17 +111,64 @@ function generateResponseField(propName, propSchema, isRequired = false, level = postTags.push(`\'maxLength: ${propSchema.maxLength}\'`); } - const postAttr = ` post={[${postTags.join(",")}]}`; + return ` post={[${postTags.join(",")}]}`; +} - let output = `${indent}\n`; - +function parseVariantName(variant, index) { + if (variant.enum && variant.enum.length === 1) { + let variantValue = variant.enum[0]; + if (typeof variantValue === 'string') { + return `"${variantValue}"`; + } else if (typeof variantValue === 'object') { + return JSON.stringify(variantValue); + } else { + return `${String(variantValue)}`; + } + } else if (variant.title) { + return `Option ${index + 1}: ${variant.title}`; + } else if (variant.const !== undefined) { + return `"${variant.const}"`; + } else if (variant.description) { + return `Option ${index + 1}: ${variant.description}`; + } else { + return `Option ${index + 1}`; + } +} + +function generateResponseFieldsFromProperties(properties, requiredProps = [], level = 0) { + let generatedOutput = ''; + Object.entries(properties).forEach(([subPropName, subPropSchema]) => { + generatedOutput += generateResponseField( + subPropName, + 
subPropSchema, + requiredProps.includes(subPropName), + level + 2 + ); + }); + return generatedOutput +} + +function generateResponseField(propName, propSchema, isRequired = false, level = 0) { + const indent = ' '.repeat(level); + const { type, optional } = getTypeFromSchema(propSchema); + const required = isRequired && !optional ? ' required' : ''; + let description = formatDescription(propSchema.description, propSchema.title, propSchema.examples); + + let postAttr = generatePostAttr(propSchema, type); + + // Special case: if type is string and enum has a single value, suggest setting that value (for example for type: "exponential-delay") + if (propSchema.default === undefined && type === 'string' && Array.isArray(propSchema.enum) && propSchema.enum.length === 1) { + let value = propSchema.enum[0]; + description += `\n\nSet \`${propName}: "${value}"\``; + } + + let output = `${indent}\n`; if (description) { output += `${indent} ${description}\n\n`; } // Handle object properties if (type === 'object' && propSchema.properties) { - const requiredProps = propSchema.required || []; output += `${indent} \n`; if (propSchema.oneOf) { @@ -148,44 +176,16 @@ function generateResponseField(propName, propSchema, isRequired = false, level = output += `${indent} \n`; variants.forEach((variant, index) => { - let variantName = ''; - - output += `${indent}\n`; - // add description - output += `${indent} \n`; - - Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - - Object.entries(propSchema.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - output += `${indent} \n`; + const variantName = parseVariantName(variant, index); + output += `${indent}\n`; + output = generateResponseFieldsFromProperties(output, variant.properties, propSchema.required, level); + output = generateResponseFieldsFromProperties(output, propSchema.properties, propSchema.required, level); output += `${indent} \n`; }); + } else { output += `${indent} \n`; - - Object.entries(propSchema.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - + output = generateResponseFieldsFromProperties(output, propSchema.properties, propSchema.required, level); output += `${indent} \n`; } } @@ -194,7 +194,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = if (type === 'array' && propSchema.items) { output += `${indent} \n`; output += `${indent} \n`; - output += generateResponseField('item', propSchema.items, false, level + 2); + output += generateResponseField('item', propSchema.items, propSchema.required, level + 2); output += `${indent} \n`; } @@ -207,61 +207,27 @@ function generateResponseField(propName, propSchema, isRequired = false, level = let optionalVariant = variants.find(variant => variant.type !== "null") const optionalType = getTypeFromSchema(optionalVariant); - output = `${indent}\n`; + output = `${indent}\n`; if (description) { output += `${indent} ${description}\n\n`; } if (optionalVariant.description) { - output += `${indent} ${formatDescription(optionalVariant.description, optionalVariant.examples)}\n` + output += `${indent} ${formatDescription(optionalVariant.description, 
optionalVariant.title, optionalVariant.examples)}\n` } if (optionalType.type === 'object' && optionalVariant.properties) { - const requiredProps = optionalVariant.required || []; output += `${indent} \n`; output += `${indent} \n`; - - Object.entries(optionalVariant.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - + output = generateResponseFieldsFromProperties(output, optionalVariant.properties, optionalVariant.required, level); output += `${indent} \n`; + } else if (optionalType.type === 'oneOf') { const oneOfVariants = optionalVariant.oneOf; output += `${indent} \n`; oneOfVariants.forEach((variant, index) => { - let variantName = ''; - if (variant.enum && variant.enum.length === 1) { - let variantValue = variant.enum[0]; - if (typeof variantValue === 'string') { - variantName = `"${variantValue}"`; - } else if (typeof variantValue === 'object') { - variantName = JSON.stringify(variantValue); - } else { - variantName = `${String(variantValue)}`; - } - } else if (variant.title) { - variantName = `Option ${index + 1}: ${variant.title}`; - } else if (variant.const !== undefined) { - variantName = `"${variant.const}"`; - } else { - variantName = `Option ${index + 1}`; - } + let variantName = parseVariantName(variant, index) if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { - const requiredProps = variant.required || []; - Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - + output = generateResponseFieldsFromProperties(output, variant.properties, variant.required, level); } else { output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` } @@ -271,37 +237,11 @@ function generateResponseField(propName, propSchema, isRequired = false, level = output += `${indent} \n`; variants.forEach((variant, index) => { - let variantName = ''; - if (variant.enum && variant.enum.length === 1) { - let variantValue = variant.enum[0]; - if (typeof variantValue === 'string') { - variantName = `"${variantValue}"`; - } else if (typeof variantValue === 'object') { - variantName = JSON.stringify(variantValue); - } else { - variantName = `${String(variantValue)}`; - } - } else if (variant.title) { - variantName = `Option ${index + 1}: ${variant.title}`; - } else if (variant.const !== undefined) { - variantName = `"${variant.const}"`; - } else { - variantName = `Option ${index + 1}`; - } + let variantName = parseVariantName(variant, index); if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { - const requiredProps = variant.required || []; output += `${indent} \n`; output += `${indent} \n`; - - Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - + output = generateResponseFieldsFromProperties(output, variant.properties, variant.required, level); output += `${indent} \n`; } else { output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` @@ -314,54 +254,25 @@ function generateResponseField(propName, propSchema, isRequired = false, level = // Handle oneOf if (type === 'oneOf') { const variants = propSchema.oneOf - 
console.log(variants); - - output = `${indent}\n`; + output = `${indent}\n`; if (description) { output += `${indent} ${description}\n\n`; } output += `${indent} \n`; variants.forEach((variant, index) => { - let variantName = ''; - if (variant.enum && variant.enum.length === 1) { - let variantValue = variant.enum[0]; - if (typeof variantValue === 'string') { - variantName = `"${variantValue}"`; - } else if (typeof variantValue === 'object') { - variantName = JSON.stringify(variantValue); - } else { - variantName = `${String(variantValue)}`; - } - } else if (variant.title) { - variantName = `Option ${index + 1}: ${variant.title}`; - } else if (variant.const !== undefined) { - variantName = `"${variant.const}"`; - } else { - variantName = `Option ${index + 1}`; - } + let variantName = parseVariantName(variant, index); if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { - const requiredProps = variant.required || []; output += `${indent} \n`; output += `${indent} \n`; - output += `${indent} ${formatDescription(variant.description)}\n\n`; - - Object.entries(variant.properties).forEach(([subPropName, subPropSchema]) => { - output += generateResponseField( - subPropName, - subPropSchema, - requiredProps.includes(subPropName), - level + 2 - ); - }); - + output += `${indent} ${formatDescription(variant.description, undefined, variant.examples)}\n\n`; + output = generateResponseFieldsFromProperties(output, variant.properties, variant.required, level); output += `${indent} \n`; } else { output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` } }); - } output += `${indent}\n\n`; @@ -377,18 +288,8 @@ function generateRestateConfigViewer(schema) { '\n\n'; if (schema.properties) { - const requiredProps = schema.required || []; - - Object.entries(schema.properties).forEach(([propName, propSchema]) => { - output += generateResponseField( - propName, - propSchema, - requiredProps.includes(propName), - 0 - ); - }); + output = generateResponseFieldsFromProperties(output, schema.properties, schema.required, 0); } - return output; } From 54841179264c031568807f843fcee895a54264ce Mon Sep 17 00:00:00 2001 From: Giselle van Dongen Date: Wed, 5 Nov 2025 16:26:06 +0100 Subject: [PATCH 10/10] Fix rendering restate config --- docs/references/server-config.mdx | 118 +++++++++++++--------- scripts/generate-restate-config-viewer.js | 24 +++-- 2 files changed, 85 insertions(+), 57 deletions(-) diff --git a/docs/references/server-config.mdx b/docs/references/server-config.mdx index 08055b92..12d8f5a3 100644 --- a/docs/references/server-config.mdx +++ b/docs/references/server-config.mdx @@ -9,9 +9,13 @@ import Intro from "/snippets/common/default-configuration.mdx" + Worker options: + + Internal queue for partition processor communication: + @@ -28,6 +32,8 @@ Examples: + Storage options: + @@ -42,7 +48,7 @@ By default this uses the value defined in `default-num-partitions` in the common The total is divided evenly across partitions. The divisor is defined in `num-partitions-to-share-memory-budget`. If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -84,7 +90,7 @@ Default: False (statistics enabled) RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. 
That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -121,7 +127,7 @@ Default: 1 Default: 64MB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -129,13 +135,15 @@ Default: 64MB Default: 64KiB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes + Invoker options: + @@ -168,7 +176,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -245,18 +253,18 @@ Examples: - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes Message size limit: Threshold to fail the invocation in case protocol messages coming from a service are larger than the specified amount. - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes - Temporary directory: Temporary directory to use for the invoker temporary files. If empty, the system temporary directory will be used instead. + Temporary directory to use for the invoker temporary files. If empty, the system temporary directory will be used instead. @@ -275,7 +283,7 @@ Examples: The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and invocations are processed without throttling. - Throttling options: Throttling options per invoker. + Throttling options per invoker. @@ -298,7 +306,7 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m The throttling limit is shared across all partitions running on this node, providing a global rate limit for the entire node rather than per-partition limits. When `unset`, no throttling is applied and actions are processed without throttling. - Throttling options: Throttling options per invoker. + Throttling options per invoker. @@ -325,7 +333,7 @@ Syntax: `<rate>/<unit>` where `<unit>` is `s|sec|second`, `m|m - Snapshots: Snapshots provide a mechanism for safely trimming the log and efficient bootstrapping of new worker nodes. + Snapshots provide a mechanism for safely trimming the log and efficient bootstrapping of new worker nodes. @@ -375,7 +383,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -441,7 +449,7 @@ Examples: - AWS region: AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. + AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. 
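Note: the snapshot and object-store fields above read more easily with a concrete destination in mind. Everything in the sketch below except the S3 use case and the `us-east-1` Minio hint is hypothetical: the `[worker.snapshots]` section name, the `destination` and `aws-region` keys, and the bucket path are assumptions, not taken from the hunk.

```toml
# Illustrative sketch only: section name, keys, and bucket path are hypothetical.
[worker.snapshots]
destination = "s3://example-snapshots-bucket/my-cluster"  # object store destination for partition snapshots
aws-region = "us-east-1"                                  # for Minio, any string works, e.g. "us-east-1"
```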
@@ -477,6 +485,8 @@ Examples: + Admin server options: + @@ -503,15 +513,17 @@ These will be used during deployment creation to distinguish between an already - Concurrency limit: Concurrency limit for the Admin APIs. Default is unlimited. + Concurrency limit for the Admin APIs. Default is unlimited. + Storage query engine options: + - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -556,6 +568,8 @@ Examples: + Ingress options: + @@ -599,7 +613,7 @@ Examples: - Ingress endpoint: Ingress endpoint that the Web UI should use to interact with. + Ingress endpoint that the Web UI should use to interact with. @@ -607,6 +621,8 @@ Examples: + Bifrost options: + @@ -666,7 +682,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -749,7 +765,7 @@ Examples: - Retry policy: Log Server RPC retry policy + Log Server RPC retry policy Retry policy for log server RPCs @@ -778,7 +794,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -846,7 +862,7 @@ The number of records bifrost will attempt to prefetch from replicated loglet's - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -894,7 +910,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1002,6 +1018,8 @@ This allows the log to pick up replication property changes, apply better placem + Metadata store options: + @@ -1016,7 +1034,7 @@ Number of in-flight metadata store requests. If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1065,7 +1083,7 @@ Default: False (statistics enabled) RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1102,7 +1120,7 @@ Default: 1 Default: 64MB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1110,7 +1128,7 @@ Default: 64MB Default: 64KiB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1196,7 +1214,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1281,6 +1299,8 @@ Examples: + HTTP/2 Adaptive Window: + @@ -1289,7 +1309,7 @@ Examples: - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1306,7 +1326,7 @@ Examples: If this value is set, it overrides the ratio defined in `rocksdb-memory-ratio`. 
- Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1387,7 +1407,7 @@ Default: False (statistics enabled) RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1424,7 +1444,7 @@ Default: 1 Default: 64MB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1432,7 +1452,7 @@ Default: 64MB Default: 64KiB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -1515,6 +1535,8 @@ Set `type: "replicated"` + Restate metadata server address list: + @@ -1548,7 +1570,7 @@ Examples: - Backoff policy used by the metadata client: Backoff policy used by the metadata client when it encounters concurrent modifications. + Backoff policy used by the metadata client when it encounters concurrent modifications. @@ -1575,7 +1597,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1647,6 +1669,8 @@ Set `type: "etcd"` + Etcd cluster node address list: + @@ -1674,7 +1698,7 @@ Examples: - Backoff policy used by the metadata client: Backoff policy used by the metadata client when it encounters concurrent modifications. + Backoff policy used by the metadata client when it encounters concurrent modifications. @@ -1701,7 +1725,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1805,7 +1829,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -1871,7 +1895,7 @@ Examples: - AWS region: AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. + AWS region to use with S3 object store destinations. This may be inferred from the environment, for example the current region when running in EC2. Because of the request signing algorithm this must have a value. For Minio, you can generally set this to any string, such as `us-east-1`. @@ -1925,7 +1949,7 @@ Examples: - Backoff policy used by the metadata client: Backoff policy used by the metadata client when it encounters concurrent modifications. + Backoff policy used by the metadata client when it encounters concurrent modifications. @@ -1952,7 +1976,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. 
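Note: every retry/backoff policy in this reference shares the same shape (`type: "none"`, `type: "fixed-delay"`, or `type: "exponential"`, the latter computing the next retry as `min(last_retry_interval * factor, max_interval)`). A hedged sketch of that shape follows; the placeholder key `example-retry-policy` and the kebab-case field names are inferred from the titles ("Interval", "Initial Interval", "Factor", "Max attempts", "Max interval"), and the values are examples only.

```toml
# Illustrative sketch only: "example-retry-policy" stands in for whichever policy key is being set.
example-retry-policy = { type = "exponential", initial-interval = "100ms", factor = 2.0, max-attempts = 15, max-interval = "5 hours" }

# A fixed-delay variant would instead look like:
# example-retry-policy = { type = "fixed-delay", interval = "250ms", max-attempts = 10 }
```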
@@ -2104,7 +2128,7 @@ This configures the restate-managed storage thread pool for performing low-prior - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -2191,7 +2215,7 @@ Set `type: "fixed-delay"` - Interval: Interval between retries. + Interval between retries. Can be configured using the [`jiff::fmt::friendly`](https://docs.rs/jiff/latest/jiff/fmt/friendly/index.html) format or ISO8601, for example `5 hours`. @@ -2402,7 +2426,7 @@ Default: None Default: 4MB (The default AWS Lambda Limit is 6MB, 4MB roughly accounts for +33% of Base64 and the json envelope). - Human-readable bytes: Human-readable bytes + Human-readable bytes @@ -2437,7 +2461,7 @@ Default: False (statistics enabled) RocksDB compaction readahead size in bytes: If non-zero, we perform bigger reads when doing compaction. If you're running RocksDB on spinning disks, you should set this to at least 2MB. That way RocksDB's compaction is doing sequential instead of random reads. - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -2474,7 +2498,7 @@ Default: 1 Default: 64MB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -2482,7 +2506,7 @@ Default: 64MB Default: 64KiB - Non-zero human-readable bytes: Non-zero human-readable bytes + Non-zero human-readable bytes @@ -2504,6 +2528,8 @@ Examples: + Gossips before failure detector is stable: + @@ -2545,7 +2571,7 @@ Examples: - Maximum journal retention duration: Maximum journal retention duration that can be configured. When discovering a service deployment, or when modifying the journal retention using the Admin API, the given value will be clamped. + Maximum journal retention duration that can be configured. When discovering a service deployment, or when modifying the journal retention using the Admin API, the given value will be clamped. Unset means no limit. diff --git a/scripts/generate-restate-config-viewer.js b/scripts/generate-restate-config-viewer.js index 437a2f74..ed731d07 100755 --- a/scripts/generate-restate-config-viewer.js +++ b/scripts/generate-restate-config-viewer.js @@ -23,9 +23,8 @@ async function parseJsonSchema(schemaPath) { } function formatDescription(description, title, examples) { - if (!description) return ''; // Escape HTML-like syntax in code blocks and regular text - const cleanDescription = description + const cleanDescription = description ? description .replace(/\n\n/g, '\n\n') // Preserve code blocks with backticks but escape any HTML-like content within .replace(/`([^`]+)`/g, (match, code) => { @@ -37,11 +36,14 @@ function formatDescription(description, title, examples) { // Convert markdown links to proper format .replace(/\[(.*?)\]\((.*?)\)/g, '[$1]($2)') // Escape quotes for JSX attributes - .replace(/"/g, '\\"') || '' + .replace(/"/g, '\\"') : '' const exampleStr = examples && Array.isArray(examples) && examples.length > 0 ? '\n\nExamples:\n' + examples.map(ex => `${JSON.stringify(ex, null, 2)}`).join(' or ') : ''; + if (title && description && description.includes(title)) { + return `${cleanDescription}${exampleStr}`; + } const titleStr = title ? 
`${title}: ` : ''; return `${titleStr}${cleanDescription}${exampleStr}`; } @@ -178,14 +180,14 @@ function generateResponseField(propName, propSchema, isRequired = false, level = variants.forEach((variant, index) => { const variantName = parseVariantName(variant, index); output += `${indent}\n`; - output = generateResponseFieldsFromProperties(output, variant.properties, propSchema.required, level); - output = generateResponseFieldsFromProperties(output, propSchema.properties, propSchema.required, level); + output += generateResponseFieldsFromProperties(variant.properties, propSchema.required, level); + output += generateResponseFieldsFromProperties(propSchema.properties, propSchema.required, level); output += `${indent} \n`; }); } else { output += `${indent} \n`; - output = generateResponseFieldsFromProperties(output, propSchema.properties, propSchema.required, level); + output += generateResponseFieldsFromProperties( propSchema.properties, propSchema.required, level); output += `${indent} \n`; } } @@ -217,7 +219,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = if (optionalType.type === 'object' && optionalVariant.properties) { output += `${indent} \n`; output += `${indent} \n`; - output = generateResponseFieldsFromProperties(output, optionalVariant.properties, optionalVariant.required, level); + output += generateResponseFieldsFromProperties(optionalVariant.properties, optionalVariant.required, level); output += `${indent} \n`; } else if (optionalType.type === 'oneOf') { @@ -227,7 +229,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = oneOfVariants.forEach((variant, index) => { let variantName = parseVariantName(variant, index) if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { - output = generateResponseFieldsFromProperties(output, variant.properties, variant.required, level); + output += generateResponseFieldsFromProperties(variant.properties, variant.required, level); } else { output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` } @@ -241,7 +243,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = if ((['object', 'oneOf', 'array'].some(t => variant.type.includes(t))) && variant.properties) { output += `${indent} \n`; output += `${indent} \n`; - output = generateResponseFieldsFromProperties(output, variant.properties, variant.required, level); + output += generateResponseFieldsFromProperties(variant.properties, variant.required, level); output += `${indent} \n`; } else { output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` @@ -267,7 +269,7 @@ function generateResponseField(propName, propSchema, isRequired = false, level = output += `${indent} \n`; output += `${indent} \n`; output += `${indent} ${formatDescription(variant.description, undefined, variant.examples)}\n\n`; - output = generateResponseFieldsFromProperties(output, variant.properties, variant.required, level); + output += generateResponseFieldsFromProperties( variant.properties, variant.required, level); output += `${indent} \n`; } else { output += `${indent} - \`${variantName}\` : ${formatDescription(variant.description)}\n` @@ -288,7 +290,7 @@ function generateRestateConfigViewer(schema) { '\n\n'; if (schema.properties) { - output = generateResponseFieldsFromProperties(output, schema.properties, schema.required, 0); + output += generateResponseFieldsFromProperties(schema.properties, schema.required, -2); } 
return output; }