From 8e83911687b35a55c835d8739b20b189b3d65928 Mon Sep 17 00:00:00 2001 From: JakeSCahill Date: Wed, 29 Oct 2025 10:11:16 +0000 Subject: [PATCH 1/8] Init overrides --- docs-data/property-overrides.json | 1382 +++++++++++++++++++++++++++++ 1 file changed, 1382 insertions(+) create mode 100644 docs-data/property-overrides.json diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json new file mode 100644 index 0000000000..3b7a40a94c --- /dev/null +++ b/docs-data/property-overrides.json @@ -0,0 +1,1382 @@ +{ + "properties": { + "abort_index_segment_size": { + "description": "Capacity (in number of txns) of an abort index segment.\nEach partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk.", + "config_scope": "cluster" + }, + "admin": { + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " admin:", + " - name: ", + " address: ", + " port: ", + "----" + ], + "description": "Network address for the glossterm:Admin API[] server.", + "config_scope": "broker", + "category": "redpanda" + }, + "admin_api_tls": { + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " admin_api_tls:", + " - name: ", + " enabled: true", + " cert_file: ", + " key_file: ", + " truststore_file: ", + " require_client_auth: true", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "aggregate_metrics": { + "description": "Enable aggregation of metrics returned by the xref:reference:internal-metrics-reference.adoc[`/metrics`] endpoint. Aggregation can simplify monitoring by providing summarized data instead of raw, per-instance metrics. Metric aggregation is performed by summing the values of samples by labels and is done when it makes sense by the shard and/or partition labels.", + "related_topics": [ + "xref:reference:internal-metrics-reference.adoc[`/metrics`]" + ], + "config_scope": "cluster" + }, + "cleanup.policy": { + "description": "The cleanup policy to apply for log segments of a topic.\nWhen `cleanup.policy` is set, it overrides the cluster property xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] for the topic.", + "related_topics": [ + "xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`]" + ], + "config_scope": "topic" + }, + "cloud_storage_azure_adls_endpoint": { + "description": "Azure Data Lake Storage v2 endpoint override. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.\n\nIf not set, this is automatically generated using `dfs.core.windows.net` and <>.", + "config_scope": "object-storage" + }, + "cloud_storage_azure_adls_port": { + "description": "Azure Data Lake Storage v2 port override. See also: <>. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.", + "config_scope": "object-storage" + }, + "cloud_storage_azure_container": { + "description": "The name of the Azure container to use with Tiered Storage. If `null`, the property is disabled.\n\nNOTE: The container must belong to <>.", + "config_scope": "object-storage" + }, + "cloud_storage_azure_managed_identity_id": { + "description": "The managed identity ID to use for access to the Azure storage account. 
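As a minimal sketch (the client ID shown is only a placeholder for your own managed identity), the property can be set with `rpk`:\n\n[,bash]\n----\n# Replace the placeholder with the client ID of your managed identity.\nrpk cluster config set cloud_storage_azure_managed_identity_id 00000000-0000-0000-0000-000000000000\n----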
To use Azure managed identities, you must set <> to `azure_vm_instance_metadata`. See xref:manage:security/iam-roles.adoc[IAM Roles] for more information on managed identities.", + "related_topics": [ + "xref:manage:security/iam-roles.adoc[IAM Roles]" + ], + "config_scope": "object-storage" + }, + "cloud_storage_bucket": { + "description": "AWS or GCP bucket that should be used to store data.\n\nWARNING: Modifying this property after writing data to a bucket could cause data loss.", + "config_scope": "object-storage" + }, + "cloud_storage_cache_directory": { + "description": "Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. If not specified, Redpanda uses a default path within the data directory.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " cloud_storage_cache_directory: ", + "----", + "\n", + "Replace `` with the full path to your desired cache directory." + ], + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`]" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "cloud_storage_cache_size_percent": { + "related_topics": [ + "xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`]" + ], + "config_scope": "object-storage" + }, + "cloud_storage_chunk_prefetch": { + "description": "Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O.", + "config_scope": "object-storage" + }, + "cloud_storage_credentials_host": { + "description": "The hostname to connect to for retrieving role based credentials. Derived from <> if not set. Only required when using IAM role based access. To authenticate using access keys, see <>.", + "config_scope": "object-storage" + }, + "cloud_storage_crl_file": { + "description": "Path to certificate revocation list for <>.", + "config_scope": "object-storage" + }, + "cloud_storage_disable_chunk_reads": { + "description": "Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal.", + "config_scope": "object-storage" + }, + "cloud_storage_enable_compacted_topic_reupload": { + "description": "Enable re-uploading data for compacted topics.\nWhen set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage.", + "config_scope": "object-storage" + }, + "cloud_storage_enable_remote_read": { + "description": "Default remote read config value for new topics.\nWhen set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. 
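As an illustrative sketch, the cluster-wide default can be changed with `rpk`:\n\n[,bash]\n----\nrpk cluster config set cloud_storage_enable_remote_read true\n----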
With the default set to `false`, remote reads must be explicitly enabled at the topic level.", + "config_scope": "object-storage" + }, + "cloud_storage_enable_remote_write": { + "description": "Default remote write value for new topics.\nWhen set to `true`, new topics are by default configured to upload data to object storage. With the default set to `false`, remote write must be explicitly enabled at the topic level.", + "config_scope": "object-storage" + }, + "cloud_storage_enable_segment_merging": { + "related_topics": [ + "xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping]" + ], + "config_scope": "object-storage" + }, + "cloud_storage_enabled": { + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "object-storage" + }, + "cloud_storage_housekeeping_interval_ms": { + "description": "Interval, in milliseconds, between object storage housekeeping tasks.", + "config_scope": "object-storage" + }, + "cloud_storage_inventory_hash_store": { + "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " cloud_storage_inventory_hash_store: ", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "cloud_storage_max_connection_idle_time_ms": { + "description": "Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated.\nThis setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections.", + "config_scope": "object-storage" + }, + "cloud_storage_metadata_sync_timeout_ms": { + "description": "Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization.", + "config_scope": "object-storage" + }, + "cloud_storage_recovery_topic_validation_depth": { + "description": "Number of metadata segments to validate, from newest to oldest, when <> is set to `check_manifest_and_segment_metadata`.", + "config_scope": "object-storage" + }, + "cloud_storage_segment_size_target": { + "description": "Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property.", + "config_scope": "object-storage" + }, + "cloud_storage_upload_ctrl_update_interval_ms": { + "description": "The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "config_scope": "object-storage" + }, + "cluster_id": { + "description": "NOTE: This property is read-only in Redpanda Cloud.\n\nCluster identifier.", + "config_scope": "cluster" + }, + "compaction.strategy": { + "description": "Specifies the strategy used to determine which records to remove during log compaction. 
The compaction strategy controls how Redpanda identifies and removes duplicate records while preserving the latest value for each key.", + "related_topics": [ + "xref:./cluster-properties.adoc#compaction_strategy[`compaction_strategy`]" + ], + "config_scope": "topic" + }, + "compaction_ctrl_update_interval_ms": { + "description": "The interval (in milliseconds) for updating the controller responsible for compaction tasks. The controller uses this interval to decide how to prioritize background compaction work, which is essential for maintaining efficient storage use.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "config_scope": "cluster" + }, + "compression.type": { + "description": "Redpanda ignores this property and always uses producer compression semantics. If producers send compressed data, Redpanda stores and serves it as-is. If producers send uncompressed data, Redpanda stores it uncompressed.\n\nThis property exists for Apache Kafka compatibility. Configure compression in your producers instead of using this topic property.\n\nCompression reduces message size and improves throughput, but increases CPU utilization. Enable producer batching to increase compression efficiency.\n\nWhen set, this property overrides the cluster property xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] for the topic.", + "related_topics": [ + "xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`]", + "xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`]", + "xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching]", + "xref:develop:produce-data/configure-producers.adoc#commonly-used-producer-configuration-options[Common producer configuration options]" + ], + "config_scope": "topic" + }, + "core_balancing_continuous": { + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "cluster" + }, + "crash_loop_sleep_sec": { + "description": "*Introduced in v24.3.4*\n\nThe amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <> broker property.", + "version": "v24.3.4", + "config_scope": "broker", + "category": "redpanda" + }, + "data_transforms_binary_max_size": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe maximum size for a deployable WebAssembly binary that the broker can store.", + "config_scope": "cluster" + }, + "data_transforms_per_core_memory_reservation": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. 
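As an illustrative example of the sizing formula in the next sentence (the numbers are hypothetical, not defaults): with a 20 MiB per-core reservation and a 2 MiB per-function limit, at most 20 / 2 = 10 transform functions could be deployed.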
The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "config_scope": "cluster" + }, + "data_transforms_per_function_memory_limit": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "config_scope": "cluster" + }, + "data_transforms_read_buffer_memory_percentage": { + "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for read buffers.", + "config_scope": "cluster" + }, + "data_transforms_write_buffer_memory_percentage": { + "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for write buffers.", + "config_scope": "cluster" + }, + "default_leaders_preference": { + "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\nThe list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.\nifndef::env-cloud[]", + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "cluster" + }, + "delete.retention.ms": { + "description": "The retention time for tombstone records in a compacted topic. Redpanda removes tombstone records after the retention limit is exceeded.\n\nIf you have enabled Tiered Storage and set <> or <> for the topic, you cannot enable tombstone removal.\n\nIf both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic.", + "related_topics": [ + "xref:./cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]" + ], + "config_scope": "topic" + }, + "developer_mode": { + "description": "CAUTION: Enabling `developer_mode` isn't recommended for production use.\n\nEnable developer mode, which skips most of the checks performed at startup.", + "config_scope": "broker", + "category": "redpanda" + }, + "emergency_disable_data_transforms": { + "description": "Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. This is an emergency shutoff button.", + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`]" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "empty_seed_starts_cluster": { + "description": "Controls how a new cluster is formed. 
All brokers in a cluster must have the same value.\n\n<> to form a cluster.\n\nTIP: For backward compatibility, `true` is the default. Redpanda recommends using `false` in production environments to prevent accidental cluster formation.", + "config_scope": "broker", + "category": "redpanda" + }, + "enable_cluster_metadata_upload_loop": { + "description": "Enables cluster metadata uploads. Required for xref:manage:whole-cluster-restore.adoc[whole cluster restore].", + "related_topics": [ + "xref:manage:whole-cluster-restore.adoc[whole cluster restore]" + ], + "config_scope": "cluster" + }, + "enable_consumer_group_metrics": { + "description": "List of enabled consumer group metrics.\n\n*Accepted values:*\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.\n+\nifndef::env-cloud[]\nUse the xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`] property to control the frequency of consumer lag metric collection.\nendif::[]", + "related_topics": [ + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`]", + "xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`]", + "xref:manage:monitoring.adoc#consumers[Monitor consumer group lag]", + "xref:manage:monitor-cloud.adoc#consumers[Monitor consumer group lag]" + ], + "config_scope": "cluster" + }, + "enable_host_metrics": { + "description": "Enable exporting of some host metrics like `/proc/diskstats`, `/proc/snmp` and `/proc/net/netstat`.\n\nHost metrics are prefixed with xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] and are available on the `/metrics` endpoint.", + "related_topics": [ + "xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`]" + ], + "config_scope": "cluster" + }, + "enable_metrics_reporter": { + "description": "Enable the cluster metrics reporter. 
If `true`, the metrics reporter collects and exports to Redpanda Data a set of customer usage metrics at the interval set by <>.\n\n[NOTE]\n====\nThe cluster metrics of the metrics reporter are different from xref:manage:monitoring.adoc[monitoring metrics].\n\n* The metrics reporter exports customer usage metrics for consumption by Redpanda Data.\n* Monitoring metrics are exported for consumption by Redpanda users.\n====", + "related_topics": [ + "xref:manage:monitoring.adoc[monitoring metrics]" + ], + "config_scope": "cluster" + }, + "enable_schema_id_validation": { + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "description": "Mode to enable server-side schema ID validation.\n\n*Accepted values:*\n\n* `none`: Schema validation is disabled (no schema ID checks are done). Associated topic properties cannot be modified.\n* `redpanda`: Schema validation is enabled. Only Redpanda topic properties are accepted.\n* `compat`: Schema validation is enabled. Both Redpanda and compatible topic properties are accepted.", + "config_scope": "cluster" + }, + "flush.bytes": { + "description": "The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested.", + "related_topics": [ + "xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`]" + ], + "config_scope": "topic" + }, + "flush.ms": { + "description": "The maximum delay (in ms) between two subsequent fsyncs. After this delay, the log is automatically fsynced.", + "related_topics": [ + "xref:./cluster-properties.adoc#flush_ms[`flush_ms`]" + ], + "config_scope": "topic" + }, + "http_authentication": { + "description": "A list of supported HTTP authentication mechanisms.\n\n*Accepted values:*\n\n* `BASIC`: Basic authentication\n* `OIDC`: OpenID Connect", + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "cluster" + }, + "iceberg_backlog_controller_p_coeff": { + "description": "Proportional coefficient for the Iceberg backlog controller. Number of shares assigned to the datalake scheduling group will be proportional to the backlog size error. A negative value means larger and faster changes in the number of shares in the datalake scheduling group.", + "config_scope": "cluster" + }, + "iceberg_catalog_type": { + "description": "Iceberg catalog type that Redpanda will use to commit table metadata updates. Supported types: `rest`, `object_storage`.\nNOTE: You must set <> at the same time that you set `iceberg_catalog_type` to `rest`.", + "config_scope": "cluster" + }, + "iceberg_default_partition_spec": { + "description": "ifndef::env-cloud[]\nDefault value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`] topic property that determines the partition spec for the Iceberg table corresponding to the topic.\nendif::[]\n\nifdef::env-cloud[]\nDefault value for the `redpanda.iceberg.partition.spec` topic property that determines the partition spec for the Iceberg table corresponding to the topic.\nendif::[]", + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`]" + ], + "config_scope": "cluster" + }, + "iceberg_enabled": { + "description": "ifndef::env-cloud[]\nEnables the translation of topic data into Iceberg tables. 
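A minimal sketch of enabling the integration end to end, assuming the topic-level flag can be set with `rpk topic alter-config` and using a placeholder topic name, is:\n\n[,bash]\n----\n# Enable Iceberg at the cluster level, then opt in an individual topic.\nrpk cluster config set iceberg_enabled true\nrpk topic alter-config my-topic --set redpanda.iceberg.enabled=true\n----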
Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`] topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.\nendif::[]\nifdef::env-cloud[]\nEnables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the `redpanda.iceberg.enabled` topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.\nendif::[]", + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`]" + ], + "config_scope": "cluster" + }, + "iceberg_invalid_record_action": { + "description": "ifndef::env-cloud[]\nDefault value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`] topic property.\nendif::[]\nifdef::env-cloud[]\nDefault value for the `redpanda.iceberg.invalid.record.action` topic property.\nendif::[]", + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`]", + "xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]" + ], + "config_scope": "cluster" + }, + "iceberg_latest_schema_cache_ttl_ms": { + "description": "The TTL for caching the latest schema during translation when using the xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`] iceberg mode. This setting controls how long the latest schema remains cached during translation, which affects schema refresh behavior and performance.", + "related_topics": [ + "xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`]" + ], + "config_scope": "cluster" + }, + "iceberg_rest_catalog_aws_access_key": { + "description": "AWS access key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] when using aws_sigv4 authentication mode.", + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`]" + ], + "config_scope": "cluster" + }, + "iceberg_rest_catalog_aws_region": { + "description": "AWS region for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] when using aws_sigv4 authentication mode.", + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`]" + ], + "config_scope": "cluster" + }, + "iceberg_rest_catalog_aws_secret_key": { + "description": "AWS secret key for Iceberg REST catalog SigV4 authentication. 
If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`] when using aws_sigv4 authentication mode.", + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`]" + ], + "config_scope": "cluster" + }, + "iceberg_rest_catalog_client_secret": { + "description": "Secret used with the client ID to query the OAuth token endpoint for Iceberg REST catalog authentication. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_endpoint": { + "description": "URL of Iceberg REST catalog endpoint.\nNOTE: If you set <> to `rest`, you must also set this property at the same time.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_token": { + "description": "Token used to access the REST Iceberg catalog. If the token is present, Redpanda ignores credentials stored in the properties <> and <>.\nRequired if <> is set to `bearer`.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_trust": { + "description": "The contents of a certificate chain to trust for the REST Iceberg catalog.\nifndef::env-cloud[]\nTakes precedence over <>.\nendif::[]", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_warehouse": { + "description": "Warehouse to use for the Iceberg REST catalog. Redpanda queries the catalog to retrieve warehouse-specific configurations and automatically configures settings like the appropriate prefix. The prefix is appended to the catalog path (for example, `/v1/\\{prefix}/namespaces`).", + "config_scope": "cluster" + }, + "iceberg_target_lag_ms": { + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-target-lag-ms[`redpanda.iceberg.target.lag.ms`]" + ], + "config_scope": "cluster" + }, + "initial.retention.local.target.bytes": { + "description": "A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.", + "related_topics": [ + "xref:./cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`]", + "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" + ], + "config_scope": "topic" + }, + "initial.retention.local.target.ms": { + "description": "A time-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.", + "related_topics": [ + "xref:./cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`]", + "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" + ], + "config_scope": "topic" + }, + "initial_retention_local_target_bytes_default": { + "description": "Initial local retention size target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. 
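For example (the value is illustrative), a 10 GiB initial local retention target can be set with:\n\n[,bash]\n----\n# 10737418240 bytes = 10 GiB.\nrpk cluster config set initial_retention_local_target_bytes_default 10737418240\n----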
If no initial local target retention is configured, then all locally-retained data will be delivered to learner when joining the partition replica set.", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "cluster" + }, + "initial_retention_local_target_ms_default": { + "description": "Initial local retention time target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data will be delivered to learner when joining the partition replica is set.", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "cluster" + }, + "kafka_api": { + "description": "IP address and port of the Kafka API endpoint that handles requests. Supports multiple listeners with different configurations.", + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]", + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "kafka_api_tls": { + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " kafka_api_tls:", + " - name: ", + " enabled: true", + " cert_file: ", + " key_file: ", + " truststore_file: ", + " require_client_auth: false", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "kafka_connection_rate_limit_overrides": { + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "config_scope": "cluster" + }, + "kafka_connections_max": { + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "config_scope": "cluster" + }, + "kafka_connections_max_overrides": { + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "config_scope": "cluster" + }, + "kafka_connections_max_per_ip": { + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "config_scope": "cluster" + }, + "kafka_nodelete_topics": { + "related_topics": [ + "xref:develop:consume-data/consumer-offsets.adoc[Consumer Offsets]", + "xref:manage:schema-registry.adoc[Schema Registry]" + ], + "config_scope": "cluster" + }, + "kafka_throughput_control": { + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage throughput]" + ], + "config_scope": "cluster" + }, + "kafka_throughput_limit_node_in_bps": { + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]" + ], + "config_scope": "cluster" + }, + "kafka_throughput_limit_node_out_bps": { + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]" + ], + "config_scope": "cluster" + }, + "kafka_throughput_replenish_threshold": { + "related_topics": [ + "xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_in_bps[`kafka_throughput_limit_node_in_bps`]", + "xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_out_bps[`kafka_throughput_limit_node_out_bps`]", + "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage Throughput]" + ], + "config_scope": "cluster" + }, + 
"leader_balancer_mute_timeout": { + "description": "The length of time that a glossterm:Raft[] group is muted after a leadership rebalance operation. Any group that has been moved, regardless of whether the move succeeded or failed, undergoes a cooling-off period. This prevents Raft groups from repeatedly experiencing leadership rebalance operations in a short time frame, which can lead to instability in the cluster.\n\nThe leader balancer maintains a list of muted groups and reevaluates muted status at the start of each balancing iteration. Muted groups still contribute to overall cluster balance calculations although they can't themselves be moved until the mute period is over.", + "config_scope": "cluster" + }, + "leader_balancer_node_mute_timeout": { + "description": "The duration after which a broker that hasn't sent a heartbeat is considered muted. This timeout sets a threshold for identifying brokers that shouldn't be targeted for leadership transfers when the cluster rebalances, for example, because of unreliable network connectivity.", + "config_scope": "cluster" + }, + "log_cleanup_policy": { + "description": "Default cleanup policy for topic logs.\n\nThe topic property xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`] overrides the value of `log_cleanup_policy` at the topic level.", + "related_topics": [ + "xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`]" + ], + "config_scope": "cluster" + }, + "log_compression_type": { + "description": "IMPORTANT: This property is ignored regardless of the value specified. The behavior is always the same as the `producer` value. Redpanda brokers do not compress or recompress data based on this property. If producers send compressed data, Redpanda stores it as-is; if producers send uncompressed data, Redpanda stores it uncompressed. Other listed values are accepted for Apache Kafka compatibility but are ignored by the broker. This property may appear in Admin API and `rpk topic describe` outputs for compatibility.\n\nDefault for the Kafka-compatible compression.type property. Redpanda does not recompress data.\n\nThe topic property xref:./topic-properties.adoc#compressiontype[`compression.type`] overrides the value of `log_compression_type` at the topic level.", + "related_topics": [ + "xref:./topic-properties.adoc#compressiontype[`compression.type`]" + ], + "config_scope": "cluster" + }, + "log_message_timestamp_type": { + "description": "Default timestamp type for topic messages (CreateTime or LogAppendTime).\n\nThe topic property xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`] overrides the value of `log_message_timestamp_type` at the topic level.", + "related_topics": [ + "xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`]" + ], + "config_scope": "cluster" + }, + "log_retention_ms": { + "related_topics": [ + "xref:./topic-properties.adoc#retentionms[`retention.ms`]" + ], + "config_scope": "cluster" + }, + "log_segment_ms": { + "related_topics": [ + "xref:./topic-properties.adoc#segmentms[`segment.ms`]" + ], + "config_scope": "cluster" + }, + "max.compaction.lag.ms": { + "description": "The maximum amount of time (in ms) that a log segment can remain unaltered before it is eligible for compaction in a compact topic. 
Overrides the cluster property xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] for the topic.", + "related_topics": [ + "xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`]", + "xref:./cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#configuration-options[Configure maximum compaction lag]" + ], + "config_scope": "topic" + }, + "max.message.bytes": { + "description": "The maximum size of a message or batch of a topic. If a compression type is enabled, `max.message.bytes` sets the maximum size of the compressed message or batch.\n\nIf `max.message.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] for the topic.", + "related_topics": [ + "xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`]", + "xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`]", + "xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching]" + ], + "config_scope": "topic" + }, + "max_compaction_lag_ms": { + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#max.compaction.lag.ms[`max.compaction.lag.ms`]" + ], + "config_scope": "cluster" + }, + "max_transactions_per_coordinator": { + "description": "Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase).\n\nFor details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips].", + "related_topics": [ + "xref:develop:transactions#transaction-usage-tips[Transaction usage tips]" + ], + "config_scope": "cluster" + }, + "message.timestamp.type": { + "description": "The source of a message's timestamp: either the message's creation time or its log append time.\n\nWhen `message.timestamp.type` is set, it overrides the cluster property xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] for the topic.", + "related_topics": [ + "xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`]", + "xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`]" + ], + "config_scope": "topic" + }, + "min.cleanable.dirty.ratio": { + "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic.", + "related_topics": [ + "xref:./cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`]" + ], + "config_scope": "topic" + }, + "min.compaction.lag.ms": { + "description": "The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic. 
Overrides the cluster property xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] for the topic.", + "related_topics": [ + "xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`]", + "xref:./cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#configure-min-compaction-lag[Configure minimum compaction lag]" + ], + "config_scope": "topic" + }, + "min_compaction_lag_ms": { + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#min.compaction.lag.ms[`min.compaction.lag.ms`]" + ], + "config_scope": "cluster" + }, + "node_id_overrides": { + "description": "List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID.\n\nRemove this property after the cluster restarts successfully and operates normally. This prevents reapplication and maintains consistent configuration across brokers.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " node_id_overrides:", + " - current_uuid: \"\"", + " new_id: ", + " new_uuid: \"\"", + " ignore_existing_node_id: ", + " - current_uuid: \"\"", + " new_id: ", + " new_uuid: \"\"", + " ignore_existing_node_id: ", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "oidc_discovery_url": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe URL pointing to the well-known discovery endpoint for the OIDC provider.", + "config_scope": "cluster" + }, + "oidc_principal_mapping": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nRule for mapping JWT payload claim to a Redpanda user principal.", + "related_topics": [ + "xref:manage:security/authentication.adoc#oidc[OpenID Connect authentication]", + "xref:manage:kubernetes/security/authentication/k-authentication.adoc[OpenID Connect authentication in Kubernetes]" + ], + "config_scope": "cluster" + }, + "oidc_token_audience": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nA string representing the intended recipient of the token.", + "config_scope": "cluster" + }, + "partition_autobalancing_max_disk_usage_percent": { + "related_topics": [ + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "config_scope": "cluster" + }, + "partition_autobalancing_mode": { + "related_topics": [ + "xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing]", + "xref:get-started:licensing/index.adoc[enterprise license]", + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "description": "Mode of partition balancing for a cluster. * `node_add`: partition balancing happens when a node is added. * `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option requires an Enterprise license, and it is customized by `partition_autobalancing_node_availability_timeout_sec` and `partition_autobalancing_max_disk_usage_percent` properties. * `off`: partition balancing is disabled. 
This option is not recommended for production clusters.", + "config_scope": "cluster" + }, + "partition_autobalancing_node_availability_timeout_sec": { + "related_topics": [ + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "config_scope": "cluster" + }, + "pp_sr_smp_max_non_local_requests": { + "description": "Maximum number of Cross-core(Inter-shard communication) requests pending in HTTP Proxy and Schema Registry seastar::smp group. (For more details, see the `seastar::smp_service_group` documentation).\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", + "config_scope": "cluster" + }, + "rack": { + "related_topics": [ + "xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness]" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "raft_recovery_throttle_disable_dynamic_mode": { + "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables cross shard sharing used to throttle recovery traffic. Should only be used to debug unexpected problems.", + "config_scope": "cluster" + }, + "raft_smp_max_non_local_requests": { + "description": "Maximum number of Cross-core(Inter-shard communication) requests pending in Raft seastar::smp group. For details, refer to the `seastar::smp_service_group` documentation).\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", + "config_scope": "cluster" + }, + "recovery_mode_enabled": { + "description": "If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed.", + "related_topics": [ + "xref:manage:recovery-mode.adoc[recovery mode]" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "redpanda.iceberg.delete": { + "description": "Whether the corresponding Iceberg table is deleted upon deleting the topic.", + "config_scope": "topic" + }, + "redpanda.iceberg.invalid.record.action": { + "description": "Whether to write invalid records to a dead-letter queue (DLQ).", + "related_topics": [ + "xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]" + ], + "config_scope": "topic" + }, + "redpanda.iceberg.mode": { + "description": "Enable the Iceberg integration for the topic. You can choose one of four modes.", + "related_topics": [ + "xref:manage:iceberg/choose-iceberg-mode.adoc#override-value-schema-latest-default[Choose an Iceberg Mode]" + ], + "config_scope": "topic" + }, + "redpanda.iceberg.partition.spec": { + "description": "The link:https://iceberg.apache.org/docs/nightly/partitioning/[partitioning^] specification for the Iceberg table.", + "related_topics": [ + "xref:manage:iceberg/about-iceberg-topics.adoc#use-custom-partitioning[Use custom partitioning]" + ], + "config_scope": "topic" + }, + "redpanda.iceberg.target.lag.ms": { + "description": "Controls how often the data in the Iceberg table is refreshed with new data from the topic. Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability.", + "config_scope": "topic" + }, + "redpanda.leaders.preference": { + "description": "The preferred location (rack) for partition leaders of a topic.\n\nThis property inherits the value from the config_ref:default_leaders_preference,true,properties/cluster-properties[] cluster configuration property. 
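As the next sentence notes, this value can be overridden per topic; a sketch (assuming the same `racks:` syntax as `default_leaders_preference`, with hypothetical rack IDs and topic name) is:\n\n[,bash]\n----\nrpk topic alter-config my-topic --set redpanda.leaders.preference='racks:rack-a,rack-b'\n----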
You may override the cluster-wide setting by specifying the value for individual topics.\n\nIf the cluster configuration property config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, Leader Pinning is disabled across the cluster.", + "related_topics": [ + "xref:develop:produce-data/leader-pinning.adoc[Leader pinning]" + ], + "config_scope": "topic" + }, + "redpanda.remote.allowgaps": { + "exclude_from_docs": true, + "config_scope": "topic" + }, + "redpanda.remote.delete": { + "description": "A flag that enables deletion of data from object storage for Tiered Storage when it's deleted from local storage for a topic.\n\nNOTE: `redpanda.remote.delete` doesn't apply to Remote Read Replica topics: a Remote Read Replica topic isn't deleted from object storage when this flag is `true`.", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "topic" + }, + "redpanda.remote.read": { + "description": "A flag for enabling Redpanda to fetch data for a topic from object storage to local storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "topic" + }, + "redpanda.remote.readreplica": { + "description": "The name of the object storage bucket for a Remote Read Replica topic.\n\nCAUTION: Setting `redpanda.remote.readreplica` together with either `redpanda.remote.read` or `redpanda.remote.write` results in an error.", + "related_topics": [ + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" + ], + "config_scope": "topic" + }, + "redpanda.remote.recovery": { + "description": "A flag that enables the recovery or reproduction of a topic from object storage for Tiered Storage. The recovered data is saved in local storage, and the maximum amount of recovered data is determined by the local storage retention limits of the topic.\n\nTIP: You can only configure `redpanda.remote.recovery` when you create a topic. You cannot apply this setting to existing topics.", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "topic" + }, + "redpanda.remote.write": { + "description": "A flag for enabling Redpanda to upload data for a topic from local storage to object storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "topic" + }, + "redpanda.virtual.cluster.id": { + "exclude_from_docs": true, + "config_scope": "topic" + }, + "replication.factor": { + "description": "The number of replicas of a topic to save in different nodes (brokers) of a cluster.\n\nIf `replication.factor` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] for the topic.\n\nNOTE: Although `replication.factor` isn't returned or displayed by xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] as a valid Kafka property, you can set it using xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]. 
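For example (the topic name and replication factor are illustrative):\n\n[,bash]\n----\nrpk topic alter-config my-topic --set replication.factor=5\n----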
When the `replication.factor` of a topic is altered, it isn't simply a property value that's updated, but rather the actual replica sets of topic partitions that are changed.", + "related_topics": [ + "xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication]", + "xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`]", + "xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]", + "xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`]", + "xref:develop:config-topics.adoc#choose-the-replication-factor[Choose the replication factor]", + "xref:develop:config-topics.adoc#change-the-replication-factor[Change the replication factor]" + ], + "config_scope": "topic" + }, + "retention.bytes": { + "description": "A size-based retention limit that configures the maximum size that a topic partition can grow before becoming eligible for cleanup.\n\nIf `retention.bytes` is set to a positive value, it overrides the cluster property xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] for the topic, and the total retained size for the topic is `retention.bytes` multiplied by the number of partitions for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, cleanup occurs when either limit is reached.", + "related_topics": [ + "xref:cluster-properties.adoc#retention_bytes[`retention_bytes`]", + "xref:./cluster-properties.adoc#retention_bytes[`retention_bytes`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "config_scope": "topic" + }, + "retention.local.target.bytes": { + "description": "A size-based retention limit for Tiered Storage that configures the maximum size that a topic partition in local storage can grow before becoming eligible for cleanup. It applies per partition and is equivalent to <> without Tiered Storage.", + "related_topics": [ + "xref:./cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "topic" + }, + "retention.local.target.ms": { + "description": "A time-based retention limit for Tiered Storage that sets the maximum duration that a log's segment file for a topic is retained in local storage before it's eligible for cleanup. This property is equivalent to <> without Tiered Storage.", + "related_topics": [ + "xref:./cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`]", + "xref:manage:tiered-storage.adoc[Tiered Storage]", + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" + ], + "config_scope": "topic" + }, + "retention.ms": { + "description": "A time-based retention limit that configures the maximum duration that a log's segment file for a topic is retained before it becomes eligible to be cleaned up. To consume all data, a consumer of the topic must read from a segment before its `retention.ms` elapses, otherwise the segment may be compacted and/or deleted. 
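For example (the value and topic name are illustrative), a seven-day retention limit can be set with:\n\n[,bash]\n----\n# 604800000 ms = 7 days.\nrpk topic alter-config my-topic --set retention.ms=604800000\n----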
If a non-positive value, no per-topic limit is applied.\n\nIf `retention.ms` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, the earliest occurring limit applies.", + "related_topics": [ + "xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`]", + "xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "config_scope": "topic" + }, + "retention_bytes": { + "description": "Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. If `null` (the default value), no limit is applied.\n\nThe topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level.", + "related_topics": [ + "xref:./topic-properties.adoc#retentionbytes[`retention.bytes`]" + ], + "config_scope": "cluster" + }, + "retention_local_target_bytes_default": { + "related_topics": [ + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "config_scope": "cluster" + }, + "retention_local_target_capacity_bytes": { + "description": "The target capacity (in bytes) that log storage will try to use before additional retention rules take over to trim data to meet the target. When no target is specified, storage usage is unbounded.\n\nNOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity.", + "config_scope": "cluster" + }, + "retention_local_target_capacity_percent": { + "description": "The target capacity in percent of unreserved space (<>) that log storage will try to use before additional retention rules will take over to trim data in order to meet the target. When no target is specified storage usage is unbounded.\n\nNOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity.", + "config_scope": "cluster" + }, + "retention_local_target_ms_default": { + "related_topics": [ + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "config_scope": "cluster" + }, + "rpc_server_tls": { + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " rpc_server_tls:", + " enabled: true", + " cert_file: \"\"", + " key_file: \"\"", + " truststore_file: \"\"", + " require_client_auth: true", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "sasl_mechanisms": { + "description": "A list of supported SASL mechanisms.\n\n*Accepted values:*\n\n* `SCRAM`\n* `GSSAPI`\n* `OAUTHBEARER`\n* `PLAIN`\n\nNote that in order to enable PLAIN, you must also enable SCRAM.", + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "cluster" + }, + "schema_registry_enable_authorization": { + "description": "Enables ACL-based authorization for Schema Registry requests. When `true`, Schema Registry\nuses ACL-based authorization instead of the default `public/user/superuser` authorization model. 
\nifdef::env-cloud[]\nRequires authentication to be enabled using the `authentication_method` property in the `schema_registry_api` broker configuration.\nendif::[]", + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "cluster" + }, + "seed_servers": { + "description": "List of the seed servers used to join current cluster. If the `seed_servers` list is empty the broker will be a cluster root and it will form a new cluster.\n\n* When `empty_seed_starts_cluster` is `true`, Redpanda enables one broker with an empty `seed_servers` list to initiate a new cluster. The broker with an empty `seed_servers` becomes the cluster root, to which other brokers must connect to join the cluster. Brokers looking to join the cluster should have their `seed_servers` populated with the cluster root's address, facilitating their connection to the cluster.\n+\n[IMPORTANT]\n====\nOnly one broker, the designated cluster root, should have an empty `seed_servers` list during the initial cluster bootstrapping. This ensures a single initiation point for cluster formation.\n====\n\n* When `empty_seed_starts_cluster` is `false`, Redpanda requires all brokers to start with a known set of brokers listed in `seed_servers`. The `seed_servers` list must not be empty and should be identical across these initial seed brokers, containing the addresses of all seed brokers. Brokers not included in the `seed_servers` list use it to discover and join the cluster, allowing for expansion beyond the foundational members.\n+\n[NOTE]\n====\nThe `seed_servers` list must be consistent across all seed brokers to prevent cluster fragmentation and ensure stable cluster formation.\n====", + "example": [ + ".Example with `empty_seed_starts_cluster: true`\n[,yaml]\n----\n# Cluster root broker (seed starter)\nredpanda:\n empty_seed_starts_cluster: true\n seed_servers: []\n----\n\n[,yaml]\n----\n# Additional brokers joining the cluster\nredpanda:\n empty_seed_starts_cluster: true\n seed_servers:\n - host:\n address: \n port: \n----\n\n.Example with `empty_seed_starts_cluster: false`\n[,yaml]\n----\n# All initial seed brokers use the same configuration\nredpanda:\n empty_seed_starts_cluster: false\n seed_servers:\n - host:\n address: \n port: \n - host:\n address: \n port: \n - host:\n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: IP address of the cluster root broker\n* ``: IP addresses of each seed broker in the cluster\n* ``: RPC port for brokers (default: `33145`)" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "segment.bytes": { + "description": "The maximum size of an active log segment for a topic. When the size of an active segment exceeds `segment.bytes`, the segment is closed and a new active segment is created. 
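For example, with `segment.bytes` set to `134217728` (128 MiB, an illustrative value), each partition rolls to a new segment after roughly 128 MiB of writes. 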
The closed, inactive segment is then eligible to be cleaned up according to retention properties.\n\nWhen `segment.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] for the topic.",
+      "related_topics": [
+        "xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`]",
+        "xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`]",
+        "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size]",
+        "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]",
+        "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]"
+      ],
+      "config_scope": "topic"
+    },
+    "segment.ms": {
+      "description": "The maximum duration that a log segment of a topic is active (open for writes and not deletable). A periodic event, with `segment.ms` as its period, forcibly closes the active segment and transitions, or rolls, to a new active segment. The closed (inactive) segment is then eligible to be cleaned up according to cleanup and retention properties.\n\nIf set to a positive duration, `segment.ms` overrides the cluster property xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]. Values are automatically clamped between the cluster bounds set by xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] (default: 10 minutes) and xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] (default: 1 year). If your configured value exceeds these bounds, Redpanda uses the bound value and logs a warning. Check current cluster bounds with `rpk cluster config get log_segment_ms_min log_segment_ms_max`.",
+      "related_topics": [
+        "xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]",
+        "xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`]",
+        "xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`]",
+        "xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]",
+        "xref:manage:cluster-maintenance/disk-utilization.adoc#log-rolling[Log rolling]"
+      ],
+      "config_scope": "topic"
+    },
+    "storage_compaction_key_map_memory": {
+      "description": "Maximum number of bytes that may be used on each shard by compaction key-offset maps. Only applies when <> is set to `true`.",
+      "config_scope": "cluster"
+    },
+    "storage_compaction_key_map_memory_limit_percent": {
+      "description": "Limit on <>, expressed as a percentage of memory per shard, that bounds the amount of memory used by compaction key-offset maps. \n\nNOTE: Memory per shard is computed after <>, and only applies when <> is set to `true`.",
+      "config_scope": "cluster"
+    },
+    "storage_strict_data_init": {
+      "description": "Requires that an empty file named `.redpanda_data_dir` be present in the xref:reference:properties/broker-properties.adoc#data_directory[`data_directory`]. If set to `true`, Redpanda will refuse to start if the file is not found in the data directory.",
+      "related_topics": [
+        "xref:reference:properties/broker-properties.adoc#data_directory[`data_directory`]"
+      ],
+      "config_scope": "cluster"
+    },
+    "tombstone_retention_ms": {
+      "description": "The retention time for tombstone records in a compacted topic. Cannot be enabled at the same time as any of `cloud_storage_enabled`, `cloud_storage_enable_remote_read`, or `cloud_storage_enable_remote_write`. 
A typical default setting is `86400000`, or 24 hours.", + "related_topics": [ + "xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]" + ], + "config_scope": "cluster" + }, + "topic_partitions_memory_allocation_percent": { + "description": "Percentage of total memory to reserve for topic partitions. See <> for details.", + "config_scope": "cluster" + }, + "verbose_logging_timeout_sec_max": { + "example": [ + ".Example", + "[,yaml]", + "----", + "schema_registry:", + " schema_registry_api:", + " address: 0.0.0.0", + " port: 8081", + " authentication_method: http_basic", + " schema_registry_replication_factor: 3", + " mode_mutability: true", + "----" + ], + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]", + "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "write.caching": { + "description": "The write caching mode to apply to a topic.\n\nWhen `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <> and <>, whichever is reached first.", + "related_topics": [ + "xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]", + "xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`]", + "xref:develop:config-topics.adoc#configure-write-caching[Write caching]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "topic" + }, + "confluent.key.schema.validation": { + "description": "Enable validation of the schema ID for keys on a record. This is a compatibility alias for `redpanda.key.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy.", + "config_scope": "topic" + }, + "confluent.key.subject.name.strategy": { + "description": "The subject name strategy for keys when `confluent.key.schema.validation` is enabled. This is a compatibility alias for `redpanda.key.subject.name.strategy` that determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "config_scope": "topic" + }, + "confluent.value.schema.validation": { + "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", + "config_scope": "topic" + }, + "confluent.value.subject.name.strategy": { + "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. 
This determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "config_scope": "topic" + }, + "write_caching_default": { + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`]", + "xref:develop:config-topics.adoc#configure-write-caching[Write caching]" + ], + "config_scope": "cluster" + }, + "advertised_kafka_api": { + "description": "Address of the Kafka API published to the clients. If not set, the <> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " advertised_kafka_api:", + " - name: ", + " address: ", + " port: ", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "advertised_rpc_api": { + "description": "Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " advertised_rpc_api:", + " address: ", + " port: ", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "api_doc_dir": { + "description": "Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API.", + "config_scope": "broker", + "category": "pandaproxy" + }, + "audit_enabled": { + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "cluster" + }, + "client_keep_alive": { + "description": "Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API.", + "config_scope": "broker", + "category": "pandaproxy" + }, + "cloud_storage_access_key": { + "description": "AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication.\nTo authenticate using IAM roles, see <>.", + "config_scope": "object-storage" + }, + "cloud_storage_client_lease_timeout_ms": { + "description": "Maximum time to hold a cloud storage client lease (ms), after which any outstanding connection is immediately closed.", + "config_scope": "cluster" + }, + "cloud_storage_disable_archiver_manager": { + "description": "Use legacy upload mode and do not start archiver_manager.", + "config_scope": "cluster" + }, + "cloud_storage_inventory_hash_path_directory": { + "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " cloud_storage_inventory_hash_store: ", + "----" + ], + "config_scope": "object-storage" + }, + "consumer_heartbeat_interval_ms": { + "description": "Interval (in milliseconds) for consumer heartbeats.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_instance_timeout_ms": { + "description": "How long to wait for an idle consumer before removing it. 
A consumer is considered idle when it's not making requests or heartbeats.", + "config_scope": "broker", + "category": "pandaproxy" + }, + "consumer_offsets_topic_batch_cache_enabled": { + "description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. The change is applied only after the restart.", + "config_scope": "cluster" + }, + "consumer_rebalance_timeout_ms": { + "description": "Timeout (in milliseconds) for consumer rebalance.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_request_max_bytes": { + "description": "Maximum bytes to fetch per request.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_request_min_bytes": { + "description": "Minimum bytes to fetch per request.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_request_timeout_ms": { + "description": "Interval (in milliseconds) for consumer request timeout.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_session_timeout_ms": { + "description": "Timeout (in milliseconds) for consumer session.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "development_enable_cluster_link": { + "description": "Enable cluster linking.", + "config_scope": "cluster" + }, + "development_feature_property_testing_only": { + "description": "Development feature property for testing only.", + "config_scope": "cluster" + }, + "disable_cluster_recovery_loop_for_tests": { + "description": "Disables the cluster recovery loop. This property is used to simplify testing and should not be set in production.", + "config_scope": "cluster" + }, + "enable_developmental_unrecoverable_data_corrupting_features": { + "description": "Development features should never be enabled in a production cluster, or any cluster where stability, data loss, or the ability to upgrade are a concern. To enable experimental features, set the value of this configuration option to the current unix epoch expressed in seconds. The value must be within one hour of the current time on the broker.Once experimental features are enabled they cannot be disabled", + "config_scope": "cluster" + }, + "iceberg_delete": { + "description": "Default value for the redpanda.iceberg.delete topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.", + "config_scope": "cluster" + }, + "iceberg_disable_snapshot_tagging": { + "description": "Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly once delivery of records. Disabling tags is therefore not recommended, but may be useful if the Iceberg catalog does not support tags.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_authentication_mode": { + "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditonally, and no attempts are made to refresh the token. 
In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to AWS Glue catalog with SigV4.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_client_id": { + "description": "Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_credentials_source": { + "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" + ], + "config_scope": "cluster" + }, + "iceberg_rest_catalog_crl": { + "description": "The contents of a certificate revocation list for `iceberg_rest_catalog_trust`. Takes precedence over `iceberg_rest_catalog_crl_file`.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_oauth2_scope": { + "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_oauth2_server_uri": { + "description": "The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_request_timeout_ms": { + "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request", + "config_scope": "cluster" + }, + "iceberg_topic_name_dot_replacement": { + "description": "Optional replacement string for dots in topic names when deriving Iceberg table names, useful when downstream systems do not permit dots in table names. The replacement string cannot contain dots. Be careful to avoid table name collisions caused by the replacement.If an Iceberg topic with dots in the name exists in the cluster, the value of this property should not be changed.", + "config_scope": "cluster" + }, + "kafka_enable_authorization": { + "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by enable_sasl. * `null`: Ignored. Authorization is enabled with `enable_sasl`: `true` * `true`: authorization is required. 
* `false`: authorization is disabled.", + "config_scope": "cluster" + }, + "kafka_produce_batch_validation": { + "description": "Controls the level of validation performed on batches produced to Redpanda. When set to `legacy`, there is minimal validation performed on the produce path. When set to `relaxed`, full validation is performed on uncompressed batches and on compressed batches with the `max_timestamp` value left unset. When set to `strict`, full validation of uncompressed and compressed batches is performed. This should be the default in environments where producing clients are not trusted.", + "config_scope": "cluster" + }, + "mode_mutability": { + "description": "Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects.", + "config_scope": "broker", + "category": "schema-registry" + }, + "pandaproxy_api": { + "description": "Rest API listener address and port.", + "example": [ + ".Example", + "[,yaml]", + "----", + "pandaproxy:", + " pandaproxy_api:", + " address: 0.0.0.0", + " port: 8082", + " authentication_method: http_basic", + "----" + ], + "config_scope": "broker", + "category": "pandaproxy" + }, + "pandaproxy_api_tls": { + "description": "TLS configuration for Pandaproxy API.", + "config_scope": "broker", + "category": "pandaproxy" + }, + "produce_batch_delay_ms": { + "description": "Delay (in milliseconds) to wait before sending batch.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_batch_size_bytes": { + "description": "Number of bytes to batch before sending to broker.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_shutdown_delay_ms": { + "description": "Delay (in milliseconds) to allow for final flush of buffers before shutting down.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "retry_base_backoff_ms": { + "description": "Delay (in milliseconds) for initial retry backoff.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "sasl_mechanism": { + "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" + ], + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "sasl_mechanisms_overrides": { + "description": "A list of overrides for SASL mechanisms, defined by listener. SASL mechanisms defined here will replace the ones set in `sasl_mechanisms`. 
The same limitations apply as for `sasl_mechanisms`.", + "related_topics": [ + "xref:get-started:licensing/index.adoc[Redpanda Licensing]" + ], + "config_scope": "cluster" + }, + "schema_registry_api": { + "description": "Schema Registry API listener address and port.", + "example": [ + ".Example", + "[,yaml]", + "----", + "schema_registry:", + " schema_registry_api:", + " address: 0.0.0.0", + " port: 8081", + " authentication_method: http_basic", + "----" + ], + "config_scope": "broker", + "category": "schema-registry" + }, + "schema_registry_replication_factor": { + "description": "Replication factor for internal `_schemas` topic. If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property.", + "related_topics": [ + "xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`]" + ], + "config_scope": "broker", + "category": "schema-registry" + }, + "scram_password": { + "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "scram_username": { + "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "transaction_coordinator_cleanup_policy": { + "description": "Cleanup policy for a transaction coordinator topic.\n\n*Accepted values:*\n\n* `compact`\n* `delete`\n* `[\"compact\",\"delete\"]`\n* `none`", + "config_scope": "cluster" + }, + "admin_api_doc_dir": { + "config_scope": "broker", + "category": "redpanda" + }, + "crash_loop_limit": { + "config_scope": "broker", + "category": "redpanda" + }, + "data_directory": { + "config_scope": "broker", + "category": "redpanda" + }, + "fips_mode": { + "config_scope": "broker", + "category": "redpanda" + }, + "memory_allocation_warning_threshold": { + "config_scope": "broker", + "category": "redpanda" + }, + "node_id": { + "config_scope": "broker", + "category": "redpanda" + }, + "openssl_config_file": { + "config_scope": "broker", + "category": "redpanda" + }, + "openssl_module_directory": { + "config_scope": "broker", + "category": "redpanda" + }, + "rpc_server": { + "config_scope": "broker", + "category": "redpanda" + }, + "storage_failure_injection_config_path": { + "config_scope": "broker", + "category": "redpanda" + }, + "storage_failure_injection_enabled": { + "config_scope": "broker", + "category": "redpanda" + }, + "upgrade_override_checks": { + "config_scope": "broker", + "category": "redpanda" + }, + "schema_registry_api_tls": { + "config_scope": "broker", + "category": "schema-registry" + }, + "advertised_pandaproxy_api": { + "config_scope": "broker", + "category": "pandaproxy" + }, + "client_cache_max_size": { + "config_scope": "broker", + "category": "pandaproxy" + }, + "broker_tls": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "brokers": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + 
"client_identifier": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_ack_level": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_batch_record_count": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_compression_type": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "retries": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "redpanda.cloud_topic.enabled": { + "config_scope": "topic", + "category": "tiered-storage" + } + } +} \ No newline at end of file From 450e7b833f0248958d260babbf032ebaf8949536 Mon Sep 17 00:00:00 2001 From: JakeSCahill Date: Wed, 29 Oct 2025 10:17:37 +0000 Subject: [PATCH 2/8] Add broker property diff --- docs-data/property-overrides.json | 62 +++++++++++++++++++------------ 1 file changed, 39 insertions(+), 23 deletions(-) diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json index 3b7a40a94c..60083d5910 100644 --- a/docs-data/property-overrides.json +++ b/docs-data/property-overrides.json @@ -208,7 +208,7 @@ "config_scope": "cluster" }, "crash_loop_sleep_sec": { - "description": "*Introduced in v24.3.4*\n\nThe amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <> broker property.", + "description": "The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <> broker property.", "version": "v24.3.4", "config_scope": "broker", "category": "redpanda" @@ -696,7 +696,8 @@ "xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness]" ], "config_scope": "broker", - "category": "redpanda" + "category": "redpanda", + "description": "A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[`enable_rack_awareness`] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones." }, "raft_recovery_throttle_disable_dynamic_mode": { "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables cross shard sharing used to throttle recovery traffic. Should only be used to debug unexpected problems.", @@ -975,7 +976,8 @@ "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]" ], "config_scope": "broker", - "category": "redpanda" + "category": "redpanda", + "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis." 
}, "write.caching": { "description": "The write caching mode to apply to a topic.\n\nWhen `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <> and <>, whichever is reached first.", @@ -1101,12 +1103,12 @@ "category": "pandaproxy-client" }, "consumer_request_max_bytes": { - "description": "Maximum bytes to fetch per request.", + "description": "Maximum bytes to fetch per request.\n\n*Unit:* bytes", "config_scope": "broker", "category": "pandaproxy-client" }, "consumer_request_min_bytes": { - "description": "Minimum bytes to fetch per request.", + "description": "Minimum bytes to fetch per request.\n\n*Unit:* bytes", "config_scope": "broker", "category": "pandaproxy-client" }, @@ -1219,7 +1221,7 @@ "category": "pandaproxy-client" }, "produce_batch_size_bytes": { - "description": "Number of bytes to batch before sending to broker.", + "description": "Number of bytes to batch before sending to broker.\n\n*Unit:* bytes", "config_scope": "broker", "category": "pandaproxy-client" }, @@ -1234,7 +1236,7 @@ "category": "pandaproxy-client" }, "sasl_mechanism": { - "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", "related_topics": [ "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" ], @@ -1273,12 +1275,12 @@ "category": "schema-registry" }, "scram_password": { - "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. 
This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", "config_scope": "broker", "category": "pandaproxy-client" }, "scram_username": { - "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", "config_scope": "broker", "category": "pandaproxy-client" }, @@ -1292,7 +1294,8 @@ }, "crash_loop_limit": { "config_scope": "broker", - "category": "redpanda" + "category": "redpanda", + "description": "A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. This limit prevents a broker from getting stuck in an infinite cycle of crashes.\n\nIf `null`, the property is disabled and no limit is applied.\n\nThe crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions:\n\n* The broker shuts down cleanly.\n* One hour passes since the last crash.\n* The `redpanda.yaml` broker configuration file is updated.\n* The `startup_log` file in the broker's <> broker property is manually deleted." }, "data_directory": { "config_scope": "broker", @@ -1300,15 +1303,18 @@ }, "fips_mode": { "config_scope": "broker", - "category": "redpanda" + "category": "redpanda", + "description": "Controls whether Redpanda starts in FIPS mode. This property allows for three values: \n\n* Disabled - Redpanda does not start in FIPS mode.\n\n* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods.\n\n* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. If the file does not exist or does not return `1`, Redpanda immediately exits." }, "memory_allocation_warning_threshold": { "config_scope": "broker", - "category": "redpanda" + "category": "redpanda", + "description": "Threshold for log messages that contain a larger memory allocation than specified." }, "node_id": { "config_scope": "broker", - "category": "redpanda" + "category": "redpanda", + "description": "A number that uniquely identifies the broker within the cluster. If `null` (the default value), Redpanda automatically assigns an ID. 
If set, it must be a non-negative value.\n\n.Do not set `node_id` manually.\n[WARNING]\n====\nRedpanda assigns unique IDs automatically to prevent issues such as:\n\n- Brokers with empty disks rejoining the cluster.\n- Conflicts during recovery or scaling.\n\nManually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures.\n====\n\nBroker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed."
     },
     "openssl_config_file": {
       "config_scope": "broker",
@@ -1336,43 +1342,53 @@
     },
     "schema_registry_api_tls": {
       "config_scope": "broker",
-      "category": "schema-registry"
+      "category": "schema-registry",
+      "description": "TLS configuration for Schema Registry API."
     },
     "advertised_pandaproxy_api": {
       "config_scope": "broker",
-      "category": "pandaproxy"
+      "category": "pandaproxy",
+      "description": "Network address for the HTTP Proxy API server to publish to clients."
     },
     "client_cache_max_size": {
       "config_scope": "broker",
-      "category": "pandaproxy"
+      "category": "pandaproxy",
+      "description": "The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage."
     },
     "broker_tls": {
       "config_scope": "broker",
-      "category": "pandaproxy-client"
+      "category": "pandaproxy-client",
+      "description": "TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect."
     },
     "brokers": {
       "config_scope": "broker",
-      "category": "pandaproxy-client"
+      "category": "pandaproxy-client",
+      "description": "Network addresses of the Kafka API servers to which the HTTP Proxy client should connect."
     },
     "client_identifier": {
       "config_scope": "broker",
-      "category": "pandaproxy-client"
+      "category": "pandaproxy-client",
+      "description": "Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities."
     },
     "produce_ack_level": {
       "config_scope": "broker",
-      "category": "pandaproxy-client"
+      "category": "pandaproxy-client",
+      "description": "Number of acknowledgments the producer requires the leader to have received before considering a request complete."
     },
     "produce_batch_record_count": {
       "config_scope": "broker",
-      "category": "pandaproxy-client"
+      "category": "pandaproxy-client",
+      "description": "Number of records to batch before sending to broker."
     },
     "produce_compression_type": {
       "config_scope": "broker",
-      "category": "pandaproxy-client"
+      "category": "pandaproxy-client",
+      "description": "Enable or disable compression by the Kafka client. Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd]."
     },
     "retries": {
       "config_scope": "broker",
-      "category": "pandaproxy-client"
+      "category": "pandaproxy-client",
+      "description": "Number of times to retry a request to a broker."
}, "redpanda.cloud_topic.enabled": { "config_scope": "topic", From 714840b39efdf4d9b057f9018e1b1182bb591ffc Mon Sep 17 00:00:00 2001 From: JakeSCahill Date: Wed, 29 Oct 2025 10:37:02 +0000 Subject: [PATCH 3/8] Add cluster property diff --- docs-data/property-overrides.json | 268 +++++++++++++++++++++++++++--- 1 file changed, 242 insertions(+), 26 deletions(-) diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json index 60083d5910..bdf13fd569 100644 --- a/docs-data/property-overrides.json +++ b/docs-data/property-overrides.json @@ -1,7 +1,7 @@ { "properties": { "abort_index_segment_size": { - "description": "Capacity (in number of txns) of an abort index segment.\nEach partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk.", + "description": "Capacity (in number of txns) of an abort index segment.\n\nEach partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk.", "config_scope": "cluster" }, "admin": { @@ -234,7 +234,7 @@ "config_scope": "cluster" }, "default_leaders_preference": { - "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\nThe list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.\nifndef::env-cloud[]", + "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\n\nThe list can contain one or more rack IDs. 
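For example, `racks:rack-a,rack-b` (illustrative rack IDs) prefers brokers in either of those racks. 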
If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\n\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.", "related_topics": [ "xref:get-started:licensing/index.adoc[Redpanda Licensing]" ], @@ -274,7 +274,7 @@ "config_scope": "cluster" }, "enable_consumer_group_metrics": { - "description": "List of enabled consumer group metrics.\n\n*Accepted values:*\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.\n+\nifndef::env-cloud[]\nUse the xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`] property to control the frequency of consumer lag metric collection.\nendif::[]", + "description": "List of enabled consumer group metrics. Accepted values include:\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.\n+", "related_topics": [ "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`]", "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`]", @@ -396,7 +396,7 @@ "config_scope": "cluster" }, "iceberg_rest_catalog_token": { - "description": "Token used to access the REST Iceberg catalog. If the token is present, Redpanda ignores credentials stored in the properties <> and <>.\nRequired if <> is set to `bearer`.", + "description": "Token used to access the REST Iceberg catalog. 
If the token is present, Redpanda ignores credentials stored in the properties <> and <>.\n\nRequired if <> is set to `bearer`.",
       "config_scope": "cluster"
     },
     "iceberg_rest_catalog_trust": {
@@ -486,7 +486,8 @@
       "related_topics": [
         "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]"
       ],
-      "config_scope": "cluster"
+      "config_scope": "cluster",
+      "description": "A list of IP addresses for which Kafka client connection limits are overridden and don't apply. For example: `['127.0.0.1:90', '50.20.1.1:40']`."
     },
     "kafka_connections_max_per_ip": {
       "related_topics": [
@@ -505,7 +506,8 @@
       "related_topics": [
         "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage throughput]"
       ],
-      "config_scope": "cluster"
+      "config_scope": "cluster",
+      "description": "List of throughput control groups that define exclusions from broker-wide throughput limits. Clients excluded from broker-wide throughput limits are still potentially subject to client-specific throughput limits.\n\nEach throughput control group consists of:\n\n* `name` (optional) - any unique group name\n* `client_id` - regex to match client_id\n\nExample values:\n\n* `[{'name': 'first_group','client_id': 'client1'}, {'client_id': 'consumer-\\d+'}]`\n* `[{'name': 'catch all'}]`\n* `[{'name': 'missing_id', 'client_id': '+empty'}]`\n\nA connection is assigned the first matching group and is then excluded from throughput control. A `name` is not required, but can help you categorize the exclusions. Specifying `+empty` for the `client_id` will match on clients that opt not to send a `client_id`. You can also optionally omit the `client_id` and specify only a `name`, as shown. In this situation, all clients will match the rule and Redpanda will exclude them all from broker-wide throughput control."
     },
     "kafka_throughput_limit_node_in_bps": {
       "related_topics": [
@@ -525,7 +527,8 @@
         "xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_out_bps[`kafka_throughput_limit_node_out_bps`]",
         "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage Throughput]"
       ],
-      "config_scope": "cluster"
+      "config_scope": "cluster",
+      "description": "Threshold for refilling the token bucket as part of enforcing throughput limits.\n\nThis threshold is evaluated with each request for data. When the number of tokens to replenish exceeds this threshold, then tokens are added to the token bucket. This ensures that the atomic is not being updated for the token count with each request. The range for this threshold is automatically clamped to the corresponding throughput limit for ingress and egress."
     },
     "leader_balancer_mute_timeout": {
       "description": "The length of time that a glossterm:Raft[] group is muted after a leadership rebalance operation. Any group that has been moved, regardless of whether the move succeeded or failed, undergoes a cooling-off period. This prevents Raft groups from repeatedly experiencing leadership rebalance operations in a short time frame, which can lead to instability in the cluster.\n\nThe leader balancer maintains a list of muted groups and reevaluates muted status at the start of each balancing iteration. 
Muted groups still contribute to overall cluster balance calculations although they can't themselves be moved until the mute period is over.", @@ -560,7 +563,8 @@ "related_topics": [ "xref:./topic-properties.adoc#retentionms[`retention.ms`]" ], - "config_scope": "cluster" + "config_scope": "cluster", + "description": "The amount of time to keep a log file before deleting it (in milliseconds). If set to `-1`, no time limit is applied. This is a cluster-wide default when a topic does not set or disable xref:./topic-properties.adoc#retentionms[`retention.ms`]." }, "log_segment_ms": { "related_topics": [ @@ -627,7 +631,8 @@ "related_topics": [ "xref:reference:properties/topic-properties.adoc#min.compaction.lag.ms[`min.compaction.lag.ms`]" ], - "config_scope": "cluster" + "config_scope": "cluster", + "description": "The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic." }, "node_id_overrides": { "description": "List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID.\n\nRemove this property after the cluster restarts successfully and operates normally. This prevents reapplication and maintains consistent configuration across brokers.", @@ -678,7 +683,7 @@ "xref:get-started:licensing/index.adoc[enterprise license]", "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" ], - "description": "Mode of partition balancing for a cluster. * `node_add`: partition balancing happens when a node is added. * `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option requires an Enterprise license, and it is customized by `partition_autobalancing_node_availability_timeout_sec` and `partition_autobalancing_max_disk_usage_percent` properties. * `off`: partition balancing is disabled. This option is not recommended for production clusters.", + "description": "Mode of xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing] for a cluster.\n\n*Accepted values:*\n\n* `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option requires an xref:get-started:licensing/index.adoc[enterprise license], and it is customized by <> and <> properties.\n* `node_add`: partition balancing happens when a node is added.\n* `off`: partition balancing is disabled. This option is not recommended for production clusters.", "config_scope": "cluster" }, "partition_autobalancing_node_availability_timeout_sec": { @@ -697,7 +702,7 @@ ], "config_scope": "broker", "category": "redpanda", - "description": "A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[`enable_rack_awareness`] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones." + "description": "A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. 
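For example, every broker in one availability zone might set the same value in its `redpanda.yaml` (the zone name here is illustrative):\n\n[,yaml]\n----\nredpanda:\n  rack: us-east-1a\n----\n\n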
When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones." }, "raft_recovery_throttle_disable_dynamic_mode": { "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables cross shard sharing used to throttle recovery traffic. Should only be used to debug unexpected problems.", @@ -844,7 +849,7 @@ "config_scope": "topic" }, "retention_bytes": { - "description": "Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. If `null` (the default value), no limit is applied.\n\nThe topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level.", + "description": "Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. If `null` (the default value), no limit is applied.\n\nThe topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level.\n\n*Unit*: bytes per partition.", "related_topics": [ "xref:./topic-properties.adoc#retentionbytes[`retention.bytes`]" ], @@ -854,7 +859,8 @@ "related_topics": [ "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" ], - "config_scope": "cluster" + "config_scope": "cluster", + "description": "Local retention size target for partitions of topics with object storage write enabled. If `null`, the property is disabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.bytes` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]." }, "retention_local_target_capacity_bytes": { "description": "The target capacity (in bytes) that log storage will try to use before additional retention rules take over to trim data to meet the target. When no target is specified, storage usage is unbounded.\n\nNOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity.", @@ -868,7 +874,8 @@ "related_topics": [ "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" ], - "config_scope": "cluster" + "config_scope": "cluster", + "description": "Local retention time target for partitions of topics with object storage write enabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.ms` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]." }, "rpc_server_tls": { "example": [ @@ -977,7 +984,7 @@ ], "config_scope": "broker", "category": "redpanda", - "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis." + "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. 
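For example, a value of `3600` caps any verbose logging session at one hour. 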
Can be overridden in the Admin API on a per-request basis.\n\n*Unit:* seconds" }, "write.caching": { "description": "The write caching mode to apply to a topic.\n\nWhen `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <> and <>, whichever is reached first.", @@ -1010,7 +1017,8 @@ "xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`]", "xref:develop:config-topics.adoc#configure-write-caching[Write caching]" ], - "config_scope": "cluster" + "config_scope": "cluster", + "description": "The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. \n\nFsyncs follow <> and <>, whichever is reached first.\n\nThe `write_caching_default` cluster property can be overridden with the xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] topic property." }, "advertised_kafka_api": { "description": "Address of the Kafka API published to the clients. If not set, the <> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.", @@ -1094,7 +1102,7 @@ "category": "pandaproxy" }, "consumer_offsets_topic_batch_cache_enabled": { - "description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. The change is applied only after the restart.", + "description": "This property lets you enable batch caching for the consumer offsets topic. By default, the cache for this topic is disabled. Changing this property is not recommended in production systems as it may affect performance.", "config_scope": "cluster" }, "consumer_rebalance_timeout_ms": { @@ -1131,7 +1139,7 @@ "config_scope": "cluster" }, "disable_cluster_recovery_loop_for_tests": { - "description": "Disables the cluster recovery loop. This property is used to simplify testing and should not be set in production.", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop.", "config_scope": "cluster" }, "enable_developmental_unrecoverable_data_corrupting_features": { @@ -1139,19 +1147,19 @@ "config_scope": "cluster" }, "iceberg_delete": { - "description": "Default value for the redpanda.iceberg.delete topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.", + "description": "Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.", "config_scope": "cluster" }, "iceberg_disable_snapshot_tagging": { - "description": "Whether to disable tagging of Iceberg snapshots. 
These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly once delivery of records. Disabling tags is therefore not recommended, but may be useful if the Iceberg catalog does not support tags.", + "description": "Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags.", "config_scope": "cluster" }, "iceberg_rest_catalog_authentication_mode": { - "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditonally, and no attempts are made to refresh the token. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to AWS Glue catalog with SigV4.", + "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, and `oauth2`. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri.`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditionally, and no attempts are made to refresh the token.", "config_scope": "cluster" }, "iceberg_rest_catalog_client_id": { - "description": "Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "description": "The client ID used to query the REST catalog API for the OAuth token. Required if catalog type is set to `rest`.", "config_scope": "cluster" }, "iceberg_rest_catalog_credentials_source": { @@ -1166,15 +1174,15 @@ "config_scope": "cluster" }, "iceberg_rest_catalog_oauth2_scope": { - "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`", + "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", "config_scope": "cluster" }, "iceberg_rest_catalog_oauth2_server_uri": { - "description": "The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", + "description": "The OAuth URI used to retrieve access tokens for Iceberg REST catalog authentication. 
If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", "config_scope": "cluster" }, "iceberg_rest_catalog_request_timeout_ms": { - "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request", + "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request.", "config_scope": "cluster" }, "iceberg_topic_name_dot_replacement": { @@ -1182,7 +1190,7 @@ "config_scope": "cluster" }, "kafka_enable_authorization": { - "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by enable_sasl. * `null`: Ignored. Authorization is enabled with `enable_sasl`: `true` * `true`: authorization is required. * `false`: authorization is disabled.", + "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>.", "config_scope": "cluster" }, "kafka_produce_batch_validation": { @@ -1393,6 +1401,214 @@ "redpanda.cloud_topic.enabled": { "config_scope": "topic", "category": "tiered-storage" + }, + "auto_create_topics_enabled": { + "description": "Allow automatic topic creation.\n\nIf you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled.", + "config_scope": "cluster" + }, + "consumer_group_lag_collection_interval_sec": { + "description": "How often to run the collection loop when <> contains `consumer_lag`.\n\nReducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings.", + "config_scope": "cluster" + }, + "controller_log_accummulation_rps_capacity_topic_operations": { + "description": "Maximum capacity of rate limit accumulation in controller topic operations limit.", + "config_scope": "cluster" + }, + "core_balancing_debounce_timeout": { + "description": "Interval, in milliseconds, between trigger and invocation of core balancing.", + "config_scope": "cluster" + }, + "datalake_coordinator_snapshot_max_delay_secs": { + "description": "Maximum amount of time the coordinator waits to snapshot after a command appears in the log.", + "config_scope": "cluster" + }, + "datalake_disk_space_monitor_enable": { + "description": "Option to explicitly disable enforcement of datalake disk space usage.", + "config_scope": "cluster" + }, + "datalake_scratch_space_soft_limit_size_percent": { + "description": "Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value.", + "config_scope": "cluster" + }, + "datalake_scheduler_max_concurrent_translations": { + "description": "The maximum number of translations that the datalake scheduler will allow to run at a given time. If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available.", + "config_scope": "cluster" + }, + "datalake_scheduler_time_slice_ms": { + "description": "Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. 
After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated.", + "config_scope": "cluster" + }, + "disk_reservation_percent": { + "description": "The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well \nas when cloud cache uses a dedicated disk. \n\nIt is recommended to not run disks near capacity to avoid blocking I/O due to low disk space, as well as avoiding performance issues associated with SSD garbage collection.", + "config_scope": "cluster" + }, + "enable_sasl": { + "description": "Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <>.", + "config_scope": "cluster" + }, + "fetch_read_strategy": { + "description": "The strategy used to fulfill fetch requests.\n\n* `polling`: Repeatedly polls every partition in the request for new data. The polling interval is set by <> (deprecated).\n\n* `non_polling`: The backend is signaled when a partition has new data, so Redpanda doesn't need to repeatedly read from every partition in the fetch. Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization.\n\n* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by <> at the start of each fetch. By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency.", + "config_scope": "cluster" + }, + "iceberg_backlog_controller_i_coeff": { + "description": "Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation.", + "config_scope": "cluster" + }, + "iceberg_disable_automatic_snapshot_expiry": { + "description": "Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own.", + "config_scope": "cluster" + }, + "iceberg_rest_catalog_trust_file": { + "description": "Path to a file containing a certificate chain to trust for the REST Iceberg catalog.", + "config_scope": "cluster" + }, + "iceberg_target_backlog_size": { + "description": "Average size per partition of the datalake translation backlog that the backlog controller tries to maintain. 
When the backlog size is larger than the set point, the backlog controller will increase the translation scheduling group priority.",
+      "config_scope": "cluster"
+    },
+    "iceberg_throttle_backlog_size_ratio": {
+      "description": "Ratio of the total backlog size to the disk space at which the throttle to Iceberg producers is applied.",
+      "config_scope": "cluster"
+    },
+    "internal_topic_replication_factor": {
+      "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.",
+      "config_scope": "cluster"
+    },
+    "kafka_qdc_enable": {
+      "description": "Enable Kafka queue depth control.",
+      "config_scope": "cluster"
+    },
+    "kafka_qdc_max_depth": {
+      "description": "Maximum queue depth used in Kafka queue depth control.",
+      "config_scope": "cluster"
+    },
+    "kafka_qdc_window_count": {
+      "description": "Number of windows used in Kafka queue depth control latency tracking.",
+      "config_scope": "cluster"
+    },
+    "kafka_sasl_max_reauth_ms": {
+      "description": "The maximum time between Kafka client reauthentications. If a client has not reauthenticated a connection within this time frame, that connection is torn down.",
+      "config_scope": "cluster"
+    },
+    "leader_balancer_idle_timeout": {
+      "description": "Leadership rebalancing idle timeout.\n\n*Unit*: milliseconds",
+      "config_scope": "cluster"
+    },
+    "log_compaction_merge_max_ranges": {
+      "description": "The maximum range of segments that can be processed in a single round of adjacent segment compaction. If `null` (the default value), no maximum is imposed on the number of ranges that can be processed at once. A value below 1 effectively disables adjacent merge compaction.",
+      "config_scope": "cluster"
+    },
+    "log_compaction_pause_use_sliding_window": {
+      "description": "Pause use of sliding window compaction. Toggle to `true` _only_ when you want to force adjacent segment compaction. The memory reserved by `storage_compaction_key_map_memory` is not freed when this is set to `true`.",
+      "config_scope": "cluster"
+    },
+    "log_segment_ms_max": {
+      "description": "Upper bound on topic `segment.ms`: higher values will be clamped to this value.\n\n*Unit*: milliseconds",
+      "config_scope": "cluster"
+    },
+    "log_segment_ms_min": {
+      "description": "Lower bound on topic `segment.ms`: lower values will be clamped to this value.\n\n*Unit*: milliseconds",
+      "config_scope": "cluster"
+    },
+    "log_segment_size": {
+      "description": "Default log segment size in bytes for topics which do not set `segment.bytes`.",
+      "config_scope": "cluster"
+    },
+    "max_concurrent_producer_ids": {
+      "description": "Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting.",
+      "config_scope": "cluster"
+    },
+    "metadata_dissemination_retries": {
+      "description": "Number of attempts to look up a topic's metadata-like shard before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for topics like tx, consumer offsets, etc.) is missing. These topics are usually created on demand when users try to use the cluster for the first time, and it may take some time for the creation to happen and the metadata to propagate to all the brokers (particularly the broker handling the request). In the meantime, Redpanda waits and retries.",
+      "config_scope": "cluster"
+    },
+    "min_cleanable_dirty_ratio": {
+      "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. The topic property `min.cleanable.dirty.ratio` overrides this value at the topic level.",
+      "config_scope": "cluster"
+    },
+    "oidc_clock_skew_tolerance": {
+      "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.\n\n*Unit*: seconds",
+      "config_scope": "cluster"
+    },
+    "partition_manager_shutdown_watchdog_timeout": {
+      "description": "A threshold value to detect partitions which might have been stuck while shutting down. After this threshold, a watchdog in the partition manager logs information about partition shutdowns that are not making progress.\n\n*Unit*: milliseconds",
+      "config_scope": "cluster"
+    },
+    "election_timeout_ms": {
+      "description": "Raft election timeout expressed in milliseconds.",
+      "config_scope": "cluster"
+    },
+    "raft_max_buffered_follower_append_entries_bytes_per_shard": {
+      "description": "The total size of append entry requests that may be cached per shard, using the Raft-buffered protocol. When an entry is cached, the leader can continue serving requests because the ordering of the cached requests cannot change. When the total size of cached requests reaches the set limit, back pressure is applied to throttle producers.",
+      "config_scope": "cluster"
+    },
+    "raft_max_inflight_follower_append_entries_requests_per_shard": {
+      "description": "The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. This property replaces `raft_max_concurrent_append_requests_per_follower`.",
+      "config_scope": "cluster"
+    },
+    "reclaim_stable_window": {
+      "description": "If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed.",
+      "config_scope": "cluster"
+    },
+    "retention_local_trim_interval": {
+      "description": "The period during which disk usage is checked for disk pressure, and data is optionally trimmed to meet the target.",
+      "config_scope": "cluster"
+    },
+    "rpc_server_listen_backlog": {
+      "description": "Maximum TCP connection queue length for Kafka server and internal RPC server. If `null` (the default value), no queue length is set.",
+      "config_scope": "cluster"
+    },
+    "rpk_path": {
+      "description": "Path to RPK binary.",
+      "config_scope": "cluster"
+    },
+    "schema_registry_always_normalize": {
+      "description": "Always normalize schemas. If set, this overrides the `normalize` parameter in requests to the Schema Registry API.",
+      "config_scope": "cluster"
+    },
+    "storage_ignore_timestamps_in_future_sec": {
+      "description": "The maximum number of seconds that a record's timestamp can be ahead of a Redpanda broker's clock and still be used when deciding whether to clean up the record for data retention. This property makes possible the timely cleanup of records from clients with clocks that are drastically unsynchronized relative to Redpanda.\n\nWhen determining whether to clean up a record with a timestamp more than `storage_ignore_timestamps_in_future_sec` seconds ahead of the broker, Redpanda ignores the record's timestamp and instead uses a valid timestamp of another record in the same segment, or (if another record's valid timestamp is unavailable) the timestamp of when the segment file was last modified (mtime).\n\nBy default, `storage_ignore_timestamps_in_future_sec` is disabled (null).\n\n[TIP]\n====\nTo figure out whether to set `storage_ignore_timestamps_in_future_sec` for your system:\n\n. Look for logs with segments that are unexpectedly large and not being cleaned up.\n. In the logs, search for records with unsynchronized timestamps that are further into the future than tolerable by your data retention and storage settings. For example, timestamps 60 seconds or more into the future can be considered to be too unsynchronized.\n. If you find unsynchronized timestamps throughout your logs, determine the number of seconds that the timestamps are ahead of their actual time, and set `storage_ignore_timestamps_in_future_sec` to that value so data retention can proceed.\n. If you only find unsynchronized timestamps that are the result of transient behavior, you can disable `storage_ignore_timestamps_in_future_sec`.\n====",
+      "config_scope": "cluster"
+    },
+    "tls_certificate_name_format": {
+      "description": "The format of the certificate's distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'.",
+      "config_scope": "cluster"
+    },
+    "tls_enable_renegotiation": {
+      "description": "TLS client-initiated renegotiation is considered unsafe and is disabled by default. Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections, as client-initiated renegotiation was removed.",
+      "config_scope": "cluster"
+    },
+    "topic_fds_per_partition": {
+      "description": "File descriptors required per partition replica. If topic creation results in the ratio of file descriptor limit to partition replicas being lower than this value, creation of new topics fails.",
+      "config_scope": "cluster"
+    },
+    "topic_label_aggregation_limit": {
+      "description": "When the number of topics exceeds this limit, the topic label in generated metrics will be aggregated. If `null`, then there is no limit.",
+      "config_scope": "cluster"
+    },
+    "topic_memory_per_partition": {
+      "description": "Required memory in bytes per partition replica when creating or altering topics. The total size of the memory pool for partitions is the total memory available to Redpanda times `topic_partitions_memory_allocation_percent`. Each partition created requires `topic_memory_per_partition` bytes from that pool. If insufficient memory is available, creating or altering topics fails.",
+      "config_scope": "cluster"
+    },
+    "topic_partitions_per_shard": {
+      "description": "Maximum number of partition replicas per shard. 
If topic creation results in the ratio of partition replicas to shards being higher than this value, creation of new topics fails.", + "config_scope": "cluster" + }, + "topic_partitions_reserve_shard0": { + "description": "Reserved partition slots on shard (CPU core) 0 on each node. If this is greater than or equal to <>, no data partitions will be scheduled on shard 0.", + "config_scope": "cluster" + }, + "transaction_coordinator_delete_retention_ms": { + "description": "Delete segments older than this age. To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to <>.\n\nFor example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over.", + "config_scope": "cluster" + }, + "use_kafka_handler_scheduler_group": { + "description": "Use a separate scheduler group to handle parsing Kafka protocol requests.", + "config_scope": "cluster" + }, + "use_produce_scheduler_group": { + "description": "Use a separate scheduler group to process Kafka produce requests.", + "config_scope": "cluster" } } } \ No newline at end of file From ba6c5ef26bf5016eff54e1ea79d5ac7da50b0fa1 Mon Sep 17 00:00:00 2001 From: JakeSCahill Date: Wed, 29 Oct 2025 10:46:05 +0000 Subject: [PATCH 4/8] Add object property diff --- docs-data/property-overrides.json | 242 ++++++++++++++++++++++++++++-- 1 file changed, 232 insertions(+), 10 deletions(-) diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json index bdf13fd569..4648a05f3c 100644 --- a/docs-data/property-overrides.json +++ b/docs-data/property-overrides.json @@ -274,7 +274,7 @@ "config_scope": "cluster" }, "enable_consumer_group_metrics": { - "description": "List of enabled consumer group metrics. 
Accepted values include:\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.\n+", + "description": "List of enabled consumer group metrics.\n\n*Accepted values:*\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.\n+\nifndef::env-cloud[]\nUse the xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`] property to control the frequency of consumer lag metric collection.\nendif::[]", "related_topics": [ "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`]", "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`]", @@ -849,7 +849,7 @@ "config_scope": "topic" }, "retention_bytes": { - "description": "Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. If `null` (the default value), no limit is applied.\n\nThe topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level.\n\n*Unit*: bytes per partition.", + "description": "Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. If `null` (the default value), no limit is applied.\n\nThe topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level.", "related_topics": [ "xref:./topic-properties.adoc#retentionbytes[`retention.bytes`]" ], @@ -1102,7 +1102,7 @@ "category": "pandaproxy" }, "consumer_offsets_topic_batch_cache_enabled": { - "description": "This property lets you enable batch caching for the consumer offsets topic. 
By default, the cache for this topic is disabled. Changing this property is not recommended in production systems as it may affect performance.", + "description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. The change is applied only after the restart.", "config_scope": "cluster" }, "consumer_rebalance_timeout_ms": { @@ -1139,7 +1139,7 @@ "config_scope": "cluster" }, "disable_cluster_recovery_loop_for_tests": { - "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop.", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop. This property is used to simplify testing and should not be set in production.", "config_scope": "cluster" }, "enable_developmental_unrecoverable_data_corrupting_features": { @@ -1155,11 +1155,11 @@ "config_scope": "cluster" }, "iceberg_rest_catalog_authentication_mode": { - "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, and `oauth2`. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri.`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditionally, and no attempts are made to refresh the token.", + "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditonally, and no attempts are made to refresh the token. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to AWS Glue catalog with SigV4.", "config_scope": "cluster" }, "iceberg_rest_catalog_client_id": { - "description": "The client ID used to query the REST catalog API for the OAuth token. Required if catalog type is set to `rest`.", + "description": "Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", "config_scope": "cluster" }, "iceberg_rest_catalog_credentials_source": { @@ -1174,15 +1174,15 @@ "config_scope": "cluster" }, "iceberg_rest_catalog_oauth2_scope": { - "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`", "config_scope": "cluster" }, "iceberg_rest_catalog_oauth2_server_uri": { - "description": "The OAuth URI used to retrieve access tokens for Iceberg REST catalog authentication. 
If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", + "description": "The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", "config_scope": "cluster" }, "iceberg_rest_catalog_request_timeout_ms": { - "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request.", + "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request", "config_scope": "cluster" }, "iceberg_topic_name_dot_replacement": { @@ -1190,7 +1190,7 @@ "config_scope": "cluster" }, "kafka_enable_authorization": { - "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>.", + "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>.\n\n* `null`: Ignored. Authorization is enabled with `enable_sasl`: `true`\n* `true`: authorization is required.\n* `false`: authorization is disabled.", "config_scope": "cluster" }, "kafka_produce_batch_validation": { @@ -1609,6 +1609,228 @@ "use_produce_scheduler_group": { "description": "Use a separate scheduler group to process Kafka produce requests.", "config_scope": "cluster" + }, + "cloud_storage_api_endpoint": { + "description": "Optional API endpoint. The only instance in which you must set this value is when using a custom domain with your object storage service.\n\n- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned.\n- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>.\n- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>.", + "config_scope": "object-storage" + }, + "cloud_storage_azure_hierarchical_namespace_enabled": { + "description": "Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <>. \n\nWhen this property is not set, <> must be set, and each broker checks at startup if a hierarchical namespace is enabled. \n\nWhen set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. \n\nWhen set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. \n\nThis setting should be used only in emergencies where Redpanda fails to detect the correct a hierarchical namespace status.", + "config_scope": "object-storage" + }, + "cloud_storage_azure_shared_key": { + "description": "The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <>. If `null`, the property is disabled.", + "config_scope": "object-storage" + }, + "cloud_storage_backend": { + "description": "Optional object storage backend variant used to select API capabilities. If not supplied, this will be inferred from other configuration properties.", + "config_scope": "object-storage" + }, + "cloud_storage_cache_max_objects": { + "description": "Maximum number of objects that may be held in the Tiered Storage cache. 
This applies simultaneously with <>, and whichever limit is hit first will trigger trimming of the cache.", + "config_scope": "object-storage" + }, + "cloud_storage_cache_size": { + "description": "Maximum size of the object storage cache, in bytes.\n\nThis property works together with <> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`.", + "config_scope": "object-storage" + }, + "cloud_storage_cache_trim_threshold_percent_objects": { + "description": "Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full.", + "config_scope": "object-storage", + "version": "24.1.10" + }, + "cloud_storage_cache_trim_threshold_percent_size": { + "description": "Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. If unset, the default behavior is to start trimming when the cache is full.", + "config_scope": "object-storage", + "version": "24.1.10" + }, + "cloud_storage_cache_trim_walk_concurrency": { + "description": "The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming. A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads.", + "config_scope": "object-storage" + }, + "cloud_storage_credentials_source": { + "description": "The source of credentials used to authenticate to object storage services.\nRequired for AWS or GCP authentication with IAM roles.\n\nTo authenticate using access keys, see <>.", + "config_scope": "object-storage" + }, + "cloud_storage_disable_archival_stm_rw_fence": { + "description": "Disables the concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems.", + "config_scope": "object-storage" + }, + "cloud_storage_disable_read_replica_loop_for_tests": { + "description": "Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production.", + "config_scope": "object-storage" + }, + "cloud_storage_disable_remote_labels_for_tests": { + "description": "If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics.", + "config_scope": "object-storage" + }, + "cloud_storage_disable_upload_consistency_checks": { + "description": "Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support.", + "config_scope": "object-storage" + }, + "cloud_storage_disable_upload_loop_for_tests": { + "description": "Begins the upload loop in topic partitions with Tiered Storage enabled. 
The property exists to simplify testing and shouldn't be set in production.", + "config_scope": "object-storage" + }, + "cloud_storage_enable_remote_allow_gaps": { + "description": "Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. If the retained data fills the local volume, Redpanda throttles producers. Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets.", + "config_scope": "object-storage" + }, + "cloud_storage_enable_scrubbing": { + "description": "Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage.", + "config_scope": "object-storage" + }, + "cloud_storage_enable_segment_uploads": { + "description": "Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior.", + "config_scope": "object-storage" + }, + "cloud_storage_full_scrub_interval_ms": { + "description": "Interval, in milliseconds, between a final scrub and the next scrub.", + "config_scope": "object-storage" + }, + "cloud_storage_garbage_collect_timeout_ms": { + "description": "Timeout for running the cloud storage garbage collection, in milliseconds.", + "config_scope": "object-storage" + }, + "cloud_storage_graceful_transfer_timeout_ms": { + "description": "Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting.", + "config_scope": "object-storage" + }, + "cloud_storage_hydrated_chunks_per_segment_ratio": { + "description": "The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed.\n\nA segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy which simply removes all unused chunks.", + "config_scope": "object-storage" + }, + "cloud_storage_hydration_timeout_ms": { + "description": "Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error.\n\nNegative doesn't make sense, but it may not be checked-for/enforced. Large is subjective, but a huge timeout also doesn't make sense. This particular config doesn't have a min/max bounds control, but it probably should to avoid mistakes.", + "config_scope": "object-storage" + }, + "cloud_storage_idle_threshold_rps": { + "description": "The object storage request rate threshold for idle state detection. 
If the average request rate for the configured period is lower than this threshold, the object storage is considered idle.", + "config_scope": "object-storage" + }, + "cloud_storage_idle_timeout_ms": { + "description": "The timeout, in milliseconds, used to detect the idle state of the object storage API. If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started.", + "config_scope": "object-storage" + }, + "cloud_storage_initial_backoff_ms": { + "description": "Initial backoff time for exponential backoff algorithm (ms).", + "config_scope": "object-storage" + }, + "cloud_storage_inventory_max_hash_size_during_parse": { + "description": "Maximum bytes of hashes held in memory before writing data to disk during inventory report parsing. This affects the number of files written to disk during inventory report parsing. When this limit is reached, new files are written to disk.", + "config_scope": "object-storage" + }, + "cloud_storage_manifest_cache_size": { + "description": "Amount of memory that can be used to handle Tiered Storage metadata.", + "config_scope": "object-storage" + }, + "cloud_storage_materialized_manifest_ttl_ms": { + "description": "The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention.", + "config_scope": "object-storage" + }, + "cloud_storage_manifest_max_upload_interval_sec": { + "description": "Minimum interval, in seconds, between partition manifest uploads. Actual time between uploads may be greater than this interval. If this is `null`, metadata is updated after each segment upload.", + "config_scope": "object-storage" + }, + "cloud_storage_manifest_upload_timeout_ms": { + "description": "Manifest upload timeout, in milliseconds.", + "config_scope": "object-storage" + }, + "cloud_storage_max_concurrent_hydrations_per_shard": { + "description": "Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory.", + "config_scope": "object-storage" + }, + "cloud_storage_max_segment_readers_per_shard": { + "description": "Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. 
These readers are cached across Kafka consume requests and store a readahead buffer.", + "config_scope": "object-storage" + }, + "cloud_storage_max_segments_pending_deletion_per_partition": { + "description": "The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually.", + "config_scope": "object-storage" + }, + "cloud_storage_max_throughput_per_shard": { + "description": "Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second.\nThis setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities.", + "config_scope": "object-storage" + }, + "cloud_storage_min_chunks_per_segment_threshold": { + "description": "The minimum number of chunks per segment for trimming to be enabled. If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time.", + "config_scope": "object-storage" + }, + "cloud_storage_readreplica_manifest_sync_timeout_ms": { + "description": "Timeout to check if new data is available for partitions in object storage for read replicas.", + "config_scope": "object-storage" + }, + "cloud_storage_recovery_temporary_retention_bytes_default": { + "description": "Retention in bytes for topics created during automated recovery.", + "config_scope": "object-storage" + }, + "cloud_storage_recovery_topic_validation_mode": { + "description": "Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. For each topic, this reports errors for all partitions, but for each partition, only the first error is reported.\n\nThis property accepts the following parameters:\n\n- `no_check`: Skips the checks for topic recovery.\n- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. Fails if there are connection issues to the object storage.\n- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects.\n\nExample: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0:\n\n```bash\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error\n```\n\nEach failing partition error message has the following format:\n\n```bash\nERROR .... [... recovery validation of {}...] - , validation not ok\n```\n\nAt the end of the process, Redpanda outputs a final ERROR message: \n\n```bash\nERROR ... ... 
- Stopping recovery of {} due to validation error\n```", + "config_scope": "object-storage" + }, + "cloud_storage_roles_operation_timeout_ms": { + "description": "Timeout for IAM role related operations (ms).", + "config_scope": "object-storage" + }, + "cloud_storage_scrubbing_interval_jitter_ms": { + "description": "Jitter applied to the object storage scrubbing interval.", + "config_scope": "object-storage" + }, + "cloud_storage_segment_max_upload_interval_sec": { + "description": "Time that a segment can be kept locally without uploading it to the object storage, in seconds.", + "config_scope": "object-storage" + }, + "cloud_storage_segment_size_min": { + "description": "Smallest acceptable segment size in the object storage. Default: `cloud_storage_segment_size_target`/2.", + "config_scope": "object-storage" + }, + "cloud_storage_segment_upload_timeout_ms": { + "description": "Log segment upload timeout, in milliseconds.", + "config_scope": "object-storage" + }, + "cloud_storage_spillover_manifest_max_segments": { + "description": "Maximum number of segments in the spillover manifest that can be offloaded to the object storage. This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes.", + "config_scope": "object-storage" + }, + "cloud_storage_spillover_manifest_size": { + "description": "The size of the manifest which can be offloaded to the cloud. If the size of the local manifest stored in Redpanda exceeds `cloud_storage_spillover_manifest_size` by two times the spillover mechanism will split the manifest into two parts and one will be uploaded to object storage.", + "config_scope": "object-storage" + }, + "cloud_storage_throughput_limit_percent": { + "description": "Maximum throughput used by Tiered Storage per broker expressed as a percentage of the disk bandwidth. If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. The actual usage depends on your workload and the state of the Tiered Storage cache. 
This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob.", + "config_scope": "object-storage" + }, + "cloud_storage_topic_purge_grace_period_ms": { + "description": "Grace period during which the purger refuses to purge the topic.", + "config_scope": "object-storage" + }, + "cloud_storage_upload_ctrl_d_coeff": { + "description": "Derivative coefficient for upload PID controller.", + "config_scope": "object-storage" + }, + "cloud_storage_upload_ctrl_max_shares": { + "description": "Maximum number of I/O and CPU shares that archival upload can use.", + "config_scope": "object-storage" + }, + "cloud_storage_upload_ctrl_min_shares": { + "description": "Minimum number of I/O and CPU shares that archival upload can use.", + "config_scope": "object-storage" + }, + "cloud_storage_upload_ctrl_p_coeff": { + "description": "Proportional coefficient for upload PID controller.", + "config_scope": "object-storage" + }, + "cloud_storage_upload_loop_initial_backoff_ms": { + "description": "Initial backoff interval when there is nothing to upload for a partition, in milliseconds.", + "config_scope": "object-storage" + }, + "cloud_storage_upload_loop_max_backoff_ms": { + "description": "Maximum backoff interval when there is nothing to upload for a partition, in milliseconds.", + "config_scope": "object-storage" + }, + "cloud_storage_url_style": { + "description": "Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage.\n\nLeave this property unset (`null`) to use automatic configuration:\n\n* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed\n* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration\n\nSet this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format.", + "config_scope": "object-storage" } } } \ No newline at end of file From e412ba3562598fce76d1c77e7d5e17fc3a71b48c Mon Sep 17 00:00:00 2001 From: JakeSCahill Date: Wed, 29 Oct 2025 16:54:42 +0000 Subject: [PATCH 5/8] Add config property automation --- .github/workflows/update-property-docs.yml | 83 + antora.yml | 127 +- docs-data/property-overrides.json | 1716 ++- local-antora-playbook.yml | 3 + .../examples/v25.2.10-properties.json | 10897 ++++++++++++++++ .../pages/properties/broker-properties.adoc | 1214 +- .../pages/properties/cluster-properties.adoc | 6558 +--------- .../properties/object-storage-properties.adoc | 1749 +-- .../pages/properties/topic-properties.adoc | 704 +- modules/reference/pages/property-report.json | 100 + .../deprecated/deprecated-properties.adoc | 106 + .../properties/broker-properties.adoc | 1914 +++ .../properties/cluster-properties.adoc | 10418 +++++++++++++++ .../properties/object-storage-properties.adoc | 2731 ++++ .../partials/properties/topic-properties.adoc | 845 ++ .../partials/topic-property-mappings.adoc | 49 + .../property-changes-v25.2.1-to-v25.2.10.json | 50 + 17 files changed, 28095 insertions(+), 11169 deletions(-) create mode 100644 .github/workflows/update-property-docs.yml create mode 100644 modules/reference/examples/v25.2.10-properties.json create mode 100644 modules/reference/pages/property-report.json create mode 100644 modules/reference/partials/deprecated/deprecated-properties.adoc create mode 100644 
modules/reference/partials/properties/broker-properties.adoc create mode 100644 modules/reference/partials/properties/cluster-properties.adoc create mode 100644 modules/reference/partials/properties/object-storage-properties.adoc create mode 100644 modules/reference/partials/properties/topic-properties.adoc create mode 100644 modules/reference/partials/topic-property-mappings.adoc create mode 100644 modules/reference/property-changes-v25.2.1-to-v25.2.10.json diff --git a/.github/workflows/update-property-docs.yml b/.github/workflows/update-property-docs.yml new file mode 100644 index 0000000000..09f09fd561 --- /dev/null +++ b/.github/workflows/update-property-docs.yml @@ -0,0 +1,83 @@ +--- +name: Generate Property Docs + +on: + workflow_dispatch: + inputs: + tag: + description: "Tag to use for property doc generation" + required: true + type: string + repository_dispatch: + types: [trigger-property-docs-generation] + +jobs: + generate-property-docs: + runs-on: ubuntu-24.04 + permissions: + id-token: write + contents: write + pull-requests: write + + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: ${{ vars.RP_AWS_CRED_REGION }} + role-to-assume: arn:aws:iam::${{ secrets.RP_AWS_CRED_ACCOUNT_ID }}:role/${{ vars.RP_AWS_CRED_BASE_ROLE_NAME }}${{ github.event.repository.name }} + + - name: Get secrets from AWS Secrets Manager + uses: aws-actions/aws-secretsmanager-get-secrets@v2 + with: + secret-ids: | + ,sdlc/prod/github/actions_bot_token + parse-json-secrets: true + + - name: Checkout the repository + uses: actions/checkout@v4 + with: + ref: main + token: ${{ env.ACTIONS_BOT_TOKEN }} + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: 20 + + - name: Install dependencies + run: npm ci + + - name: Determine tag + id: tag + run: | + # Prefer input tag (workflow_dispatch), else use repository_dispatch payload + if [ -n "${{ github.event.inputs.tag }}" ]; then + echo "tag=${{ github.event.inputs.tag }}" >> $GITHUB_OUTPUT + elif [ -n "${{ github.event.client_payload.tag }}" ]; then + echo "tag=${{ github.event.client_payload.tag }}" >> $GITHUB_OUTPUT + else + echo "❌ No tag provided via input or dispatch payload" >&2 + exit 1 + fi + + - name: Generate property docs + run: | + echo "Running doc generation for: ${{ steps.tag.outputs.tag }}" + npx doc-tools generate property-docs \ + --tag "${{ steps.tag.outputs.tag }}" \ + --generate-partials \ + --cloud-support \ + --overrides docs-data/property-overrides.json + env: + GITHUB_TOKEN: ${{ env.ACTIONS_BOT_TOKEN }} + + - name: Create pull request + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ env.ACTIONS_BOT_TOKEN }} + commit-message: "auto-docs: Update property docs for ${{ steps.tag.outputs.tag }}" + branch: update-property-docs-${{ steps.tag.outputs.tag }} + title: "auto-docs: Update property docs for tag ${{ steps.tag.outputs.tag }}" + body: | + This PR auto-generates updated Redpanda property documentation for **${{ steps.tag.outputs.tag }}**. 
+ labels: auto-docs diff --git a/antora.yml b/antora.yml index 48cdea71bf..ce69e3a856 100644 --- a/antora.yml +++ b/antora.yml @@ -3,45 +3,40 @@ title: Self-Managed version: 25.2 start_page: home:index.adoc nav: -- modules/ROOT/nav.adoc + - modules/ROOT/nav.adoc asciidoc: attributes: - # Date of release in the format YYYY-MM-DD - page-release-date: 2025-07-31 - # Only used in the main branch (latest version) + page-release-date: 2025-07-31T00:00:00.000Z page-header-data: order: 2 color: '#107569' - # Name of the Redpanda UI used in single-sourced content with Redpanda Cloud. ui: Redpanda Console@ - # Fallback versions - # We try to fetch the latest versions from GitHub at build time - # -- full-version: 25.2.1 - latest-redpanda-tag: 'v25.2.1' - latest-console-tag: 'v2.8.5' - latest-release-commit: '68c64fd' - latest-operator-version: 'v2.3.8-24.3.6' - operator-beta-tag: 'v25.1.1-beta1' - helm-beta-tag: 'v25.1.1-beta1' + latest-redpanda-tag: v25.2.10 + latest-console-tag: v2.8.5 + latest-release-commit: 68c64fd + latest-operator-version: v2.3.8-24.3.6 + operator-beta-tag: v25.1.1-beta1 + helm-beta-tag: v25.1.1-beta1 latest-redpanda-helm-chart-version: 5.10.1 - redpanda-beta-version: '25.1.1-rc3' - redpanda-beta-tag: '25.1.1-rc3' - console-beta-version: '3.0.0-beta.1' - console-beta-tag: 'v3.0.0-beta.1' - # -- + redpanda-beta-version: 25.1.1-rc3 + redpanda-beta-tag: 25.1.1-rc3 + console-beta-version: 3.0.0-beta.1 + console-beta-tag: v3.0.0-beta.1 supported-kubernetes-version: 1.27.0-0 supported-helm-version: 3.10.0 supported-rhel-required: '8' - supported-rhel-recommended: '9+' - supported-ubuntu-required: '20.04 LTS' - supported-ubuntu-recommended: '22.04+' - badge-deprecated: 'image:https://img.shields.io/badge/-Deprecated-red.svg[xref=upgrade:deprecated/index.adoc]' + supported-rhel-recommended: 9+ + supported-ubuntu-required: 20.04 LTS + supported-ubuntu-recommended: 22.04+ + badge-deprecated: image:https://img.shields.io/badge/-Deprecated-red.svg[xref=upgrade:deprecated/index.adoc] removals-without-aliases: - page: reference/rpk/rpk-cloud/ - reason: Pages moved to Redpanda Cloud. # https://github.com/redpanda-data/docs-site/pull/75 + reason: Pages moved to Redpanda Cloud. - page: upgrade/deprecated/cluster-resource/ - reason: The cluster resource was deprecated in version 23.2. We no longer need a page dedicated to migration in this version. + reason: >- + The cluster resource was deprecated in version 23.2. We no longer need a page dedicated to migration in this + version. - page: develop/chat-room/ reason: Page moved to Redpanda Labs. - page: develop/chat-room-docker/ @@ -80,49 +75,51 @@ asciidoc: reason: Page moved to Redpanda Labs. - page: develop/guide-nodejs/ reason: Page moved to Redpanda Labs. - # Data for the home page - page-home-intro: Redpanda is a Kafka-compatible event streaming platform built for data-intensive applications. Install Self-Managed Redpanda in your environment with the free Community Edition or with the Enterprise Edition for additional features like Tiered Storage, Continuous Data Balancing, and Audit Logging. - page-home-image: panda.png # images must be in modules/ROOT/images + page-home-intro: >- + Redpanda is a Kafka-compatible event streaming platform built for data-intensive applications. Install + Self-Managed Redpanda in your environment with the free Community Edition or with the Enterprise Edition for + additional features like Tiered Storage, Continuous Data Balancing, and Audit Logging. 
+ page-home-image: panda.png page-home-intro-learn-more: get-started:intro-to-events.adoc page-home-primary-row-title: Deploy page-home-primary-row: - - title: 'Docker quickstart' - description: 'Start using Redpanda in a self-managed environment.' - url: 'get-started:quick-start.adoc' - link-text: 'Get started' - - title: 'Kubernetes' - description: 'Learn how to deploy Redpanda on Kubernetes.' - url: 'deploy:deployment-option/self-hosted/kubernetes/index.adoc' - link-text: 'Get started' - - title: 'Linux' - description: 'Learn how to deploy Redpanda on Linux.' - url: 'deploy:deployment-option/self-hosted/manual/index.adoc' - link-text: 'Get started' + - title: Docker quickstart + description: Start using Redpanda in a self-managed environment. + url: get-started:quick-start.adoc + link-text: Get started + - title: Kubernetes + description: Learn how to deploy Redpanda on Kubernetes. + url: deploy:deployment-option/self-hosted/kubernetes/index.adoc + link-text: Get started + - title: Linux + description: Learn how to deploy Redpanda on Linux. + url: deploy:deployment-option/self-hosted/manual/index.adoc + link-text: Get started page-home-secondary-row-title: Popular topics page-home-secondary-row: - - title: "What's new" - description: 'Explore new features in this release.' - url: 'get-started:whats-new.adoc' - link-text: 'Learn more' - - title: 'Data transforms' - description: 'Get started with data transforms.' - url: 'develop:data-transforms/run-transforms-index.adoc' - link-text: 'Learn more' - - title: 'Monitor' - description: 'Configure monitoring to optimize performance.' - url: 'manage:monitoring.adoc' - link-text: 'Learn more' - - title: 'rpk commands' - description: 'Redpanda CLI reference for Self-Managed Redpanda.' - url: 'reference:rpk/index.adoc' - link-text: 'Learn more' - page-home-tertiary-row-title: 'Labs' + - title: What's new + description: Explore new features in this release. + url: get-started:whats-new.adoc + link-text: Learn more + - title: Data transforms + description: Get started with data transforms. + url: develop:data-transforms/run-transforms-index.adoc + link-text: Learn more + - title: Monitor + description: Configure monitoring to optimize performance. + url: manage:monitoring.adoc + link-text: Learn more + - title: rpk commands + description: Redpanda CLI reference for Self-Managed Redpanda. 
+ url: reference:rpk/index.adoc + link-text: Learn more + page-home-tertiary-row-title: Labs page-home-tertiary-row: - - title: 'Set up Postgres CDC with Debezium and Redpanda' - url: 'redpanda-labs:docker-compose:cdc-postgres-json.adoc' - - title: 'Redact information in JSON messages' - url: 'redpanda-labs:data-transforms:redaction-go.adoc' - - title: 'Stream text embeddings with Redpanda, OpenAI, and MongoDB' - url: 'redpanda-labs:redpanda-connect:openai.adoc' - - title: 'Build a chat room application with Redpanda and Rust' - url: 'redpanda-labs:clients:docker-rust.adoc' + - title: Set up Postgres CDC with Debezium and Redpanda + url: redpanda-labs:docker-compose:cdc-postgres-json.adoc + - title: Redact information in JSON messages + url: redpanda-labs:data-transforms:redaction-go.adoc + - title: Stream text embeddings with Redpanda, OpenAI, and MongoDB + url: redpanda-labs:redpanda-connect:openai.adoc + - title: Build a chat room application with Redpanda and Rust + url: redpanda-labs:clients:docker-rust.adoc diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json index 4648a05f3c..1251689ea8 100644 --- a/docs-data/property-overrides.json +++ b/docs-data/property-overrides.json @@ -20,6 +20,10 @@ "config_scope": "broker", "category": "redpanda" }, + "admin_api_doc_dir": { + "config_scope": "broker", + "category": "redpanda" + }, "admin_api_tls": { "example": [ ".Example", @@ -38,6 +42,42 @@ "config_scope": "broker", "category": "redpanda" }, + "advertised_kafka_api": { + "description": "Address of the Kafka API published to the clients. If not set, the <> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " advertised_kafka_api:", + " - name: ", + " address: ", + " port: ", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, + "advertised_pandaproxy_api": { + "config_scope": "broker", + "category": "pandaproxy", + "description": "Network address for the HTTP Proxy API server to publish to clients." + }, + "advertised_rpc_api": { + "description": "Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " advertised_rpc_api:", + " address: ", + " port: ", + "----" + ], + "config_scope": "broker", + "category": "redpanda" + }, "aggregate_metrics": { "description": "Enable aggregation of metrics returned by the xref:reference:internal-metrics-reference.adoc[`/metrics`] endpoint. Aggregation can simplify monitoring by providing summarized data instead of raw, per-instance metrics. Metric aggregation is performed by summing the values of samples by labels and is done when it makes sense by the shard and/or partition labels.", "related_topics": [ @@ -45,6 +85,30 @@ ], "config_scope": "cluster" }, + "api_doc_dir": { + "description": "Path to the API specifications directory. 
This directory contains API documentation for both the HTTP Proxy API and Schema Registry API.", + "config_scope": "broker", + "category": "pandaproxy" + }, + "audit_enabled": { + "related_topics": [ + ], + "config_scope": "cluster" + }, + "auto_create_topics_enabled": { + "description": "Allow automatic topic creation.\n\nIf you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled.", + "config_scope": "cluster" + }, + "broker_tls": { + "config_scope": "broker", + "category": "pandaproxy-client", + "description": "TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect." + }, + "brokers": { + "config_scope": "broker", + "category": "pandaproxy-client", + "description": "Network addresses of the Kafka API servers to which the HTTP Proxy client should connect." + }, "cleanup.policy": { "description": "The cleanup policy to apply for log segments of a topic.\nWhen `cleanup.policy` is set, it overrides the cluster property xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] for the topic.", "related_topics": [ @@ -52,28 +116,53 @@ ], "config_scope": "topic" }, + "client_cache_max_size": { + "config_scope": "broker", + "category": "pandaproxy", + "description": "The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage." + }, + "client_identifier": { + "config_scope": "broker", + "category": "pandaproxy-client", + "description": "Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities." + }, + "client_keep_alive": { + "description": "Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API.", + "config_scope": "broker", + "category": "pandaproxy" + }, + "cloud_storage_access_key": { + "description": "AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication.\nTo authenticate using IAM roles, see <>." + }, + "cloud_storage_api_endpoint": { + "description": "Optional API endpoint. The only instance in which you must set this value is when using a custom domain with your object storage service.\n\n- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned.\n- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>.\n- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>." + }, "cloud_storage_azure_adls_endpoint": { - "description": "Azure Data Lake Storage v2 endpoint override. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.\n\nIf not set, this is automatically generated using `dfs.core.windows.net` and <>.", - "config_scope": "object-storage" + "description": "Azure Data Lake Storage v2 endpoint override. 
Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.\n\nIf not set, this is automatically generated using `dfs.core.windows.net` and <>." }, "cloud_storage_azure_adls_port": { - "description": "Azure Data Lake Storage v2 port override. See also: <>. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.", - "config_scope": "object-storage" + "description": "Azure Data Lake Storage v2 port override. See also: <>. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint." }, "cloud_storage_azure_container": { - "description": "The name of the Azure container to use with Tiered Storage. If `null`, the property is disabled.\n\nNOTE: The container must belong to <>.", - "config_scope": "object-storage" + "description": "The name of the Azure container to use with Tiered Storage. If `null`, the property is disabled.\n\nNOTE: The container must belong to <>." + }, + "cloud_storage_azure_hierarchical_namespace_enabled": { + "description": "Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <>. \n\nWhen this property is not set, <> must be set, and each broker checks at startup if a hierarchical namespace is enabled. \n\nWhen set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. \n\nWhen set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. \n\nThis setting should be used only in emergencies where Redpanda fails to detect the correct hierarchical namespace status." }, "cloud_storage_azure_managed_identity_id": { "description": "The managed identity ID to use for access to the Azure storage account. To use Azure managed identities, you must set <> to `azure_vm_instance_metadata`. See xref:manage:security/iam-roles.adoc[IAM Roles] for more information on managed identities.", "related_topics": [ "xref:manage:security/iam-roles.adoc[IAM Roles]" - ], - "config_scope": "object-storage" + ] + }, + "cloud_storage_azure_shared_key": { + "description": "The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <>. If `null`, the property is disabled." + }, + "cloud_storage_backend": { + "description": "Optional object storage backend variant used to select API capabilities. If not supplied, this will be inferred from other configuration properties." }, "cloud_storage_bucket": { - "description": "AWS or GCP bucket that should be used to store data.\n\nWARNING: Modifying this property after writing data to a bucket could cause data loss.", - "config_scope": "object-storage" + "description": "AWS or GCP bucket that should be used to store data.\n\nWARNING: Modifying this property after writing data to a bucket could cause data loss." }, "cloud_storage_cache_directory": { "description": "Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. If not specified, Redpanda uses a default path within the data directory.", @@ -93,55 +182,130 @@ "config_scope": "broker", "category": "redpanda" }, + "cloud_storage_cache_max_objects": { + "description": "Maximum number of objects that may be held in the Tiered Storage cache. This applies simultaneously with <>, and whichever limit is hit first will trigger trimming of the cache."
+ }, + "cloud_storage_cache_size": { + "description": "Maximum size of the object storage cache, in bytes.\n\nThis property works together with <> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`." + }, "cloud_storage_cache_size_percent": { "related_topics": [ "xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`]" - ], - "config_scope": "object-storage" + ] + }, + "cloud_storage_cache_trim_threshold_percent_objects": { + "description": "Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full.", + "version": "24.1.10" + }, + "cloud_storage_cache_trim_threshold_percent_size": { + "description": "Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. If unset, the default behavior is to start trimming when the cache is full.", + "version": "24.1.10" + }, + "cloud_storage_cache_trim_walk_concurrency": { + "description": "The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming. A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads." }, "cloud_storage_chunk_prefetch": { - "description": "Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O.", - "config_scope": "object-storage" + "description": "Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O." + }, + "cloud_storage_client_lease_timeout_ms": { + "description": "Maximum time to hold a cloud storage client lease (ms), after which any outstanding connection is immediately closed.", + "config_scope": "cluster" }, "cloud_storage_credentials_host": { - "description": "The hostname to connect to for retrieving role based credentials. Derived from <> if not set. Only required when using IAM role based access. To authenticate using access keys, see <>.", - "config_scope": "object-storage" + "description": "The hostname to connect to for retrieving role based credentials. Derived from <> if not set. Only required when using IAM role based access. To authenticate using access keys, see <>." + }, + "cloud_storage_credentials_source": { + "description": "The source of credentials used to authenticate to object storage services.\nRequired for AWS or GCP authentication with IAM roles.\n\nTo authenticate using access keys, see <>." 
}, "cloud_storage_crl_file": { - "description": "Path to certificate revocation list for <>.", - "config_scope": "object-storage" + "description": "Path to certificate revocation list for <>." + }, + "cloud_storage_disable_archival_stm_rw_fence": { + "description": "Disables the concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems." + }, + "cloud_storage_disable_archiver_manager": { + "description": "Use legacy upload mode and do not start archiver_manager.", + "config_scope": "cluster" }, "cloud_storage_disable_chunk_reads": { - "description": "Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal.", - "config_scope": "object-storage" + "description": "Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal." + }, + "cloud_storage_disable_read_replica_loop_for_tests": { + "description": "Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production." + }, + "cloud_storage_disable_remote_labels_for_tests": { + "description": "If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics." + }, + "cloud_storage_disable_upload_consistency_checks": { + "description": "Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support." + }, + "cloud_storage_disable_upload_loop_for_tests": { + "description": "Begins the upload loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production." }, "cloud_storage_enable_compacted_topic_reupload": { - "description": "Enable re-uploading data for compacted topics.\nWhen set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage.", - "config_scope": "object-storage" + "description": "Enable re-uploading data for compacted topics.\nWhen set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage." + }, + "cloud_storage_enable_remote_allow_gaps": { + "description": "Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. 
If the retained data fills the local volume, Redpanda throttles producers. Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets." }, "cloud_storage_enable_remote_read": { - "description": "Default remote read config value for new topics.\nWhen set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. With the default set to `false`, remote reads must be explicitly enabled at the topic level.", - "config_scope": "object-storage" + "description": "Default remote read config value for new topics.\nWhen set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. With the default set to `false`, remote reads must be explicitly enabled at the topic level." }, "cloud_storage_enable_remote_write": { - "description": "Default remote write value for new topics.\nWhen set to `true`, new topics are by default configured to upload data to object storage. With the default set to `false`, remote write must be explicitly enabled at the topic level.", - "config_scope": "object-storage" + "description": "Default remote write value for new topics.\nWhen set to `true`, new topics are by default configured to upload data to object storage. With the default set to `false`, remote write must be explicitly enabled at the topic level." + }, + "cloud_storage_enable_scrubbing": { + "description": "Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage." }, "cloud_storage_enable_segment_merging": { "related_topics": [ "xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping]" - ], - "config_scope": "object-storage" + ] + }, + "cloud_storage_enable_segment_uploads": { + "description": "Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior." }, "cloud_storage_enabled": { "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" - ], - "config_scope": "object-storage" + ] + }, + "cloud_storage_full_scrub_interval_ms": { + "description": "Interval, in milliseconds, between a final scrub and the next scrub." + }, + "cloud_storage_garbage_collect_timeout_ms": { + "description": "Timeout for running the cloud storage garbage collection, in milliseconds." + }, + "cloud_storage_graceful_transfer_timeout_ms": { + "description": "Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting." }, "cloud_storage_housekeeping_interval_ms": { - "description": "Interval, in milliseconds, between object storage housekeeping tasks.", - "config_scope": "object-storage" + "description": "Interval, in milliseconds, between object storage housekeeping tasks." + }, + "cloud_storage_hydrated_chunks_per_segment_ratio": { + "description": "The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed.\n\nA segment is divided into chunks. 
Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy which simply removes all unused chunks." + }, + "cloud_storage_hydration_timeout_ms": { + "description": "Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error.\n\nThis property does not enforce minimum or maximum bounds, so avoid negative values and excessively large timeouts." + }, + "cloud_storage_idle_threshold_rps": { + "description": "The object storage request rate threshold for idle state detection. If the average request rate for the configured period is lower than this threshold, the object storage is considered idle." + }, + "cloud_storage_idle_timeout_ms": { + "description": "The timeout, in milliseconds, used to detect the idle state of the object storage API. If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started." + }, + "cloud_storage_initial_backoff_ms": { + "description": "Initial backoff time for exponential backoff algorithm (ms)." + }, + "cloud_storage_inventory_hash_path_directory": { + "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", + "example": [ + ".Example", + "[,yaml]", + "----", + "redpanda:", + " cloud_storage_inventory_hash_store: ", + "----" + ] + }, "cloud_storage_inventory_hash_store": { "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", @@ -156,25 +320,107 @@ "config_scope": "broker", "category": "redpanda" },
The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention." + }, + "cloud_storage_max_concurrent_hydrations_per_shard": { + "description": "Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory." + }, "cloud_storage_max_connection_idle_time_ms": { - "description": "Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated.\nThis setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections.", - "config_scope": "object-storage" + "description": "Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated.\nThis setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections." + }, + "cloud_storage_max_segment_readers_per_shard": { + "description": "Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. These readers are cached across Kafka consume requests and store a readahead buffer." + }, + "cloud_storage_max_segments_pending_deletion_per_partition": { + "description": "The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually." + }, + "cloud_storage_max_throughput_per_shard": { + "description": "Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second.\nThis setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities." }, "cloud_storage_metadata_sync_timeout_ms": { - "description": "Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization.", - "config_scope": "object-storage" + "description": "Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization." + }, + "cloud_storage_min_chunks_per_segment_threshold": { + "description": "The minimum number of chunks per segment for trimming to be enabled. 
If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time." + }, + "cloud_storage_readreplica_manifest_sync_timeout_ms": { + "description": "Timeout to check if new data is available for partitions in object storage for read replicas." + }, + "cloud_storage_recovery_temporary_retention_bytes_default": { + "description": "Retention in bytes for topics created during automated recovery." }, "cloud_storage_recovery_topic_validation_depth": { - "description": "Number of metadata segments to validate, from newest to oldest, when <> is set to `check_manifest_and_segment_metadata`.", - "config_scope": "object-storage" + "description": "Number of metadata segments to validate, from newest to oldest, when <> is set to `check_manifest_and_segment_metadata`." + }, + "cloud_storage_recovery_topic_validation_mode": { + "description": "Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. For each topic, this reports errors for all partitions, but for each partition, only the first error is reported.\n\nThis property accepts the following parameters:\n\n- `no_check`: Skips the checks for topic recovery.\n- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. Fails if there are connection issues to the object storage.\n- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects.\n\nExample: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0:\n\n```bash\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error\n```\n\nEach failing partition error message has the following format:\n\n```bash\nERROR .... [... recovery validation of {}...] - , validation not ok\n```\n\nAt the end of the process, Redpanda outputs a final ERROR message: \n\n```bash\nERROR ... ... - Stopping recovery of {} due to validation error\n```" + }, + "cloud_storage_roles_operation_timeout_ms": { + "description": "Timeout for IAM role related operations (ms)." + }, + "cloud_storage_scrubbing_interval_jitter_ms": { + "description": "Jitter applied to the object storage scrubbing interval." + }, + "cloud_storage_segment_max_upload_interval_sec": { + "description": "Time that a segment can be kept locally without uploading it to the object storage, in seconds." + }, + "cloud_storage_segment_size_min": { + "description": "Smallest acceptable segment size in the object storage. Default: `cloud_storage_segment_size_target`/2." }, "cloud_storage_segment_size_target": { - "description": "Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property.", - "config_scope": "object-storage" + "description": "Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property." + }, + "cloud_storage_segment_upload_timeout_ms": { + "description": "Log segment upload timeout, in milliseconds." 
+ }, + "cloud_storage_spillover_manifest_max_segments": { + "description": "Maximum number of segments in the spillover manifest that can be offloaded to the object storage. This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes." + }, + "cloud_storage_spillover_manifest_size": { + "description": "The size of the manifest that can be offloaded to the cloud. If the size of the local manifest stored in Redpanda exceeds two times `cloud_storage_spillover_manifest_size`, the spillover mechanism splits the manifest into two parts and uploads one of them to object storage." + }, + "cloud_storage_throughput_limit_percent": { + "description": "Maximum throughput used by Tiered Storage per broker expressed as a percentage of the disk bandwidth. If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. The actual usage depends on your workload and the state of the Tiered Storage cache. This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob." + }, + "cloud_storage_topic_purge_grace_period_ms": { + "description": "Grace period during which the purger refuses to purge the topic." + }, + "cloud_storage_upload_ctrl_d_coeff": { + "description": "Derivative coefficient for upload PID controller." + }, + "cloud_storage_upload_ctrl_max_shares": { + "description": "Maximum number of I/O and CPU shares that archival upload can use." + }, + "cloud_storage_upload_ctrl_min_shares": { + "description": "Minimum number of I/O and CPU shares that archival upload can use." + }, + "cloud_storage_upload_ctrl_p_coeff": { + "description": "Proportional coefficient for upload PID controller." }, "cloud_storage_upload_ctrl_update_interval_ms": { - "description": "The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", - "config_scope": "object-storage" + "description": "The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support." + }, + "cloud_storage_upload_loop_initial_backoff_ms": { + "description": "Initial backoff interval when there is nothing to upload for a partition, in milliseconds." + }, + "cloud_storage_upload_loop_max_backoff_ms": { + "description": "Maximum backoff interval when there is nothing to upload for a partition, in milliseconds."
+ }, + "cloud_storage_url_style": { + "description": "Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage.\n\nLeave this property unset (`null`) to use automatic configuration:\n\n* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed\n* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration\n\nSet this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format." }, "cluster_id": { "description": "NOTE: This property is read-only in Redpanda Cloud.\n\nCluster identifier.", @@ -201,42 +447,136 @@ ], "config_scope": "topic" }, - "core_balancing_continuous": { - "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" - ], - "config_scope": "cluster" + "confluent.key.schema.validation": { + "description": "Enable validation of the schema ID for keys on a record. This is a compatibility alias for `redpanda.key.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy.", + "config_scope": "topic" }, - "crash_loop_sleep_sec": { - "description": "The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <> broker property.", - "version": "v24.3.4", - "config_scope": "broker", - "category": "redpanda" + "confluent.key.subject.name.strategy": { + "description": "The subject name strategy for keys when `confluent.key.schema.validation` is enabled. This is a compatibility alias for `redpanda.key.subject.name.strategy` that determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "config_scope": "topic" }, - "data_transforms_binary_max_size": { - "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe maximum size for a deployable WebAssembly binary that the broker can store.", - "config_scope": "cluster" + "confluent.value.schema.validation": { + "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", + "config_scope": "topic" }, - "data_transforms_per_core_memory_reservation": { - "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", - "config_scope": "cluster" + "confluent.value.subject.name.strategy": { + "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. 
This is a compatibility alias for `redpanda.value.subject.name.strategy`. This determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "config_scope": "topic" }, - "data_transforms_per_function_memory_limit": { - "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "consumer_group_lag_collection_interval_sec": { + "description": "How often to run the collection loop when <> contains `consumer_lag`.\n\nReducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings.", "config_scope": "cluster" }, - "data_transforms_read_buffer_memory_percentage": { - "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for read buffers.", - "config_scope": "cluster" + "consumer_heartbeat_interval_ms": { + "description": "Interval (in milliseconds) for consumer heartbeats.", + "config_scope": "broker", + "category": "pandaproxy-client" }, - "data_transforms_write_buffer_memory_percentage": { + "consumer_instance_timeout_ms": { + "description": "How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats.", + "config_scope": "broker", + "category": "pandaproxy" + }, + "consumer_offsets_topic_batch_cache_enabled": { + "description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. 
The change is applied only after the restart.", + "config_scope": "cluster" + }, + "consumer_rebalance_timeout_ms": { + "description": "Timeout (in milliseconds) for consumer rebalance.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_request_max_bytes": { + "description": "Maximum bytes to fetch per request.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_request_min_bytes": { + "description": "Minimum bytes to fetch per request.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_request_timeout_ms": { + "description": "Interval (in milliseconds) for consumer request timeout.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "consumer_session_timeout_ms": { + "description": "Timeout (in milliseconds) for consumer session.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "controller_log_accummulation_rps_capacity_topic_operations": { + "description": "Maximum capacity of rate limit accumulation in controller topic operations limit.", + "config_scope": "cluster" + }, + "core_balancing_continuous": { + "related_topics": [ + ], + "config_scope": "cluster" + }, + "core_balancing_debounce_timeout": { + "description": "Interval, in milliseconds, between trigger and invocation of core balancing.", + "config_scope": "cluster" + }, + "crash_loop_limit": { + "config_scope": "broker", + "category": "redpanda", + "description": "A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. This limit prevents a broker from getting stuck in an infinite cycle of crashes.\n\nIf `null`, the property is disabled and no limit is applied.\n\nThe crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions:\n\n* The broker shuts down cleanly.\n* One hour passes since the last crash.\n* The `redpanda.yaml` broker configuration file is updated.\n* The `startup_log` file in the broker's <> broker property is manually deleted." + }, + "crash_loop_sleep_sec": { + "description": "The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <> broker property.", + "version": "v24.3.4", + "config_scope": "broker", + "category": "redpanda" + }, + "data_directory": { + "config_scope": "broker", + "category": "redpanda" + }, + "data_transforms_binary_max_size": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe maximum size for a deployable WebAssembly binary that the broker can store.", + "config_scope": "cluster" + }, + "data_transforms_per_core_memory_reservation": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. 
The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "config_scope": "cluster" + }, + "data_transforms_per_function_memory_limit": { + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "config_scope": "cluster" + }, + "data_transforms_read_buffer_memory_percentage": { + "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for read buffers.", + "config_scope": "cluster" + }, + "data_transforms_write_buffer_memory_percentage": { "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for write buffers.", "config_scope": "cluster" }, + "datalake_coordinator_snapshot_max_delay_secs": { + "description": "Maximum amount of time the coordinator waits to snapshot after a command appears in the log.", + "config_scope": "cluster" + }, + "datalake_disk_space_monitor_enable": { + "description": "Option to explicitly disable enforcement of datalake disk space usage.", + "config_scope": "cluster" + }, + "datalake_scheduler_max_concurrent_translations": { + "description": "The maximum number of translations that the datalake scheduler will allow to run at a given time. If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available.", + "config_scope": "cluster" + }, + "datalake_scheduler_time_slice_ms": { + "description": "Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated.", + "config_scope": "cluster" + }, + "datalake_scratch_space_soft_limit_size_percent": { + "description": "Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value.", + "config_scope": "cluster" + }, "default_leaders_preference": { "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\n\nThe list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\n\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.", "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" ], "config_scope": "cluster" }, @@ -253,6 +593,18 @@ "config_scope": "broker", "category": "redpanda" }, + "disable_cluster_recovery_loop_for_tests": { + "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop. 
This property is used to simplify testing and should not be set in production.", + "config_scope": "cluster" + }, + "disk_reservation_percent": { + "description": "The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well \nas when cloud cache uses a dedicated disk. \n\nIt is recommended to not run disks near capacity to avoid blocking I/O due to low disk space, as well as avoiding performance issues associated with SSD garbage collection.", + "config_scope": "cluster" + }, + "election_timeout_ms": { + "description": "Raft election timeout expressed in milliseconds.", + "config_scope": "cluster" + }, "emergency_disable_data_transforms": { "description": "Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. This is an emergency shutoff button.", "related_topics": [ @@ -282,11 +634,14 @@ "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`]", "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`]", "xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`]", - "xref:manage:monitoring.adoc#consumers[Monitor consumer group lag]", - "xref:manage:monitor-cloud.adoc#consumers[Monitor consumer group lag]" + "xref:manage:monitoring.adoc#consumers[Monitor consumer group lag]" ], "config_scope": "cluster" }, + "enable_developmental_unrecoverable_data_corrupting_features": { + "description": "Development features should never be enabled in a production cluster, or any cluster where stability, data loss, or the ability to upgrade are a concern. To enable experimental features, set the value of this configuration option to the current unix epoch expressed in seconds. The value must be within one hour of the current time on the broker. Once experimental features are enabled, they cannot be disabled.", + "config_scope": "cluster" + }, "enable_host_metrics": { "description": "Enable exporting of some host metrics like `/proc/diskstats`, `/proc/snmp` and `/proc/net/netstat`.\n\nHost metrics are prefixed with xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] and are available on the `/metrics` endpoint.", "related_topics": [ @@ -301,6 +656,10 @@ ], "config_scope": "cluster" }, + "enable_sasl": { + "description": "Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <>.", + "config_scope": "cluster" + }, "enable_schema_id_validation": { "related_topics": [ "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" @@ -308,6 +667,15 @@ "description": "Mode to enable server-side schema ID validation.\n\n*Accepted values:*\n\n* `none`: Schema validation is disabled (no schema ID checks are done). Associated topic properties cannot be modified.\n* `redpanda`: Schema validation is enabled. Only Redpanda topic properties are accepted.\n* `compat`: Schema validation is enabled. Both Redpanda and compatible topic properties are accepted.", "config_scope": "cluster" }, + "fetch_read_strategy": { + "description": "The strategy used to fulfill fetch requests.\n\n* `polling`: Repeatedly polls every partition in the request for new data.
The polling interval is set by <> (deprecated).\n\n* `non_polling`: The backend is signaled when a partition has new data, so Redpanda doesn't need to repeatedly read from every partition in the fetch. Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization.\n\n* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by <> at the start of each fetch. By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency.", + "config_scope": "cluster" + }, + "fips_mode": { + "config_scope": "broker", + "category": "redpanda", + "description": "Controls whether Redpanda starts in FIPS mode. This property allows for three values: \n\n* Disabled - Redpanda does not start in FIPS mode.\n\n* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods.\n\n* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. If the file does not exist or does not return `1`, Redpanda immediately exits." + }, "flush.bytes": { "description": "The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested.", "related_topics": [ @@ -325,10 +693,13 @@ "http_authentication": { "description": "A list of supported HTTP authentication mechanisms.\n\n*Accepted values:*\n\n* `BASIC`: Basic authentication\n* `OIDC`: OpenID Connect", "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" ], "config_scope": "cluster" }, + "iceberg_backlog_controller_i_coeff": { + "description": "Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation.", + "config_scope": "cluster" + }, "iceberg_backlog_controller_p_coeff": { "description": "Proportional coefficient for the Iceberg backlog controller. Number of shares assigned to the datalake scheduling group will be proportional to the backlog size error. 
A negative value means larger and faster changes in the number of shares in the datalake scheduling group.", "config_scope": "cluster" @@ -340,22 +711,34 @@ "iceberg_default_partition_spec": { "description": "ifndef::env-cloud[]\nDefault value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`] topic property that determines the partition spec for the Iceberg table corresponding to the topic.\nendif::[]\n\nifdef::env-cloud[]\nDefault value for the `redpanda.iceberg.partition.spec` topic property that determines the partition spec for the Iceberg table corresponding to the topic.\nendif::[]", "related_topics": [ - "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`]" + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`]" ], "config_scope": "cluster" }, + "iceberg_delete": { + "description": "Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.", + "config_scope": "cluster" + }, + "iceberg_disable_automatic_snapshot_expiry": { + "description": "Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own.", + "config_scope": "cluster" + }, + "iceberg_disable_snapshot_tagging": { + "description": "Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags.", + "config_scope": "cluster" + }, "iceberg_enabled": { "description": "ifndef::env-cloud[]\nEnables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`] topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.\nendif::[]\nifdef::env-cloud[]\nEnables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the `redpanda.iceberg.enabled` topic-level property to `true` to use it. 
If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.\nendif::[]",
       "related_topics": [
-        "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`]"
+        "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`]"
       ],
       "config_scope": "cluster"
     },
     "iceberg_invalid_record_action": {
       "description": "ifndef::env-cloud[]\nDefault value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`] topic property.\nendif::[]\nifdef::env-cloud[]\nDefault value for the `redpanda.iceberg.invalid.record.action` topic property.\nendif::[]",
       "related_topics": [
-        "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`]",
-        "xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]"
+        "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`]",
+        "self-managed-only: xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]"
       ],
       "config_scope": "cluster"
     },
@@ -366,6 +749,10 @@
       ],
       "config_scope": "cluster"
     },
+    "iceberg_rest_catalog_authentication_mode": {
+      "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditionally, and no attempts are made to refresh the token. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to the AWS Glue catalog with SigV4.",
+      "config_scope": "cluster"
+    },
     "iceberg_rest_catalog_aws_access_key": {
       "description": "AWS access key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] when using aws_sigv4 authentication mode.",
       "related_topics": [
@@ -387,14 +774,41 @@
       ],
       "config_scope": "cluster"
     },
+    "iceberg_rest_catalog_client_id": {
+      "description": "Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.",
+      "config_scope": "cluster"
+    },
     "iceberg_rest_catalog_client_secret": {
       "description": "Secret used with the client ID to query the OAuth token endpoint for Iceberg REST catalog authentication. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.",
       "config_scope": "cluster"
     },
+    "iceberg_rest_catalog_credentials_source": {
+      "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. 
If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.",
+      "related_topics": [
+        "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]"
+      ],
+      "config_scope": "cluster"
+    },
+    "iceberg_rest_catalog_crl": {
+      "description": "The contents of a certificate revocation list for `iceberg_rest_catalog_trust`. Takes precedence over `iceberg_rest_catalog_crl_file`.",
+      "config_scope": "cluster"
+    },
     "iceberg_rest_catalog_endpoint": {
       "description": "URL of Iceberg REST catalog endpoint.\nNOTE: If you set <> to `rest`, you must also set this property at the same time.",
       "config_scope": "cluster"
     },
+    "iceberg_rest_catalog_oauth2_scope": {
+      "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.",
+      "config_scope": "cluster"
+    },
+    "iceberg_rest_catalog_oauth2_server_uri": {
+      "description": "The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.",
+      "config_scope": "cluster"
+    },
+    "iceberg_rest_catalog_request_timeout_ms": {
+      "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request.",
+      "config_scope": "cluster"
+    },
     "iceberg_rest_catalog_token": {
       "description": "Token used to access the REST Iceberg catalog. If the token is present, Redpanda ignores credentials stored in the properties <> and <>.\n\nRequired if <> is set to `bearer`.",
       "config_scope": "cluster"
     },
@@ -403,16 +817,32 @@
       "description": "The contents of a certificate chain to trust for the REST Iceberg catalog.\nifndef::env-cloud[]\nTakes precedence over <>.\nendif::[]",
       "config_scope": "cluster"
     },
+    "iceberg_rest_catalog_trust_file": {
+      "description": "Path to a file containing a certificate chain to trust for the REST Iceberg catalog.",
+      "config_scope": "cluster"
+    },
     "iceberg_rest_catalog_warehouse": {
       "description": "Warehouse to use for the Iceberg REST catalog. Redpanda queries the catalog to retrieve warehouse-specific configurations and automatically configures settings like the appropriate prefix. The prefix is appended to the catalog path (for example, `/v1/\\{prefix}/namespaces`).",
       "config_scope": "cluster"
     },
+    "iceberg_target_backlog_size": {
+      "description": "Average size per partition of the datalake translation backlog that the backlog controller tries to maintain. 
When the backlog size is larger than the set point, the backlog controller will increase the translation scheduling group priority.",
+      "config_scope": "cluster"
+    },
     "iceberg_target_lag_ms": {
       "related_topics": [
-        "xref:reference:properties/topic-properties.adoc#redpanda-iceberg-target-lag-ms[`redpanda.iceberg.target.lag.ms`]"
+        "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-target-lag-ms[`redpanda.iceberg.target.lag.ms`]"
       ],
       "config_scope": "cluster"
     },
+    "iceberg_throttle_backlog_size_ratio": {
+      "description": "Ratio of the total backlog size to the disk space at which the throttle to Iceberg producers is applied.",
+      "config_scope": "cluster"
+    },
+    "iceberg_topic_name_dot_replacement": {
+      "description": "Optional replacement string for dots in topic names when deriving Iceberg table names, useful when downstream systems do not permit dots in table names. The replacement string cannot contain dots. Be careful to avoid table name collisions caused by the replacement. If an Iceberg topic with dots in the name exists in the cluster, the value of this property should not be changed.",
+      "config_scope": "cluster"
+    },
     "initial.retention.local.target.bytes": {
       "description": "A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.",
       "related_topics": [
@@ -443,6 +873,10 @@
       ],
       "config_scope": "cluster"
     },
+    "internal_topic_replication_factor": {
+      "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.",
+      "config_scope": "cluster"
+    },
     "kafka_api": {
       "description": "IP address and port of the Kafka API endpoint that handles requests. Supports multiple listeners with different configurations.",
       "related_topics": [
@@ -495,6 +929,10 @@
       ],
       "config_scope": "cluster"
     },
+    "kafka_enable_authorization": {
+      "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>.\n\n* `null`: Ignored. Authorization is enabled with `enable_sasl`: `true`.\n* `true`: authorization is required.\n* `false`: authorization is disabled.",
+      "config_scope": "cluster"
+    },
     "kafka_nodelete_topics": {
       "related_topics": [
         "xref:develop:consume-data/consumer-offsets.adoc[Consumer Offsets]",
         "xref:reference:rpk/rpk-topic/rpk-topic-delete.adoc[`rpk topic delete`]"
       ],
       "config_scope": "cluster"
     },
@@ -502,6 +940,22 @@
       ],
       "config_scope": "cluster"
     },
+    "kafka_produce_batch_validation": {
+      "description": "Controls the level of validation performed on batches produced to Redpanda. When set to `legacy`, there is minimal validation performed on the produce path. When set to `relaxed`, full validation is performed on uncompressed batches and on compressed batches with the `max_timestamp` value left unset. When set to `strict`, full validation of uncompressed and compressed batches is performed. This should be the default in environments where producing clients are not trusted.",
+      "config_scope": "cluster"
+    },
+    "kafka_qdc_max_depth": {
+      "description": "Maximum queue depth used in Kafka queue depth control.",
+      "config_scope": "cluster"
+    },
+    "kafka_qdc_window_count": {
+      "description": "Number of windows used in Kafka queue depth control latency tracking.",
+      "config_scope": "cluster"
+    },
+    "kafka_sasl_max_reauth_ms": {
+      "description": "The maximum time between Kafka client reauthentications. 
If a client has not reauthenticated a connection within this time frame, that connection is torn down.", + "config_scope": "cluster" + }, "kafka_throughput_control": { "related_topics": [ "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage throughput]" @@ -530,6 +984,10 @@ "config_scope": "cluster", "description": "Threshold for refilling the token bucket as part of enforcing throughput limits.\n\nThis threshold is evaluated with each request for data. When the number of tokens to replenish exceeds this threshold, then tokens are added to the token bucket. This ensures that the atomic is not being updated for the token count with each request. The range for this threshold is automatically clamped to the corresponding throughput limit for ingress and egress." }, + "leader_balancer_idle_timeout": { + "description": "Leadership rebalancing idle timeout.", + "config_scope": "cluster" + }, "leader_balancer_mute_timeout": { "description": "The length of time that a glossterm:Raft[] group is muted after a leadership rebalance operation. Any group that has been moved, regardless of whether the move succeeded or failed, undergoes a cooling-off period. This prevents Raft groups from repeatedly experiencing leadership rebalance operations in a short time frame, which can lead to instability in the cluster.\n\nThe leader balancer maintains a list of muted groups and reevaluates muted status at the start of each balancing iteration. Muted groups still contribute to overall cluster balance calculations although they can't themselves be moved until the mute period is over.", "config_scope": "cluster" @@ -545,6 +1003,14 @@ ], "config_scope": "cluster" }, + "log_compaction_merge_max_ranges": { + "description": "The maximum range of segments that can be processed in a single round of adjacent segment compaction. If `null` (the default value), no maximum is imposed on the number of ranges that can be processed at once. A value below 1 effectively disables adjacent merge compaction.", + "config_scope": "cluster" + }, + "log_compaction_pause_use_sliding_window": { + "description": "Pause use of sliding window compaction. Toggle to `true` _only_ when you want to force adjacent segment compaction. The memory reserved by `storage_compaction_key_map_memory` is not freed when this is set to `true`.", + "config_scope": "cluster" + }, "log_compression_type": { "description": "IMPORTANT: This property is ignored regardless of the value specified. The behavior is always the same as the `producer` value. Redpanda brokers do not compress or recompress data based on this property. If producers send compressed data, Redpanda stores it as-is; if producers send uncompressed data, Redpanda stores it uncompressed. Other listed values are accepted for Apache Kafka compatibility but are ignored by the broker. This property may appear in Admin API and `rpk topic describe` outputs for compatibility.\n\nDefault for the Kafka-compatible compression.type property. Redpanda does not recompress data.\n\nThe topic property xref:./topic-properties.adoc#compressiontype[`compression.type`] overrides the value of `log_compression_type` at the topic level.", "related_topics": [ @@ -560,16 +1026,22 @@ "config_scope": "cluster" }, "log_retention_ms": { - "related_topics": [ - "xref:./topic-properties.adoc#retentionms[`retention.ms`]" - ], "config_scope": "cluster", - "description": "The amount of time to keep a log file before deleting it (in milliseconds). If set to `-1`, no time limit is applied. 
This is a cluster-wide default when a topic does not set or disable xref:./topic-properties.adoc#retentionms[`retention.ms`]." + "description": "The amount of time to keep a log file before deleting it (in milliseconds). If set to `-1`, no time limit is applied. This is a cluster-wide default when a topic does not set or disable `retention.ms`." }, "log_segment_ms": { - "related_topics": [ - "xref:./topic-properties.adoc#segmentms[`segment.ms`]" - ], + "config_scope": "cluster" + }, + "log_segment_ms_max": { + "description": "Upper bound on topic `segment.ms`: higher values will be clamped to this value.", + "config_scope": "cluster" + }, + "log_segment_ms_min": { + "description": "Lower bound on topic `segment.ms`: lower values will be clamped to this value.", + "config_scope": "cluster" + }, + "log_segment_size": { + "description": "Default log segment size in bytes for topics which do not set `segment.bytes`.", "config_scope": "cluster" }, "max.compaction.lag.ms": { @@ -596,6 +1068,10 @@ ], "config_scope": "cluster" }, + "max_concurrent_producer_ids": { + "description": "Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting.", + "config_scope": "cluster" + }, "max_transactions_per_coordinator": { "description": "Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase).\n\nFor details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips].", "related_topics": [ @@ -603,6 +1079,11 @@ ], "config_scope": "cluster" }, + "memory_allocation_warning_threshold": { + "config_scope": "broker", + "category": "redpanda", + "description": "Threshold for log messages that contain a larger memory allocation than specified." + }, "message.timestamp.type": { "description": "The source of a message's timestamp: either the message's creation time or its log append time.\n\nWhen `message.timestamp.type` is set, it overrides the cluster property xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] for the topic.", "related_topics": [ @@ -611,6 +1092,10 @@ ], "config_scope": "topic" }, + "metadata_dissemination_retries": { + "description": "Number of attempts to look up a topic's metadata-like shard before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for topics like tx, consumer offsets, etc) is missing. These topics are usually created on demand when users try to use the cluster for the first time and it may take some time for the creation to happen and the metadata to propagate to all the brokers (particularly the broker handling the request). In the meantime Redpanda waits and retries. 
This configuration controls the number retries.", + "config_scope": "cluster" + }, "min.cleanable.dirty.ratio": { "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic.", "related_topics": [ @@ -627,6 +1112,10 @@ ], "config_scope": "topic" }, + "min_cleanable_dirty_ratio": { + "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. The topic property `min.cleanable.dirty.ratio` overrides this value at the topic level.", + "config_scope": "cluster" + }, "min_compaction_lag_ms": { "related_topics": [ "xref:reference:properties/topic-properties.adoc#min.compaction.lag.ms[`min.compaction.lag.ms`]" @@ -634,6 +1123,16 @@ "config_scope": "cluster", "description": "The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic." }, + "mode_mutability": { + "description": "Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects.", + "config_scope": "broker", + "category": "schema-registry" + }, + "node_id": { + "config_scope": "broker", + "category": "redpanda", + "description": "A number that uniquely identifies the broker within the cluster. If `null` (the default value), Redpanda automatically assigns an ID. If set, it must be non-negative value.\n\n.Do not set `node_id` manually.\n[WARNING]\n====\nRedpanda assigns unique IDs automatically to prevent issues such as:\n\n- Brokers with empty disks rejoining the cluster.\n- Conflicts during recovery or scaling.\n\nManually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures.\n====\n\nBroker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed." + }, "node_id_overrides": { "description": "List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID.\n\nRemove this property after the cluster restarts successfully and operates normally. 
This prevents reapplication and maintains consistent configuration across brokers.", "example": [ @@ -655,6 +1154,10 @@ "config_scope": "broker", "category": "redpanda" }, + "oidc_clock_skew_tolerance": { + "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.\n\n*Unit*: seconds", + "config_scope": "cluster" + }, "oidc_discovery_url": { "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe URL pointing to the well-known discovery endpoint for the OIDC provider.", "config_scope": "cluster" @@ -663,7 +1166,7 @@ "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nRule for mapping JWT payload claim to a Redpanda user principal.", "related_topics": [ "xref:manage:security/authentication.adoc#oidc[OpenID Connect authentication]", - "xref:manage:kubernetes/security/authentication/k-authentication.adoc[OpenID Connect authentication in Kubernetes]" + "self-managed-only: xref:manage:kubernetes/security/authentication/k-authentication.adoc[OpenID Connect authentication in Kubernetes]" ], "config_scope": "cluster" }, @@ -671,6 +1174,35 @@ "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nA string representing the intended recipient of the token.", "config_scope": "cluster" }, + "openssl_config_file": { + "config_scope": "broker", + "category": "redpanda" + }, + "openssl_module_directory": { + "config_scope": "broker", + "category": "redpanda" + }, + "pandaproxy_api": { + "description": "Rest API listener address and port.", + "example": [ + ".Example", + "[,yaml]", + "----", + "pandaproxy:", + " pandaproxy_api:", + " address: 0.0.0.0", + " port: 8082", + " authentication_method: http_basic", + "----" + ], + "config_scope": "broker", + "category": "pandaproxy" + }, + "pandaproxy_api_tls": { + "description": "TLS configuration for Pandaproxy API.", + "config_scope": "broker", + "category": "pandaproxy" + }, "partition_autobalancing_max_disk_usage_percent": { "related_topics": [ "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" @@ -680,10 +1212,9 @@ "partition_autobalancing_mode": { "related_topics": [ "xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing]", - "xref:get-started:licensing/index.adoc[enterprise license]", "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" ], - "description": "Mode of xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing] for a cluster.\n\n*Accepted values:*\n\n* `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option requires an xref:get-started:licensing/index.adoc[enterprise license], and it is customized by <> and <> properties.\n* `node_add`: partition balancing happens when a node is added.\n* `off`: partition balancing is disabled. This option is not recommended for production clusters.", + "description": "Mode of xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing] for a cluster.\n\n*Accepted values:*\n\n* `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. 
This option is customized by <> and <> properties.\n* `node_add`: partition balancing happens when a node is added.\n* `off`: partition balancing is disabled. This option is not recommended for production clusters.", "config_scope": "cluster" }, "partition_autobalancing_node_availability_timeout_sec": { @@ -692,10 +1223,41 @@ ], "config_scope": "cluster" }, + "partition_manager_shutdown_watchdog_timeout": { + "description": "A threshold value to detect partitions which might have been stuck while shutting down. After this threshold, a watchdog in partition manager will log information about partition shutdown not making progress.", + "config_scope": "cluster" + }, "pp_sr_smp_max_non_local_requests": { "description": "Maximum number of Cross-core(Inter-shard communication) requests pending in HTTP Proxy and Schema Registry seastar::smp group. (For more details, see the `seastar::smp_service_group` documentation).\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", "config_scope": "cluster" }, + "produce_ack_level": { + "config_scope": "broker", + "category": "pandaproxy-client", + "description": "Number of acknowledgments the producer requires the leader to have received before considering a request complete." + }, + "produce_batch_delay_ms": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_batch_record_count": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_batch_size_bytes": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "produce_compression_type": { + "config_scope": "broker", + "category": "pandaproxy-client", + "description": "Enable or disable compression by the Kafka client. Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd]." + }, + "produce_shutdown_delay_ms": { + "description": "Delay (in milliseconds) to allow for final flush of buffers before shutting down.", + "config_scope": "broker", + "category": "pandaproxy-client" + }, "rack": { "related_topics": [ "xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness]" @@ -704,6 +1266,14 @@ "category": "redpanda", "description": "A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones." }, + "raft_max_buffered_follower_append_entries_bytes_per_shard": { + "description": "The total size of append entry requests that may be cached per shard, using the Raft-buffered protocol. When an entry is cached, the leader can continue serving requests because the ordering of the cached requests cannot change. When the total size of cached requests reaches the set limit, back pressure is applied to throttle producers.", + "config_scope": "cluster" + }, + "raft_max_inflight_follower_append_entries_requests_per_shard": { + "description": "The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. This property replaces `raft_max_concurrent_append_requests_per_follower`.", + "config_scope": "cluster" + }, "raft_recovery_throttle_disable_dynamic_mode": { "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables cross shard sharing used to throttle recovery traffic. 
Should only be used to debug unexpected problems.", "config_scope": "cluster" @@ -712,6 +1282,10 @@ "description": "Maximum number of Cross-core(Inter-shard communication) requests pending in Raft seastar::smp group. For details, refer to the `seastar::smp_service_group` documentation).\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", "config_scope": "cluster" }, + "reclaim_stable_window": { + "description": "If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed.", + "config_scope": "cluster" + }, "recovery_mode_enabled": { "description": "If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed.", "related_topics": [ @@ -720,6 +1294,10 @@ "config_scope": "broker", "category": "redpanda" }, + "redpanda.cloud_topic.enabled": { + "config_scope": "topic", + "category": "tiered-storage" + }, "redpanda.iceberg.delete": { "description": "Whether the corresponding Iceberg table is deleted upon deleting the topic.", "config_scope": "topic" @@ -770,7 +1348,6 @@ "redpanda.remote.read": { "description": "A flag for enabling Redpanda to fetch data for a topic from object storage to local storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", "related_topics": [ - "xref:manage:tiered-storage.adoc[Tiered Storage]", "xref:manage:tiered-storage.adoc[Tiered Storage]" ], "config_scope": "topic" @@ -792,7 +1369,6 @@ "redpanda.remote.write": { "description": "A flag for enabling Redpanda to upload data for a topic from local storage to object storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", "related_topics": [ - "xref:manage:tiered-storage.adoc[Tiered Storage]", "xref:manage:tiered-storage.adoc[Tiered Storage]" ], "config_scope": "topic" @@ -877,6 +1453,25 @@ "config_scope": "cluster", "description": "Local retention time target for partitions of topics with object storage write enabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.ms` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]." }, + "retention_local_trim_interval": { + "config_scope": "cluster" + }, + "retries": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "retry_base_backoff_ms": { + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "rpc_server": { + "config_scope": "broker", + "category": "redpanda" + }, + "rpc_server_listen_backlog": { + "description": "Maximum TCP connection queue length for Kafka server and internal RPC server. If `null` (the default value), no queue length is set.", + "config_scope": "cluster" + }, "rpc_server_tls": { "example": [ ".Example", @@ -894,20 +1489,77 @@ "config_scope": "broker", "category": "redpanda" }, + "rpk_path": { + "description": "Path to RPK binary.", + "config_scope": "cluster" + }, + "sasl_mechanism": { + "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. 
These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" + ], + "config_scope": "broker", + "category": "pandaproxy-client" + }, "sasl_mechanisms": { "description": "A list of supported SASL mechanisms.\n\n*Accepted values:*\n\n* `SCRAM`\n* `GSSAPI`\n* `OAUTHBEARER`\n* `PLAIN`\n\nNote that in order to enable PLAIN, you must also enable SCRAM.", "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" ], "config_scope": "cluster" }, + "sasl_mechanisms_overrides": { + "description": "A list of overrides for SASL mechanisms, defined by listener. SASL mechanisms defined here will replace the ones set in `sasl_mechanisms`. The same limitations apply as for `sasl_mechanisms`.", + "related_topics": [ + ], + "config_scope": "cluster" + }, + "schema_registry_always_normalize": { + "description": "Always normalize schemas. If set, this overrides the `normalize` parameter in requests to the Schema Registry API.", + "config_scope": "cluster" + }, + "schema_registry_api": { + "example": [ + ".Example", + "[,yaml]", + "----", + "schema_registry:", + " schema_registry_api:", + " address: 0.0.0.0", + " port: 8081", + " authentication_method: http_basic", + "----" + ], + "config_scope": "broker", + "category": "schema-registry" + }, + "schema_registry_api_tls": { + "config_scope": "broker", + "category": "schema-registry" + }, "schema_registry_enable_authorization": { "description": "Enables ACL-based authorization for Schema Registry requests. When `true`, Schema Registry\nuses ACL-based authorization instead of the default `public/user/superuser` authorization model. \nifdef::env-cloud[]\nRequires authentication to be enabled using the `authentication_method` property in the `schema_registry_api` broker configuration.\nendif::[]", "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" ], "config_scope": "cluster" }, + "schema_registry_replication_factor": { + "description": "Replication factor for internal `_schemas` topic. If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property.", + "related_topics": [ + "xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`]" + ], + "config_scope": "broker", + "category": "schema-registry" + }, + "scram_password": { + "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "config_scope": "broker", + "category": "pandaproxy-client" + }, + "scram_username": { + "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. 
This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "config_scope": "broker", + "category": "pandaproxy-client" + }, "seed_servers": { "description": "List of the seed servers used to join current cluster. If the `seed_servers` list is empty the broker will be a cluster root and it will form a new cluster.\n\n* When `empty_seed_starts_cluster` is `true`, Redpanda enables one broker with an empty `seed_servers` list to initiate a new cluster. The broker with an empty `seed_servers` becomes the cluster root, to which other brokers must connect to join the cluster. Brokers looking to join the cluster should have their `seed_servers` populated with the cluster root's address, facilitating their connection to the cluster.\n+\n[IMPORTANT]\n====\nOnly one broker, the designated cluster root, should have an empty `seed_servers` list during the initial cluster bootstrapping. This ensures a single initiation point for cluster formation.\n====\n\n* When `empty_seed_starts_cluster` is `false`, Redpanda requires all brokers to start with a known set of brokers listed in `seed_servers`. The `seed_servers` list must not be empty and should be identical across these initial seed brokers, containing the addresses of all seed brokers. Brokers not included in the `seed_servers` list use it to discover and join the cluster, allowing for expansion beyond the foundational members.\n+\n[NOTE]\n====\nThe `seed_servers` list must be consistent across all seed brokers to prevent cluster fragmentation and ensure stable cluster formation.\n====", "example": [ @@ -946,6 +1598,18 @@ "description": "Limit on <>, expressed as a percentage of memory per shard, that bounds the amount of memory used by compaction key-offset maps. \n\nNOTE: Memory per shard is computed after <>, and only applies when <> is set to `true`.", "config_scope": "cluster" }, + "storage_failure_injection_config_path": { + "config_scope": "broker", + "category": "redpanda" + }, + "storage_failure_injection_enabled": { + "config_scope": "broker", + "category": "redpanda" + }, + "storage_ignore_timestamps_in_future_sec": { + "description": "The maximum number of seconds that a record's timestamp can be ahead of a Redpanda broker's clock and still be used when deciding whether to clean up the record for data retention. This property makes possible the timely cleanup of records from clients with clocks that are drastically unsynchronized relative to Redpanda.\n\nWhen determining whether to clean up a record with timestamp more than `storage_ignore_timestamps_in_future_sec` seconds ahead of the broker, Redpanda ignores the record's timestamp and instead uses a valid timestamp of another record in the same segment, or (if another record's valid timestamp is unavailable) the timestamp of when the segment file was last modified (mtime).\n\nBy default, `storage_ignore_timestamps_in_future_sec` is disabled (null).\n\n[TIP]\n====\nTo figure out whether to set `storage_ignore_timestamps_in_future_sec` for your system:\n\n. Look for logs with segments that are unexpectedly large and not being cleaned up.\n. In the logs, search for records with unsynchronized timestamps that are further into the future than tolerable by your data retention and storage settings. For example, timestamps 60 seconds or more into the future can be considered to be too unsynchronized.\n. 
If you find unsynchronized timestamps throughout your logs, determine the number of seconds that the timestamps are ahead of their actual time, and set `storage_ignore_timestamps_in_future_sec` to that value so data retention can proceed.\n. If you only find unsynchronized timestamps that are the result of transient behavior, you can disable `storage_ignore_timestamps_in_future_sec`.\n====",
+      "config_scope": "cluster"
+    },
     "storage_strict_data_init": {
       "description": "Requires that an empty file named `.redpanda_data_dir` be present in the xref:reference:properties/broker-properties.adoc#data_directory[`data_ directory`]. If set to `true`, Redpanda will refuse to start if the file is not found in the data directory.",
       "related_topics": [
         "xref:reference:properties/broker-properties.adoc#data_directory[`data_ directory`]"
       ],
       "config_scope": "cluster"
     },
+    "tls_certificate_name_format": {
+      "description": "The format of the certificate's distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'.",
+      "config_scope": "cluster"
+    },
+    "tls_enable_renegotiation": {
+      "description": "TLS client-initiated renegotiation is considered unsafe and is disabled by default. Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections as client-initiated renegotiation was removed.",
+      "config_scope": "cluster"
+    },
     "tombstone_retention_ms": {
       "description": "The retention time for tombstone records in a compacted topic. Cannot be enabled at the same time as any of `cloud_storage_enabled`, `cloud_storage_enable_remote_read`, or `cloud_storage_enable_remote_write`. A typical default setting is `86400000`, or 24 hours.",
       "related_topics": [
@@ -960,10 +1632,50 @@
       ],
       "config_scope": "cluster"
     },
+    "topic_fds_per_partition": {
+      "description": "File descriptors required per partition replica. If topic creation results in the ratio of file descriptor limit to partition replicas being lower than this value, creation of new topics fails.",
+      "config_scope": "cluster"
+    },
+    "topic_label_aggregation_limit": {
+      "description": "When the number of topics exceeds this limit, the topic label in generated metrics will be aggregated. If `null`, then there is no limit.",
+      "config_scope": "cluster"
+    },
+    "topic_memory_per_partition": {
+      "description": "Required memory in bytes per partition replica when creating or altering topics. The total size of the memory pool for partitions is the total memory available to Redpanda times `topic_partitions_memory_allocation_percent`. Each partition created requires `topic_memory_per_partition` bytes from that pool. If insufficient memory is available, creating or altering topics fails.",
+      "config_scope": "cluster"
+    },
     "topic_partitions_memory_allocation_percent": {
       "description": "Percentage of total memory to reserve for topic partitions. See <> for details.",
       "config_scope": "cluster"
     },
+    "topic_partitions_per_shard": {
+      "description": "Maximum number of partition replicas per shard. If topic creation results in the ratio of partition replicas to shards being higher than this value, creation of new topics fails.",
+      "config_scope": "cluster"
+    },
+    "topic_partitions_reserve_shard0": {
+      "description": "Reserved partition slots on shard (CPU core) 0 on each node. 
If this is greater than or equal to <>, no data partitions will be scheduled on shard 0.", + "config_scope": "cluster" + }, + "transaction_coordinator_cleanup_policy": { + "description": "Cleanup policy for a transaction coordinator topic.\n\n*Accepted values:*\n\n* `compact`\n* `delete`\n* `[\"compact\",\"delete\"]`\n* `none`", + "config_scope": "cluster" + }, + "transaction_coordinator_delete_retention_ms": { + "description": "Delete segments older than this age. To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to <>.\n\nFor example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over.", + "config_scope": "cluster" + }, + "upgrade_override_checks": { + "config_scope": "broker", + "category": "redpanda" + }, + "use_kafka_handler_scheduler_group": { + "description": "Use a separate scheduler group to handle parsing Kafka protocol requests.", + "config_scope": "cluster" + }, + "use_produce_scheduler_group": { + "description": "Use a separate scheduler group to process Kafka produce requests.", + "config_scope": "cluster" + }, "verbose_logging_timeout_sec_max": { "example": [ ".Example", @@ -996,22 +1708,6 @@ ], "config_scope": "topic" }, - "confluent.key.schema.validation": { - "description": "Enable validation of the schema ID for keys on a record. This is a compatibility alias for `redpanda.key.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy.", - "config_scope": "topic" - }, - "confluent.key.subject.name.strategy": { - "description": "The subject name strategy for keys when `confluent.key.schema.validation` is enabled. This is a compatibility alias for `redpanda.key.subject.name.strategy` that determines how the topic and schema are mapped to a subject name in the Schema Registry.", - "config_scope": "topic" - }, - "confluent.value.schema.validation": { - "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", - "config_scope": "topic" - }, - "confluent.value.subject.name.strategy": { - "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. This determines how the topic and schema are mapped to a subject name in the Schema Registry.", - "config_scope": "topic" - }, "write_caching_default": { "related_topics": [ "xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`]", @@ -1019,818 +1715,6 @@ ], "config_scope": "cluster", "description": "The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. 
\n\nFsyncs follow <> and <>, whichever is reached first.\n\nThe `write_caching_default` cluster property can be overridden with the xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] topic property." - }, - "advertised_kafka_api": { - "description": "Address of the Kafka API published to the clients. If not set, the <> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.", - "example": [ - ".Example", - "[,yaml]", - "----", - "redpanda:", - " advertised_kafka_api:", - " - name: ", - " address: ", - " port: ", - "----" - ], - "config_scope": "broker", - "category": "redpanda" - }, - "advertised_rpc_api": { - "description": "Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker.", - "example": [ - ".Example", - "[,yaml]", - "----", - "redpanda:", - " advertised_rpc_api:", - " address: ", - " port: ", - "----" - ], - "config_scope": "broker", - "category": "redpanda" - }, - "api_doc_dir": { - "description": "Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API.", - "config_scope": "broker", - "category": "pandaproxy" - }, - "audit_enabled": { - "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" - ], - "config_scope": "cluster" - }, - "client_keep_alive": { - "description": "Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API.", - "config_scope": "broker", - "category": "pandaproxy" - }, - "cloud_storage_access_key": { - "description": "AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication.\nTo authenticate using IAM roles, see <>.", - "config_scope": "object-storage" - }, - "cloud_storage_client_lease_timeout_ms": { - "description": "Maximum time to hold a cloud storage client lease (ms), after which any outstanding connection is immediately closed.", - "config_scope": "cluster" - }, - "cloud_storage_disable_archiver_manager": { - "description": "Use legacy upload mode and do not start archiver_manager.", - "config_scope": "cluster" - }, - "cloud_storage_inventory_hash_path_directory": { - "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", - "example": [ - ".Example", - "[,yaml]", - "----", - "redpanda:", - " cloud_storage_inventory_hash_store: ", - "----" - ], - "config_scope": "object-storage" - }, - "consumer_heartbeat_interval_ms": { - "description": "Interval (in milliseconds) for consumer heartbeats.", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "consumer_instance_timeout_ms": { - "description": "How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats.", - "config_scope": "broker", - "category": "pandaproxy" - }, - "consumer_offsets_topic_batch_cache_enabled": { - "description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. 
Changing this property is not recommended in production systems, as it may affect performance. The change is applied only after the restart.", - "config_scope": "cluster" - }, - "consumer_rebalance_timeout_ms": { - "description": "Timeout (in milliseconds) for consumer rebalance.", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "consumer_request_max_bytes": { - "description": "Maximum bytes to fetch per request.\n\n*Unit:* bytes", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "consumer_request_min_bytes": { - "description": "Minimum bytes to fetch per request.\n\n*Unit:* bytes", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "consumer_request_timeout_ms": { - "description": "Interval (in milliseconds) for consumer request timeout.", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "consumer_session_timeout_ms": { - "description": "Timeout (in milliseconds) for consumer session.", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "development_enable_cluster_link": { - "description": "Enable cluster linking.", - "config_scope": "cluster" - }, - "development_feature_property_testing_only": { - "description": "Development feature property for testing only.", - "config_scope": "cluster" - }, - "disable_cluster_recovery_loop_for_tests": { - "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop. This property is used to simplify testing and should not be set in production.", - "config_scope": "cluster" - }, - "enable_developmental_unrecoverable_data_corrupting_features": { - "description": "Development features should never be enabled in a production cluster, or any cluster where stability, data loss, or the ability to upgrade are a concern. To enable experimental features, set the value of this configuration option to the current unix epoch expressed in seconds. The value must be within one hour of the current time on the broker.Once experimental features are enabled they cannot be disabled", - "config_scope": "cluster" - }, - "iceberg_delete": { - "description": "Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.", - "config_scope": "cluster" - }, - "iceberg_disable_snapshot_tagging": { - "description": "Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags.", - "config_scope": "cluster" - }, - "iceberg_rest_catalog_authentication_mode": { - "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditonally, and no attempts are made to refresh the token. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. 
In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to AWS Glue catalog with SigV4.", - "config_scope": "cluster" - }, - "iceberg_rest_catalog_client_id": { - "description": "Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", - "config_scope": "cluster" - }, - "iceberg_rest_catalog_credentials_source": { - "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", - "related_topics": [ - "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" - ], - "config_scope": "cluster" - }, - "iceberg_rest_catalog_crl": { - "description": "The contents of a certificate revocation list for `iceberg_rest_catalog_trust`. Takes precedence over `iceberg_rest_catalog_crl_file`.", - "config_scope": "cluster" - }, - "iceberg_rest_catalog_oauth2_scope": { - "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`", - "config_scope": "cluster" - }, - "iceberg_rest_catalog_oauth2_server_uri": { - "description": "The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", - "config_scope": "cluster" - }, - "iceberg_rest_catalog_request_timeout_ms": { - "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request", - "config_scope": "cluster" - }, - "iceberg_topic_name_dot_replacement": { - "description": "Optional replacement string for dots in topic names when deriving Iceberg table names, useful when downstream systems do not permit dots in table names. The replacement string cannot contain dots. Be careful to avoid table name collisions caused by the replacement.If an Iceberg topic with dots in the name exists in the cluster, the value of this property should not be changed.", - "config_scope": "cluster" - }, - "kafka_enable_authorization": { - "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>.\n\n* `null`: Ignored. Authorization is enabled with `enable_sasl`: `true`\n* `true`: authorization is required.\n* `false`: authorization is disabled.", - "config_scope": "cluster" - }, - "kafka_produce_batch_validation": { - "description": "Controls the level of validation performed on batches produced to Redpanda. 
When set to `legacy`, there is minimal validation performed on the produce path. When set to `relaxed`, full validation is performed on uncompressed batches and on compressed batches with the `max_timestamp` value left unset. When set to `strict`, full validation of uncompressed and compressed batches is performed. This should be the default in environments where producing clients are not trusted.", - "config_scope": "cluster" - }, - "mode_mutability": { - "description": "Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects.", - "config_scope": "broker", - "category": "schema-registry" - }, - "pandaproxy_api": { - "description": "Rest API listener address and port.", - "example": [ - ".Example", - "[,yaml]", - "----", - "pandaproxy:", - " pandaproxy_api:", - " address: 0.0.0.0", - " port: 8082", - " authentication_method: http_basic", - "----" - ], - "config_scope": "broker", - "category": "pandaproxy" - }, - "pandaproxy_api_tls": { - "description": "TLS configuration for Pandaproxy API.", - "config_scope": "broker", - "category": "pandaproxy" - }, - "produce_batch_delay_ms": { - "description": "Delay (in milliseconds) to wait before sending batch.", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "produce_batch_size_bytes": { - "description": "Number of bytes to batch before sending to broker.\n\n*Unit:* bytes", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "produce_shutdown_delay_ms": { - "description": "Delay (in milliseconds) to allow for final flush of buffers before shutting down.", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "retry_base_backoff_ms": { - "description": "Delay (in milliseconds) for initial retry backoff.", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "sasl_mechanism": { - "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", - "related_topics": [ - "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" - ], - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "sasl_mechanisms_overrides": { - "description": "A list of overrides for SASL mechanisms, defined by listener. SASL mechanisms defined here will replace the ones set in `sasl_mechanisms`. 
The same limitations apply as for `sasl_mechanisms`.", - "related_topics": [ - "xref:get-started:licensing/index.adoc[Redpanda Licensing]" - ], - "config_scope": "cluster" - }, - "schema_registry_api": { - "description": "Schema Registry API listener address and port.", - "example": [ - ".Example", - "[,yaml]", - "----", - "schema_registry:", - " schema_registry_api:", - " address: 0.0.0.0", - " port: 8081", - " authentication_method: http_basic", - "----" - ], - "config_scope": "broker", - "category": "schema-registry" - }, - "schema_registry_replication_factor": { - "description": "Replication factor for internal `_schemas` topic. If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property.", - "related_topics": [ - "xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`]" - ], - "config_scope": "broker", - "category": "schema-registry" - }, - "scram_password": { - "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "scram_username": { - "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", - "config_scope": "broker", - "category": "pandaproxy-client" - }, - "transaction_coordinator_cleanup_policy": { - "description": "Cleanup policy for a transaction coordinator topic.\n\n*Accepted values:*\n\n* `compact`\n* `delete`\n* `[\"compact\",\"delete\"]`\n* `none`", - "config_scope": "cluster" - }, - "admin_api_doc_dir": { - "config_scope": "broker", - "category": "redpanda" - }, - "crash_loop_limit": { - "config_scope": "broker", - "category": "redpanda", - "description": "A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. This limit prevents a broker from getting stuck in an infinite cycle of crashes.\n\nIf `null`, the property is disabled and no limit is applied.\n\nThe crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions:\n\n* The broker shuts down cleanly.\n* One hour passes since the last crash.\n* The `redpanda.yaml` broker configuration file is updated.\n* The `startup_log` file in the broker's <> broker property is manually deleted." - }, - "data_directory": { - "config_scope": "broker", - "category": "redpanda" - }, - "fips_mode": { - "config_scope": "broker", - "category": "redpanda", - "description": "Controls whether Redpanda starts in FIPS mode. This property allows for three values: \n\n* Disabled - Redpanda does not start in FIPS mode.\n\n* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. 
After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods.\n\n* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. If the file does not exist or does not return `1`, Redpanda immediately exits." - }, - "memory_allocation_warning_threshold": { - "config_scope": "broker", - "category": "redpanda", - "description": "Threshold, in bytes, above which a memory allocation triggers a log message." - }, - "node_id": { - "config_scope": "broker", - "category": "redpanda", - "description": "A number that uniquely identifies the broker within the cluster. If `null` (the default value), Redpanda automatically assigns an ID. If set, it must be a non-negative value.\n\n.Do not set `node_id` manually.\n[WARNING]\n====\nRedpanda assigns unique IDs automatically to prevent issues such as:\n\n- Brokers with empty disks rejoining the cluster.\n- Conflicts during recovery or scaling.\n\nManually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures.\n====\n\nBroker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed." - }, - "openssl_config_file": { - "config_scope": "broker", - "category": "redpanda" - }, - "openssl_module_directory": { - "config_scope": "broker", - "category": "redpanda" - }, - "rpc_server": { - "config_scope": "broker", - "category": "redpanda" - }, - "storage_failure_injection_config_path": { - "config_scope": "broker", - "category": "redpanda" - }, - "storage_failure_injection_enabled": { - "config_scope": "broker", - "category": "redpanda" - }, - "upgrade_override_checks": { - "config_scope": "broker", - "category": "redpanda" - }, - "schema_registry_api_tls": { - "config_scope": "broker", - "category": "schema-registry", - "description": "TLS configuration for Schema Registry API." - }, - "advertised_pandaproxy_api": { - "config_scope": "broker", - "category": "pandaproxy", - "description": "Network address for the HTTP Proxy API server to publish to clients." - }, - "client_cache_max_size": { - "config_scope": "broker", - "category": "pandaproxy", - "description": "The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage." - }, - "broker_tls": { - "config_scope": "broker", - "category": "pandaproxy-client", - "description": "TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect." - }, - "brokers": { - "config_scope": "broker", - "category": "pandaproxy-client", - "description": "Network addresses of the Kafka API servers to which the HTTP Proxy client should connect." - }, - "client_identifier": { - "config_scope": "broker", - "category": "pandaproxy-client", - "description": "Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities." - }, - "produce_ack_level": { - "config_scope": "broker", - "category": "pandaproxy-client", - "description": "Number of acknowledgments the producer requires the leader to have received before considering a request complete."
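As a rough sketch of how the FIPS-related broker settings above might be combined in the `redpanda:` section of `redpanda.yaml` (the file paths are illustrative assumptions, not defaults):

[,yaml]
----
redpanda:
  fips_mode: permissive                                        # or: disabled, enabled
  openssl_config_file: /opt/redpanda/openssl/openssl.cnf       # illustrative path
  openssl_module_directory: /opt/redpanda/lib/ossl-modules/    # illustrative path
----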
- }, - "produce_batch_record_count": { - "config_scope": "broker", - "category": "pandaproxy-client", - "description": "Number of records to batch before sending to broker." - }, - "produce_compression_type": { - "config_scope": "broker", - "category": "pandaproxy-client", - "description": "Enable or disable compression by the Kafka client. Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd]." - }, - "retries": { - "config_scope": "broker", - "category": "pandaproxy-client", - "description": "Number of times to retry a request to a broker." - }, - "redpanda.cloud_topic.enabled": { - "config_scope": "topic", - "category": "tiered-storage" - }, - "auto_create_topics_enabled": { - "description": "Allow automatic topic creation.\n\nIf you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled.", - "config_scope": "cluster" - }, - "consumer_group_lag_collection_interval_sec": { - "description": "How often to run the collection loop when <> contains `consumer_lag`.\n\nReducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings.", - "config_scope": "cluster" - }, - "controller_log_accummulation_rps_capacity_topic_operations": { - "description": "Maximum capacity of rate limit accumulation in controller topic operations limit.", - "config_scope": "cluster" - }, - "core_balancing_debounce_timeout": { - "description": "Interval, in milliseconds, between trigger and invocation of core balancing.", - "config_scope": "cluster" - }, - "datalake_coordinator_snapshot_max_delay_secs": { - "description": "Maximum amount of time the coordinator waits to snapshot after a command appears in the log.", - "config_scope": "cluster" - }, - "datalake_disk_space_monitor_enable": { - "description": "Option to explicitly disable enforcement of datalake disk space usage.", - "config_scope": "cluster" - }, - "datalake_scratch_space_soft_limit_size_percent": { - "description": "Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value.", - "config_scope": "cluster" - }, - "datalake_scheduler_max_concurrent_translations": { - "description": "The maximum number of translations that the datalake scheduler will allow to run at a given time. If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available.", - "config_scope": "cluster" - }, - "datalake_scheduler_time_slice_ms": { - "description": "Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated.", - "config_scope": "cluster" - }, - "disk_reservation_percent": { - "description": "The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well \nas when cloud cache uses a dedicated disk. 
\n\nTo avoid blocking I/O due to low disk space and the performance issues associated with SSD garbage collection, do not run disks near capacity.", - "config_scope": "cluster" - }, - "enable_sasl": { - "description": "Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <>.", - "config_scope": "cluster" - }, - "fetch_read_strategy": { - "description": "The strategy used to fulfill fetch requests.\n\n* `polling`: Repeatedly polls every partition in the request for new data. The polling interval is set by <> (deprecated).\n\n* `non_polling`: The backend is signaled when a partition has new data, so Redpanda doesn't need to repeatedly read from every partition in the fetch. Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization.\n\n* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by <> at the start of each fetch. By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency.", - "config_scope": "cluster" - }, - "iceberg_backlog_controller_i_coeff": { - "description": "Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation.", - "config_scope": "cluster" - }, - "iceberg_disable_automatic_snapshot_expiry": { - "description": "Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own.", - "config_scope": "cluster" - }, - "iceberg_rest_catalog_trust_file": { - "description": "Path to a file containing a certificate chain to trust for the REST Iceberg catalog.", - "config_scope": "cluster" - }, - "iceberg_target_backlog_size": { - "description": "Average size per partition of the datalake translation backlog that the backlog controller tries to maintain. When the backlog size is larger than the set point, the backlog controller will increase the translation scheduling group priority.", - "config_scope": "cluster" - }, - "iceberg_throttle_backlog_size_ratio": { - "description": "Ratio of the total backlog size to the disk space at which the throttle is applied to Iceberg producers.", - "config_scope": "cluster" - }, - "internal_topic_replication_factor": { - "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.", - "config_scope": "cluster" - }, - "kafka_qdc_enable": { - "description": "Enable Kafka queue depth control.", - "config_scope": "cluster" - }, - "kafka_qdc_max_depth": { - "description": "Maximum queue depth used in Kafka queue depth control.", - "config_scope": "cluster" - }, - "kafka_qdc_window_count": { - "description": "Number of windows used in Kafka queue depth control latency tracking.", - "config_scope": "cluster" - }, - "kafka_sasl_max_reauth_ms": { - "description": "The maximum time between Kafka client reauthentications.
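The cluster properties above are typically edited as flat YAML key/value pairs (for example, via `rpk cluster config edit`); a minimal sketch with illustrative values, not recommendations:

[,yaml]
----
enable_sasl: true
fetch_read_strategy: non_polling
disk_reservation_percent: 25
----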
If a client has not reauthenticated a connection within this time frame, that connection is torn down.", - "config_scope": "cluster" - }, - "leader_balancer_idle_timeout": { - "description": "Leadership rebalancing idle timeout.\n\n*Unit*: milliseconds", - "config_scope": "cluster" - }, - "log_compaction_merge_max_ranges": { - "description": "The maximum range of segments that can be processed in a single round of adjacent segment compaction. If `null` (the default value), no maximum is imposed on the number of ranges that can be processed at once. A value below 1 effectively disables adjacent merge compaction.", - "config_scope": "cluster" - }, - "log_compaction_pause_use_sliding_window": { - "description": "Pause use of sliding window compaction. Toggle to `true` _only_ when you want to force adjacent segment compaction. The memory reserved by `storage_compaction_key_map_memory` is not freed when this is set to `true`.", - "config_scope": "cluster" - }, - "log_segment_ms_max": { - "description": "Upper bound on topic `segment.ms`: higher values will be clamped to this value.\n\n*Unit*: milliseconds", - "config_scope": "cluster" - }, - "log_segment_ms_min": { - "description": "Lower bound on topic `segment.ms`: lower values will be clamped to this value.\n\n*Unit*: milliseconds", - "config_scope": "cluster" - }, - "log_segment_size": { - "description": "Default log segment size in bytes for topics which do not set `segment.bytes`.", - "config_scope": "cluster" - }, - "max_concurrent_producer_ids": { - "description": "Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting.", - "config_scope": "cluster" - }, - "metadata_dissemination_retries": { - "description": "Number of attempts to look up a topic's metadata, such as its shard, before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for example, for the transactions and consumer offsets topics) is missing. These topics are usually created on demand when users try to use the cluster for the first time, and it may take some time for the creation to happen and for the metadata to propagate to all the brokers (particularly the broker handling the request). In the meantime, Redpanda waits and retries.", - "config_scope": "cluster" - }, - "min_cleanable_dirty_ratio": { - "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. The topic property `min.cleanable.dirty.ratio` overrides this value at the topic level.", - "config_scope": "cluster" - }, - "oidc_clock_skew_tolerance": { - "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.\n\n*Unit*: seconds", - "config_scope": "cluster" - }, - "partition_manager_shutdown_watchdog_timeout": { - "description": "A threshold value to detect partitions which might have been stuck while shutting down.
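For example, the segment and compaction bounds described above could be tuned together; the following sketch uses illustrative values only, not defaults or recommendations:

[,yaml]
----
log_segment_size: 134217728        # 128 MiB for topics that do not set segment.bytes
log_segment_ms_min: 600000         # clamp topic segment.ms to at least 10 minutes
log_segment_ms_max: 31536000000    # clamp topic segment.ms to at most ~1 year
min_cleanable_dirty_ratio: 0.5     # compact once half of the closed bytes are dirty
----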
After this threshold, a watchdog in partition manager will log information about partition shutdown not making progress.\n\n*Unit*: milliseconds", - "config_scope": "cluster" - }, - "election_timeout_ms": { - "description": "Raft election timeout expressed in milliseconds.", - "config_scope": "cluster" - }, - "raft_max_buffered_follower_append_entries_bytes_per_shard": { - "description": "The total size of append entry requests that may be cached per shard, using the Raft-buffered protocol. When an entry is cached, the leader can continue serving requests because the ordering of the cached requests cannot change. When the total size of cached requests reaches the set limit, back pressure is applied to throttle producers.", - "config_scope": "cluster" - }, - "raft_max_inflight_follower_append_entries_requests_per_shard": { - "description": "The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. This property replaces `raft_max_concurrent_append_requests_per_follower`.", - "config_scope": "cluster" - }, - "reclaim_stable_window": { - "description": "If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed.", - "config_scope": "cluster" - }, - "retention_local_trim_interval": { - "description": "The period during which disk usage is checked for disk pressure, and data is optionally trimmed to meet the target.", - "config_scope": "cluster" - }, - "rpc_server_listen_backlog": { - "description": "Maximum TCP connection queue length for Kafka server and internal RPC server. If `null` (the default value), no queue length is set.", - "config_scope": "cluster" - }, - "rpk_path": { - "description": "Path to RPK binary.", - "config_scope": "cluster" - }, - "schema_registry_always_normalize": { - "description": "Always normalize schemas. If set, this overrides the `normalize` parameter in requests to the Schema Registry API.", - "config_scope": "cluster" - }, - "storage_ignore_timestamps_in_future_sec": { - "description": "The maximum number of seconds that a record's timestamp can be ahead of a Redpanda broker's clock and still be used when deciding whether to clean up the record for data retention. This property makes possible the timely cleanup of records from clients with clocks that are drastically unsynchronized relative to Redpanda.\n\nWhen determining whether to clean up a record with timestamp more than `storage_ignore_timestamps_in_future_sec` seconds ahead of the broker, Redpanda ignores the record's timestamp and instead uses a valid timestamp of another record in the same segment, or (if another record's valid timestamp is unavailable) the timestamp of when the segment file was last modified (mtime).\n\nBy default, `storage_ignore_timestamps_in_future_sec` is disabled (null).\n\n[TIP]\n====\nTo figure out whether to set `storage_ignore_timestamps_in_future_sec` for your system:\n\n. Look for logs with segments that are unexpectedly large and not being cleaned up.\n. In the logs, search for records with unsynchronized timestamps that are further into the future than tolerable by your data retention and storage settings. For example, timestamps 60 seconds or more into the future can be considered to be too unsynchronized.\n. 
If you find unsynchronized timestamps throughout your logs, determine the number of seconds that the timestamps are ahead of their actual time, and set `storage_ignore_timestamps_in_future_sec` to that value so data retention can proceed.\n. If you only find unsynchronized timestamps that are the result of transient behavior, you can disable `storage_ignore_timestamps_in_future_sec`.\n====", - "config_scope": "cluster" - }, - "tls_certificate_name_format": { - "description": "The format of the certificate's distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'.", - "config_scope": "cluster" - }, - "tls_enable_renegotiation": { - "description": "TLS client-initiated renegotiation is considered unsafe and is disabled by default. Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections, as client-initiated renegotiation was removed.", - "config_scope": "cluster" - }, - "topic_fds_per_partition": { - "description": "File descriptors required per partition replica. If topic creation results in the ratio of file descriptor limit to partition replicas being lower than this value, creation of new topics fails.", - "config_scope": "cluster" - }, - "topic_label_aggregation_limit": { - "description": "When the number of topics exceeds this limit, the topic label in generated metrics will be aggregated. If `null`, then there is no limit.", - "config_scope": "cluster" - }, - "topic_memory_per_partition": { - "description": "Required memory in bytes per partition replica when creating or altering topics. The total size of the memory pool for partitions is the total memory available to Redpanda times `topic_partitions_memory_allocation_percent`. Each partition created requires `topic_memory_per_partition` bytes from that pool. If insufficient memory is available, creating or altering topics fails.", - "config_scope": "cluster" - }, - "topic_partitions_per_shard": { - "description": "Maximum number of partition replicas per shard. If topic creation results in the ratio of partition replicas to shards being higher than this value, creation of new topics fails.", - "config_scope": "cluster" - }, - "topic_partitions_reserve_shard0": { - "description": "Reserved partition slots on shard (CPU core) 0 on each node. If this is greater than or equal to <>, no data partitions will be scheduled on shard 0.", - "config_scope": "cluster" - }, - "transaction_coordinator_delete_retention_ms": { - "description": "Delete segments older than this age. To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to <>.\n\nFor example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over.", - "config_scope": "cluster" - }, - "use_kafka_handler_scheduler_group": { - "description": "Use a separate scheduler group to handle parsing Kafka protocol requests.", - "config_scope": "cluster" - }, - "use_produce_scheduler_group": { - "description": "Use a separate scheduler group to process Kafka produce requests.", - "config_scope": "cluster" - }, - "cloud_storage_api_endpoint": { - "description": "Optional API endpoint.
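As a hedged worked example of the memory check described for `topic_memory_per_partition` (all numbers are illustrative assumptions, not defaults):

[,yaml]
----
# Suppose Redpanda has 32 GiB of memory and topic_partitions_memory_allocation_percent is 10:
#   partition memory pool = 32 GiB * 10% = 3.2 GiB
# With topic_memory_per_partition set to 4 MiB, creating or altering topics starts failing
# once roughly 3.2 GiB / 4 MiB = 800 partition replicas exist on the broker.
topic_memory_per_partition: 4194304
----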
The only instance in which you must set this value is when using a custom domain with your object storage service.\n\n- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned.\n- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>.\n- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>.", - "config_scope": "object-storage" - }, - "cloud_storage_azure_hierarchical_namespace_enabled": { - "description": "Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <>. \n\nWhen this property is not set, <> must be set, and each broker checks at startup if a hierarchical namespace is enabled. \n\nWhen set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. \n\nWhen set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. \n\nUse this setting only in emergencies where Redpanda fails to detect the hierarchical namespace status correctly.", - "config_scope": "object-storage" - }, - "cloud_storage_azure_shared_key": { - "description": "The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <>. If `null`, the property is disabled.", - "config_scope": "object-storage" - }, - "cloud_storage_backend": { - "description": "Optional object storage backend variant used to select API capabilities. If not supplied, this will be inferred from other configuration properties.", - "config_scope": "object-storage" - }, - "cloud_storage_cache_max_objects": { - "description": "Maximum number of objects that may be held in the Tiered Storage cache. This applies simultaneously with <>, and whichever limit is hit first will trigger trimming of the cache.", - "config_scope": "object-storage" - }, - "cloud_storage_cache_size": { - "description": "Maximum size of the object storage cache, in bytes.\n\nThis property works together with <> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`.", - "config_scope": "object-storage" - }, - "cloud_storage_cache_trim_threshold_percent_objects": { - "description": "Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full.", - "config_scope": "object-storage", - "version": "24.1.10" - }, - "cloud_storage_cache_trim_threshold_percent_size": { - "description": "Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. If unset, the default behavior is to start trimming when the cache is full.", - "config_scope": "object-storage", - "version": "24.1.10" - }, - "cloud_storage_cache_trim_walk_concurrency": { - "description": "The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming.
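To make the interplay between the two cache limits described above concrete, here is an illustrative sketch; the disk size and both values are assumptions, not defaults:

[,yaml]
----
# On a 2 TiB cache disk, 5% works out to roughly 102 GiB.
# Because Redpanda uses the smaller of the two calculated values,
# the 50 GiB absolute cap below is the effective limit.
cloud_storage_cache_size_percent: 5.0
cloud_storage_cache_size: 53687091200   # 50 GiB
----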
A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads.", - "config_scope": "object-storage" - }, - "cloud_storage_credentials_source": { - "description": "The source of credentials used to authenticate to object storage services.\nRequired for AWS or GCP authentication with IAM roles.\n\nTo authenticate using access keys, see <>.", - "config_scope": "object-storage" - }, - "cloud_storage_disable_archival_stm_rw_fence": { - "description": "Disables the concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems.", - "config_scope": "object-storage" - }, - "cloud_storage_disable_read_replica_loop_for_tests": { - "description": "Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production.", - "config_scope": "object-storage" - }, - "cloud_storage_disable_remote_labels_for_tests": { - "description": "If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics.", - "config_scope": "object-storage" - }, - "cloud_storage_disable_upload_consistency_checks": { - "description": "Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support.", - "config_scope": "object-storage" - }, - "cloud_storage_disable_upload_loop_for_tests": { - "description": "Begins the upload loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production.", - "config_scope": "object-storage" - }, - "cloud_storage_enable_remote_allow_gaps": { - "description": "Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. If the retained data fills the local volume, Redpanda throttles producers. Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets.", - "config_scope": "object-storage" - }, - "cloud_storage_enable_scrubbing": { - "description": "Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage.", - "config_scope": "object-storage" - }, - "cloud_storage_enable_segment_uploads": { - "description": "Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior.", - "config_scope": "object-storage" - }, - "cloud_storage_full_scrub_interval_ms": { - "description": "Interval, in milliseconds, between a final scrub and the next scrub.", - "config_scope": "object-storage" - }, - "cloud_storage_garbage_collect_timeout_ms": { - "description": "Timeout for running the cloud storage garbage collection, in milliseconds.", - "config_scope": "object-storage" - }, - "cloud_storage_graceful_transfer_timeout_ms": { - "description": "Time limit on waiting for uploads to complete before a leadership transfer. 
If this is `null`, leadership transfers proceed without waiting.", - "config_scope": "object-storage" - }, - "cloud_storage_hydrated_chunks_per_segment_ratio": { - "description": "The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed.\n\nA segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy which simply removes all unused chunks.", - "config_scope": "object-storage" - }, - "cloud_storage_hydration_timeout_ms": { - "description": "Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error.\n\nThis property does not enforce minimum or maximum bounds, so set it to a reasonable positive duration for your workload.", - "config_scope": "object-storage" - }, - "cloud_storage_idle_threshold_rps": { - "description": "The object storage request rate threshold for idle state detection. If the average request rate for the configured period is lower than this threshold, the object storage is considered idle.", - "config_scope": "object-storage" - }, - "cloud_storage_idle_timeout_ms": { - "description": "The timeout, in milliseconds, used to detect the idle state of the object storage API. If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started.", - "config_scope": "object-storage" - }, - "cloud_storage_initial_backoff_ms": { - "description": "Initial backoff time for exponential backoff algorithm (ms).", - "config_scope": "object-storage" - }, - "cloud_storage_inventory_max_hash_size_during_parse": { - "description": "Maximum bytes of hashes held in memory before writing data to disk during inventory report parsing. This affects the number of files written to disk during inventory report parsing. When this limit is reached, new files are written to disk.", - "config_scope": "object-storage" - }, - "cloud_storage_manifest_cache_size": { - "description": "Amount of memory that can be used to handle Tiered Storage metadata.", - "config_scope": "object-storage" - }, - "cloud_storage_materialized_manifest_ttl_ms": { - "description": "The interval, in milliseconds, that determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest, making it available for eviction. This only affects spillover manifests under contention.", - "config_scope": "object-storage" - }, - "cloud_storage_manifest_max_upload_interval_sec": { - "description": "Minimum interval, in seconds, between partition manifest uploads.
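A small sketch of how the hydration and idle-detection settings above might be set together; the values are illustrative, not recommendations or defaults:

[,yaml]
----
cloud_storage_hydration_timeout_ms: 600000   # fail consumer reads that wait more than 10 min for hydration
cloud_storage_idle_threshold_rps: 1.0        # below 1 request/sec the object storage API counts as idle
cloud_storage_idle_timeout_ms: 10000         # after 10 s of idleness, housekeeping jobs may start
----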
Actual time between uploads may be greater than this interval. If this is `null`, metadata is updated after each segment upload.", - "config_scope": "object-storage" - }, - "cloud_storage_manifest_upload_timeout_ms": { - "description": "Manifest upload timeout, in milliseconds.", - "config_scope": "object-storage" - }, - "cloud_storage_max_concurrent_hydrations_per_shard": { - "description": "Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory.", - "config_scope": "object-storage" - }, - "cloud_storage_max_segment_readers_per_shard": { - "description": "Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. These readers are cached across Kafka consume requests and store a readahead buffer.", - "config_scope": "object-storage" - }, - "cloud_storage_max_segments_pending_deletion_per_partition": { - "description": "The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually.", - "config_scope": "object-storage" - }, - "cloud_storage_max_throughput_per_shard": { - "description": "Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second.\nThis setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities.", - "config_scope": "object-storage" - }, - "cloud_storage_min_chunks_per_segment_threshold": { - "description": "The minimum number of chunks per segment for trimming to be enabled. If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time.", - "config_scope": "object-storage" - }, - "cloud_storage_readreplica_manifest_sync_timeout_ms": { - "description": "Timeout to check if new data is available for partitions in object storage for read replicas.", - "config_scope": "object-storage" - }, - "cloud_storage_recovery_temporary_retention_bytes_default": { - "description": "Retention in bytes for topics created during automated recovery.", - "config_scope": "object-storage" - }, - "cloud_storage_recovery_topic_validation_mode": { - "description": "Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. 
For each topic, this reports errors for all partitions, but for each partition, only the first error is reported.\n\nThis property accepts the following parameters:\n\n- `no_check`: Skips the checks for topic recovery.\n- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. Fails if there are connection issues to the object storage.\n- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects.\n\nExample: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0:\n\n```bash\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error\n```\n\nEach failing partition error message has the following format:\n\n```bash\nERROR .... [... recovery validation of {}...] - , validation not ok\n```\n\nAt the end of the process, Redpanda outputs a final ERROR message: \n\n```bash\nERROR ... ... - Stopping recovery of {} due to validation error\n```", - "config_scope": "object-storage" - }, - "cloud_storage_roles_operation_timeout_ms": { - "description": "Timeout for IAM role related operations (ms).", - "config_scope": "object-storage" - }, - "cloud_storage_scrubbing_interval_jitter_ms": { - "description": "Jitter applied to the object storage scrubbing interval.", - "config_scope": "object-storage" - }, - "cloud_storage_segment_max_upload_interval_sec": { - "description": "Time that a segment can be kept locally without uploading it to the object storage, in seconds.", - "config_scope": "object-storage" - }, - "cloud_storage_segment_size_min": { - "description": "Smallest acceptable segment size in the object storage. Default: `cloud_storage_segment_size_target`/2.", - "config_scope": "object-storage" - }, - "cloud_storage_segment_upload_timeout_ms": { - "description": "Log segment upload timeout, in milliseconds.", - "config_scope": "object-storage" - }, - "cloud_storage_spillover_manifest_max_segments": { - "description": "Maximum number of segments in the spillover manifest that can be offloaded to the object storage. This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes.", - "config_scope": "object-storage" - }, - "cloud_storage_spillover_manifest_size": { - "description": "The size of the manifest which can be offloaded to the cloud. If the size of the local manifest stored in Redpanda exceeds `cloud_storage_spillover_manifest_size` by two times the spillover mechanism will split the manifest into two parts and one will be uploaded to object storage.", - "config_scope": "object-storage" - }, - "cloud_storage_throughput_limit_percent": { - "description": "Maximum throughput used by Tiered Storage per broker expressed as a percentage of the disk bandwidth. 
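For example, the validation mode described above could be set before starting a topic recovery; the mode chosen here is only an illustration:

[,yaml]
----
# Accepted values: no_check, check_manifest_existence, check_manifest_and_segment_metadata
cloud_storage_recovery_topic_validation_mode: check_manifest_existence
----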
If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. The actual usage depends on your workload and the state of the Tiered Storage cache. This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob.", - "config_scope": "object-storage" - }, - "cloud_storage_topic_purge_grace_period_ms": { - "description": "Grace period during which the purger refuses to purge the topic.", - "config_scope": "object-storage" - }, - "cloud_storage_upload_ctrl_d_coeff": { - "description": "Derivative coefficient for upload PID controller.", - "config_scope": "object-storage" - }, - "cloud_storage_upload_ctrl_max_shares": { - "description": "Maximum number of I/O and CPU shares that archival upload can use.", - "config_scope": "object-storage" - }, - "cloud_storage_upload_ctrl_min_shares": { - "description": "Minimum number of I/O and CPU shares that archival upload can use.", - "config_scope": "object-storage" - }, - "cloud_storage_upload_ctrl_p_coeff": { - "description": "Proportional coefficient for upload PID controller.", - "config_scope": "object-storage" - }, - "cloud_storage_upload_loop_initial_backoff_ms": { - "description": "Initial backoff interval when there is nothing to upload for a partition, in milliseconds.", - "config_scope": "object-storage" - }, - "cloud_storage_upload_loop_max_backoff_ms": { - "description": "Maximum backoff interval when there is nothing to upload for a partition, in milliseconds.", - "config_scope": "object-storage" - }, - "cloud_storage_url_style": { - "description": "Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage.\n\nLeave this property unset (`null`) to use automatic configuration:\n\n* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed\n* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration\n\nSet this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format.", - "config_scope": "object-storage" } } } \ No newline at end of file diff --git a/local-antora-playbook.yml b/local-antora-playbook.yml index 9553081124..459675dda5 100644 --- a/local-antora-playbook.yml +++ b/local-antora-playbook.yml @@ -17,7 +17,10 @@ content: - url: https://github.com/redpanda-data/docs branches: [v/*, shared, site-search,'!v-end-of-life/*'] - url: https://github.com/redpanda-data/cloud-docs + branches: 'config-automation' + - url: https://github.com/redpanda-data/docs-site branches: 'main' + start_paths: [home] - url: https://github.com/redpanda-data/redpanda-labs branches: main start_paths: [docs,'*/docs'] diff --git a/modules/reference/examples/v25.2.10-properties.json b/modules/reference/examples/v25.2.10-properties.json new file mode 100644 index 0000000000..f53045a37b --- /dev/null +++ b/modules/reference/examples/v25.2.10-properties.json @@ -0,0 +1,10897 @@ +{ + "definitions": { + "client_group_quota": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/client_group_byte_rate_quota.h#L29", + "properties": { + "clients_prefix": { + "type": "string" + }, + "group_name": { + "type": "string" + }, + "quota": { + "maximum": 9223372036854775807, + "minimum": 
-9223372036854775808, + "type": "integer" + } + }, + "type": "object" + }, + "config::broker_authn_endpoint": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/broker_authn_endpoint.h#L42", + "properties": { + "address": { + "type": "string" + }, + "name": { + "type": "string" + }, + "port": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + } + }, + "type": "object" + }, + "config::endpoint_tls_config": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/endpoint_tls_config.h#L21", + "properties": { + "config": { + "$ref": "#/definitions/config::tls_config" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "config::rest_authn_endpoint": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/rest_authn_endpoint.h#L42", + "properties": { + "address": { + "type": "string" + }, + "authentication_method": { + "$ref": "#/definitions/config::rest_authn_method" + }, + "name": { + "type": "string" + }, + "port": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + } + }, + "type": "object" + }, + "config::rest_authn_method": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/rest_authn_endpoint.h#L31", + "enum": [ + "none", + "http_basic" + ] + }, + "config::tls_config": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/tls_config.h#L49", + "properties": { + "cert_file": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "key_file": { + "type": "string" + }, + "require_client_auth": { + "type": "boolean" + }, + "truststore_file": { + "type": "string" + } + }, + "type": "object" + }, + "endpoint_tls_config": { + "$ref": "#/definitions/config::endpoint_tls_config" + }, + "model::broker_endpoint": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L88", + "properties": { + "address": { + "type": "string" + }, + "name": { + "type": "string" + }, + "port": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + } + }, + "type": "object" + }, + "model::cleanup_policy_bitflags": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/fundamental.h#L72", + "enum": [ + "none", + "delete", + "compact" + ] + }, + "model::cloud_credentials_source": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L454", + "enum": [ + "config_file", + "aws_instance_metadata", + "sts", + "gcp_instance_metadata" + ] + }, + "model::cloud_storage_backend": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L481", + "enum": [ + "aws", + "google_s3_compat", + "azure", + "minio", + "unknown" + ] + }, + "model::cloud_storage_chunk_eviction_strategy": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L524", + "enum": [ + "eager", + "capped", + "predictive" + ] + }, + "model::compression": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/compression.h#L36", + "enum": [ + "none", + "gzip", + "snappy", + "lz4", + "zstd", + "producer" + ] + }, + "model::leader_balancer_mode": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L504", + "enum": [ + "greedy_balanced_shards", + "random_hill_climbing" + ] + }, + "model::node_id": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L36", + "maximum": 2147483647, 
+ "minimum": -2147483648, + "type": "integer" + }, + "model::partition_autobalancing_mode": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L463", + "enum": [ + "off", + "node_add", + "continuous" + ] + }, + "model::rack_id": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/metadata.h#L60", + "type": "string" + }, + "model::timestamp_type": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/model/timestamp.h#L30", + "enum": [ + "create_time", + "append_time" + ] + }, + "net::unresolved_address": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/net/unresolved_address.h#L27", + "properties": { + "address": { + "type": "string" + }, + "port": { + "maximum": 4294967295, + "minimum": 0, + "type": "integer" + } + } + }, + "pandaproxy::schema_registry::schema_id_validation_mode": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/pandaproxy/schema_registry/schema_id_validation.h#L22", + "enum": [ + "none", + "redpanda", + "compat" + ] + }, + "retention_duration_property": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/property.h#L878", + "maximum": 2147483647, + "minimum": -2147483648, + "type": "integer" + }, + "seed_server": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/seed_server.h#L24", + "properties": { + "host": { + "$ref": "#/definitions/net::unresolved_address" + } + }, + "type": "object" + }, + "throughput_control_group": { + "defined_in": "https://github.com/redpanda-data/redpanda/blob/dev/src/v/config/throughput_control_group.h#L36", + "properties": { + "client_id": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "tls_config": { + "$ref": "#/definitions/config::tls_config" + } + }, + "properties": { + "abort_index_segment_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "50000", + "defined_in": "src/v/config/configuration.cc", + "description": "Capacity (in number of txns) of an abort index segment.\n\nEach partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. 
This configuration controls the maximum number of aborted transactions before they are flushed to disk.", + "maximum": 4294967295, + "minimum": 0, + "name": "abort_index_segment_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "abort_timed_out_transactions_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, at which Redpanda looks for inactive transactions and aborts them.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "abort_timed_out_transactions_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "admin": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [ + { + "address": "127.0.0.1", + "port": 9644 + } + ], + "defined_in": "src/v/config/node_config.cc", + "description": "Network address for the glossterm:Admin API[] server.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n admin:\n - name: \n address: \n port: \n----", + "items": { + "type": "object" + }, + "name": "admin", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "admin_api_doc_dir": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "/usr/share/redpanda/admin-api-doc", + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the API specifications for the Admin API.", + "name": "admin_api_doc_dir", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "admin_api_require_auth": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Whether Admin API clients must provide HTTP basic authentication headers.", + "name": "admin_api_require_auth", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "admin_api_tls": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "Specifies the TLS configuration for the HTTP Admin API.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n admin_api_tls:\n - name: \n enabled: true\n cert_file: \n key_file: \n truststore_file: \n require_client_auth: true\n----", + "items": { + "type": "object" + }, + "name": "admin_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "advertised_kafka_api": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Address of the Kafka API published to the clients. If not set, the <> broker property is used. 
When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n advertised_kafka_api:\n - name: \n address: \n port: \n----", + "is_deprecated": false, + "is_topic_property": false, + "name": "advertised_kafka_api", + "type": "string", + "visibility": "user" + }, + "advertised_pandaproxy_api": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Network address for the HTTP Proxy API server to publish to clients.", + "items": { + "type": "object" + }, + "name": "advertised_pandaproxy_api", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "advertised_rpc_api": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n advertised_rpc_api:\n address: \n port: \n----", + "is_deprecated": false, + "is_topic_property": false, + "name": "advertised_rpc_api", + "type": "string", + "visibility": "user" + }, + "aggregate_metrics": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable aggregation of metrics returned by the xref:reference:internal-metrics-reference.adoc[`/metrics`] endpoint. Aggregation can simplify monitoring by providing summarized data instead of raw, per-instance metrics. Metric aggregation is performed by summing the values of samples by labels and is done when it makes sense by the shard and/or partition labels.", + "name": "aggregate_metrics", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:internal-metrics-reference.adoc[`/metrics`]" + ], + "type": "boolean" + }, + "alive_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5s", + "defined_in": "src/v/config/configuration.cc", + "description": "The amount of time since the last broker status heartbeat. After this time, a broker is considered offline and not alive.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "alive_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "alter_topic_cfg_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5s", + "defined_in": "src/v/config/configuration.cc", + "description": "The duration, in milliseconds, that Redpanda waits for the replication of entries in the controller log when executing a request to alter topic configurations. 
This timeout ensures that configuration changes are replicated across the cluster before the alteration request is considered complete.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "alter_topic_cfg_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "api_doc_dir": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "/usr/share/redpanda/proxy-api-doc", + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API.", + "name": "api_doc_dir", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "append_chunk_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "16_KiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size of direct write operations to disk in bytes. A larger chunk size can improve performance for write-heavy workloads, but increase latency for these writes as more data is collected before each write operation. A smaller chunk size can decrease write latency, but potentially increase the number of disk I/O operations.", + "name": "append_chunk_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "audit_client_max_buffer_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "16_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the number of bytes allocated by the internal audit client for audit messages. When changing this, you must disable audit logging and then re-enable it for the change to take effect. Consider increasing this if your system generates a very large number of audit records in a short amount of time.", + "name": "audit_client_max_buffer_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "audit_enabled": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables or disables audit logging. When you set this to true, Redpanda checks for an existing topic named `_redpanda.audit_log`. If none is found, Redpanda automatically creates one for you.", + "enterprise_value": true, + "is_enterprise": true, + "name": "audit_enabled", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "audit_enabled_event_types": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "management", + "authenticate", + "admin" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "List of strings in JSON style identifying the event types to include in the audit log. 
This may include any of the following: `management, produce, consume, describe, heartbeat, authenticate, schema_registry, admin`.", + "items": { + "type": "string" + }, + "name": "audit_enabled_event_types", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "audit_excluded_principals": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "List of user principals to exclude from auditing.", + "items": { + "type": "string" + }, + "name": "audit_excluded_principals", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "audit_excluded_topics": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "List of topics to exclude from auditing.", + "items": { + "type": "string" + }, + "name": "audit_excluded_topics", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "audit_failure_policy": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "reject", + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the policy for rejecting audit log messages when the audit log queue is full. If set to 'permit', then new audit messages are dropped and the operation is permitted. If set to 'reject', then the operation is rejected.", + "name": "audit_failure_policy", + "needs_restart": false, + "nullable": false, + "type": "audit_failure_policy", + "visibility": "user" + }, + "audit_log_num_partitions": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "12", + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the number of partitions used by a newly-created audit topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "audit_log_num_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "audit_log_replication_factor": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the replication factor for a newly-created audit log topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics. Setting this value is optional. 
If a value is not provided, Redpanda will use the value specified for `internal_topic_replication_factor`.", + "maximum": 32767, + "minimum": -32768, + "name": "audit_log_replication_factor", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "audit_queue_drain_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "500ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, at which Redpanda flushes the queued audit log messages to the audit log topic. Longer intervals may help prevent duplicate messages, especially in high throughput scenarios, but they also increase the risk of data loss during shutdowns where the queue is lost.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "audit_queue_drain_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "audit_queue_max_buffer_size_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the maximum amount of memory in bytes used by the audit buffer in each shard. Once this size is reached, requests to log additional audit messages will return a non-retryable error. Limiting the buffer size per shard helps prevent any single shard from consuming excessive memory due to audit log messages.", + "name": "audit_queue_max_buffer_size_per_shard", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "auto_create_topics_enabled": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Allow automatic topic creation.\n\nIf you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled.", + "name": "auto_create_topics_enabled", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "broker_tls": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": { + "cert_file": null, + "enabled": null, + "key_file": null, + "require_client_auth": null, + "truststore_file": null + }, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect.", + "name": "broker_tls", + "needs_restart": true, + "nullable": false, + "type": "object" + }, + "brokers": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "vector", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Network addresses of the Kafka API servers to which the HTTP Proxy client should connect.", + "items": { + "type": "object" + }, + "name": "brokers", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "cleanup.policy": { + "acceptable_values": "[`delete`, `compact`, `compact,delete`]", + "category": "retention-compaction", + 
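"example": ".Example\n[,bash]\n----\nrpk topic alter-config <topic-name> --set cleanup.policy=compact\n----\n\nThis is an illustrative sketch; replace `<topic-name>` with the name of your topic and choose the policy value that fits your retention needs.", + 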
"cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "log_cleanup_policy", + "description": "The cleanup policy to apply for log segments of a topic.\nWhen `cleanup.policy` is set, it overrides the cluster property xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] for the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "cleanup.policy", + "related_topics": [ + "xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "string" + }, + "client_cache_max_size": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "10", + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage.", + "name": "client_cache_max_size", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "client_identifier": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "test_client", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities.", + "name": "client_identifier", + "needs_restart": true, + "nullable": true, + "type": "string" + }, + "client_keep_alive": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "5min", + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "client_keep_alive", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "cloud_storage_access_key": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication.\nTo authenticate using IAM roles, see <>.", + "gets_restored": false, + "name": "cloud_storage_access_key", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_api_endpoint": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Optional API endpoint. 
The only instance in which you must set this value is when using a custom domain with your object storage service.\n\n- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned.\n- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>.\n- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>.", + "gets_restored": false, + "name": "cloud_storage_api_endpoint", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_api_endpoint_port": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "443", + "defined_in": "src/v/config/configuration.cc", + "description": "TLS port override.", + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_api_endpoint_port", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "cloud_storage_attempt_cluster_restore_on_bootstrap": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "When set to `true`, Redpanda automatically retrieves cluster metadata from a specified object storage bucket at the cluster's first startup. This option is ideal for orchestrated deployments, such as Kubernetes. Ensure any previous cluster linked to the bucket is fully decommissioned to prevent conflicts between Tiered Storage subsystems.", + "name": "cloud_storage_attempt_cluster_restore_on_bootstrap", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_azure_adls_endpoint": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Azure Data Lake Storage v2 endpoint override. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.\n\nIf not set, this is automatically generated using `dfs.core.windows.net` and <>.", + "gets_restored": false, + "name": "cloud_storage_azure_adls_endpoint", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_adls_port": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Azure Data Lake Storage v2 port override. See also: <>. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.", + "gets_restored": false, + "maximum": 65535, + "minimum": 0, + "name": "cloud_storage_azure_adls_port", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "cloud_storage_azure_container": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The name of the Azure container to use with Tiered Storage. 
If `null`, the property is disabled.\n\nNOTE: The container must belong to <>.", + "gets_restored": false, + "name": "cloud_storage_azure_container", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_hierarchical_namespace_enabled": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <>. \n\nWhen this property is not set, <> must be set, and each broker checks at startup if a hierarchical namespace is enabled. \n\nWhen set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. \n\nWhen set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. \n\nThis setting should be used only in emergencies where Redpanda fails to detect the correct hierarchical namespace status.", + "name": "cloud_storage_azure_hierarchical_namespace_enabled", + "needs_restart": true, + "nullable": true, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_azure_managed_identity_id": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The managed identity ID to use for access to the Azure storage account. To use Azure managed identities, you must set <> to `azure_vm_instance_metadata`. See xref:manage:security/iam-roles.adoc[IAM Roles] for more information on managed identities.", + "name": "cloud_storage_azure_managed_identity_id", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:security/iam-roles.adoc[IAM Roles]" + ], + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_shared_key": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <>. If `null`, the property is disabled.", + "gets_restored": false, + "is_secret": true, + "name": "cloud_storage_azure_shared_key", + "needs_restart": false, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_storage_account": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The name of the Azure storage account to use with Tiered Storage. If `null`, the property is disabled.", + "gets_restored": false, + "name": "cloud_storage_azure_storage_account", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_backend": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "unknown", + "defined_in": "src/v/config/configuration.cc", + "description": "Optional object storage backend variant used to select API capabilities. 
If not supplied, this will be inferred from other configuration properties.", + "gets_restored": false, + "name": "cloud_storage_backend", + "needs_restart": true, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "cloud_storage_background_jobs_quota": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5000", + "defined_in": "src/v/config/configuration.cc", + "description": "The total number of requests the object storage background jobs can make during one background housekeeping run. This is a per-shard limit. Adjusting this limit can optimize object storage traffic and impact shard performance.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "cloud_storage_background_jobs_quota", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_bucket": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS or GCP bucket that should be used to store data.\n\nWARNING: Modifying this property after writing data to a bucket could cause data loss.", + "gets_restored": false, + "name": "cloud_storage_bucket", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_cache_check_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5s", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum interval between Tiered Storage cache trims, measured in milliseconds. This setting dictates the cooldown period after a cache trim operation before another trim can occur. If a cache fetch operation requests a trim but the interval since the last trim has not yet passed, the trim will be postponed until this cooldown expires. Adjusting this interval helps manage the balance between cache size and retrieval performance.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_cache_check_interval", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_chunk_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "16_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size of chunks of segments downloaded into object storage cache. Reduces space usage by only downloading the necessary chunk from a segment.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_cache_chunk_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_directory": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. 
If not specified, Redpanda uses a default path within the data directory.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n cloud_storage_cache_directory: \n----\n\n\nReplace `` with the full path to your desired cache directory.", + "name": "cloud_storage_cache_directory", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`]" + ], + "type": "string", + "visibility": "user" + }, + "cloud_storage_cache_max_objects": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100000", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of objects that may be held in the Tiered Storage cache. This applies simultaneously with <>, and whichever limit is hit first will trigger trimming of the cache.", + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_cache_max_objects", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_num_buckets": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0", + "defined_in": "src/v/config/configuration.cc", + "description": "Divide the object storage cache across the specified number of buckets. This only works for objects with randomized prefixes. The names are not changed when the value is set to zero.", + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_cache_num_buckets", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of the object storage cache, in bytes.\n\nThis property works together with <> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`.", + "gets_restored": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_cache_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "cloud_storage_cache_size_percent": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "20.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of the cloud cache as a percentage of unreserved disk space disk_reservation_percent. The default value for this option is tuned for a shared disk configuration. Consider increasing the value if using a dedicated cache disk. The property <> controls the same limit expressed as a fixed number of bytes. 
If both `cloud_storage_cache_size` and `cloud_storage_cache_size_percent` are set, Redpanda uses the minimum of the two.", + "name": "cloud_storage_cache_size_percent", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`]" + ], + "type": "number", + "visibility": "user" + }, + "cloud_storage_cache_trim_carryover_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0_KiB", + "defined_in": "src/v/config/configuration.cc", + "description": "The cache performs a recursive directory inspection during the cache trim. The information obtained during the inspection can be carried over to the next trim operation. This parameter sets a limit on the memory occupied by objects that can be carried over from one trim to next, and allows cache to quickly unblock readers before starting the directory inspection (deprecated)", + "is_deprecated": true, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_cache_trim_carryover_bytes", + "needs_restart": false, + "nullable": false, + "type": null, + "visibility": "deprecated" + }, + "cloud_storage_cache_trim_threshold_percent_objects": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full.", + "name": "cloud_storage_cache_trim_threshold_percent_objects", + "needs_restart": false, + "nullable": true, + "type": "number", + "version": "24.1.10", + "visibility": "tunable" + }, + "cloud_storage_cache_trim_threshold_percent_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. If unset, the default behavior is to start trimming when the cache is full.", + "name": "cloud_storage_cache_trim_threshold_percent_size", + "needs_restart": false, + "nullable": true, + "type": "number", + "version": "24.1.10", + "visibility": "tunable" + }, + "cloud_storage_cache_trim_walk_concurrency": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming. 
A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads.", + "maximum": 65535, + "minimum": 0, + "name": "cloud_storage_cache_trim_walk_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_chunk_eviction_strategy": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "eager", + "defined_in": "src/v/config/configuration.cc", + "description": "Selects a strategy for evicting unused cache chunks.", + "name": "cloud_storage_chunk_eviction_strategy", + "needs_restart": false, + "nullable": false, + "type": "object", + "visibility": "tunable" + }, + "cloud_storage_chunk_prefetch": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O.", + "maximum": 65535, + "minimum": 0, + "name": "cloud_storage_chunk_prefetch", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_client_lease_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "900s", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum time to hold a cloud storage client lease (ms), after which any outstanding connection is immediately closed.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_client_lease_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_num_consumer_groups_per_upload": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of groups to upload in a single snapshot object during consumer offsets upload. 
Setting a lower value will mean a larger number of smaller snapshots are uploaded.", + "name": "cloud_storage_cluster_metadata_num_consumer_groups_per_upload", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_retries": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of attempts metadata operations may be retried.", + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_cluster_metadata_retries", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_upload_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1h", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval to wait between cluster metadata uploads.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_cluster_metadata_upload_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_upload_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for cluster metadata uploads.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_cluster_metadata_upload_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_credentials_host": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The hostname to connect to for retrieving role based credentials. Derived from <> if not set. Only required when using IAM role based access. 
To authenticate using access keys, see <>.", + "gets_restored": false, + "name": "cloud_storage_credentials_host", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_credentials_source": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "config_file", + "defined_in": "src/v/config/configuration.cc", + "description": "The source of credentials used to authenticate to object storage services.\nRequired for AWS or GCP authentication with IAM roles.\n\nTo authenticate using access keys, see <>.", + "gets_restored": false, + "name": "cloud_storage_credentials_source", + "needs_restart": true, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "cloud_storage_crl_file": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to certificate revocation list for <>.", + "name": "cloud_storage_crl_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_disable_archival_stm_rw_fence": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disables the concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems.", + "name": "cloud_storage_disable_archival_stm_rw_fence", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_archiver_manager": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Use legacy upload mode and do not start archiver_manager.", + "name": "cloud_storage_disable_archiver_manager", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cloud_storage_disable_chunk_reads": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. 
This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal.", + "name": "cloud_storage_disable_chunk_reads", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_metadata_consistency_checks": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "cloud_storage_disable_metadata_consistency_checks", + "needs_restart": true, + "nullable": false, + "type": null + }, + "cloud_storage_disable_read_replica_loop_for_tests": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production.", + "name": "cloud_storage_disable_read_replica_loop_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_remote_labels_for_tests": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics.", + "name": "cloud_storage_disable_remote_labels_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_tls": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disable TLS for all object storage connections.", + "name": "cloud_storage_disable_tls", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cloud_storage_disable_upload_consistency_checks": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support.", + "name": "cloud_storage_disable_upload_consistency_checks", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_upload_loop_for_tests": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Begins the upload loop in topic partitions with Tiered Storage enabled. 
The property exists to simplify testing and shouldn't be set in production.", + "name": "cloud_storage_disable_upload_loop_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_compacted_topic_reupload": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable re-uploading data for compacted topics.\nWhen set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage.", + "name": "cloud_storage_enable_compacted_topic_reupload", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_remote_allow_gaps": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. If the retained data fills the local volume, Redpanda throttles producers. Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets.", + "name": "cloud_storage_enable_remote_allow_gaps", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_remote_read": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Default remote read config value for new topics.\nWhen set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. With the default set to `false`, remote reads must be explicitly enabled at the topic level.", + "name": "cloud_storage_enable_remote_read", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_remote_write": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Default remote write value for new topics.\nWhen set to `true`, new topics are by default configured to upload data to object storage. 
With the default set to `false`, remote write must be explicitly enabled at the topic level.", + "name": "cloud_storage_enable_remote_write", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_scrubbing": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage.", + "name": "cloud_storage_enable_scrubbing", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_segment_merging": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables adjacent segment merging. The segments are reuploaded if there is an opportunity for that and if it will improve the tiered-storage performance", + "name": "cloud_storage_enable_segment_merging", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping]" + ], + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_segment_uploads": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior.", + "name": "cloud_storage_enable_segment_uploads", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enabled": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable object storage. 
Must be set to `true` to use Tiered Storage or Remote Read Replicas.", + "enterprise_value": true, + "is_enterprise": true, + "name": "cloud_storage_enabled", + "needs_restart": true, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "cloud_storage_full_scrub_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "12h", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, between a final scrub and the next scrub.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_full_scrub_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_garbage_collect_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for running the cloud storage garbage collection, in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_garbage_collect_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_graceful_transfer_timeout_ms": { + "aliases": [ + "cloud_storage_graceful_transfer_timeout" + ], + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5s", + "defined_in": "src/v/config/configuration.cc", + "description": "Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_graceful_transfer_timeout_ms", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_housekeeping_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5min", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, between object storage housekeeping tasks.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_housekeeping_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_hydrated_chunks_per_segment_ratio": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.7", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed.\n\nA segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. 
Note that this property is not used for the default eviction strategy, which simply removes all unused chunks.", + "name": "cloud_storage_hydrated_chunks_per_segment_ratio", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "cloud_storage_hydration_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "600s", + "defined_in": "src/v/config/configuration.cc", + "description": "Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_hydration_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_idle_threshold_rps": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10.0", + "defined_in": "src/v/config/configuration.cc", + "description": "The object storage request rate threshold for idle state detection. If the average request rate for the configured period is lower than this threshold, the object storage is considered idle.", + "name": "cloud_storage_idle_threshold_rps", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "cloud_storage_idle_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "The timeout, in milliseconds, used to detect the idle state of the object storage API. 
If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_idle_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_initial_backoff_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Initial backoff time for exponential backoff algorithm (ms).", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_initial_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_inventory_based_scrub_enabled": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Scrubber uses the latest cloud storage inventory report, if available, to check if the required objects exist in the bucket or container.", + "name": "cloud_storage_inventory_based_scrub_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_inventory_hash_path_directory": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "default": null, + "defined_in": "override", + "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n cloud_storage_inventory_hash_store: \n----", + "is_deprecated": false, + "is_topic_property": true, + "name": "cloud_storage_inventory_hash_path_directory", + "type": "string", + "visibility": "user" + }, + "cloud_storage_inventory_hash_store": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Directory to store inventory report hashes for use by cloud storage scrubber. 
If not specified, Redpanda uses a default path within the data directory.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n cloud_storage_inventory_hash_store: \n----", + "name": "cloud_storage_inventory_hash_path_directory", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_inventory_id": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "redpanda_scrubber_inventory", + "defined_in": "src/v/config/configuration.cc", + "description": "The name of the scheduled inventory job created by Redpanda to generate bucket or container inventory reports.", + "name": "cloud_storage_inventory_id", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_inventory_max_hash_size_during_parse": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "64_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum bytes of hashes held in memory before writing data to disk during inventory report parsing. This affects the number of files written to disk during inventory report parsing. When this limit is reached, new files are written to disk.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_inventory_max_hash_size_during_parse", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_inventory_report_check_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "6h", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval between checks for a new inventory report in the cloud storage bucket or container.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_inventory_report_check_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_inventory_reports_prefix": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "redpanda_scrubber_inventory", + "defined_in": "src/v/config/configuration.cc", + "description": "The prefix to the path in the cloud storage bucket or container where inventory reports will be placed.", + "name": "cloud_storage_inventory_reports_prefix", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_inventory_self_managed_report_config": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "If enabled, Redpanda will not attempt to create the scheduled report configuration using cloud storage APIs. The scrubbing process will look for reports in the expected paths in the bucket or container, and use the latest report found. 
Primarily intended for use in testing and on backends where scheduled inventory reports are not supported.", + "name": "cloud_storage_inventory_self_managed_report_config", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_manifest_cache_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Amount of memory that can be used to handle Tiered Storage metadata.", + "name": "cloud_storage_manifest_cache_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_manifest_cache_ttl_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "The time interval that determines how long the materialized manifest can stay in the cache under contention. This parameter is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses the `cloud_storage_materialized_manifest_ttl_ms` value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest, making it available for eviction. This only affects spillover manifests under contention.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_materialized_manifest_ttl_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_manifest_max_upload_interval_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60s", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum interval, in seconds, between partition manifest uploads. Actual time between uploads may be greater than this interval. If this is `null`, metadata is updated after each segment upload.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "cloud_storage_manifest_max_upload_interval_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_manifest_upload_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Manifest upload timeout, in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_manifest_upload_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_materialized_manifest_ttl_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "default": null, + "defined_in": "override", + "description": "The interval, in milliseconds, that determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. 
The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest, making it available for eviction. This only affects spillover manifests under contention.",
+    "is_deprecated": false,
+    "is_topic_property": true,
+    "name": "cloud_storage_materialized_manifest_ttl_ms",
+    "type": "string",
+    "visibility": "user"
+  },
+  "cloud_storage_max_concurrent_hydrations_per_shard": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": null,
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Maximum concurrent segment hydrations of remote data per CPU core. If unset, the value of `cloud_storage_max_connections / 2` is used, which means that half of the available object storage bandwidth can be used to download data from object storage. If the cloud storage cache is empty, every new segment reader requires a download, which leads to a 1:1 mapping between the number of partitions scanned by a fetch request and the number of parallel downloads. If this value is too large, the downloads can affect other workloads, so lower it if Tiered Storage reads cause problems. This limit only affects segment hydrations (downloads) and does not affect cached segments. If a fetch request reads from the Tiered Storage cache, its concurrency is limited only by available memory.",
+    "maximum": 4294967295,
+    "minimum": 0,
+    "name": "cloud_storage_max_concurrent_hydrations_per_shard",
+    "needs_restart": false,
+    "nullable": true,
+    "type": "integer",
+    "visibility": "tunable"
+  },
+  "cloud_storage_max_connection_idle_time_ms": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": "5s",
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated.\nThis setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections.",
+    "maximum": 17592186044415,
+    "minimum": -17592186044416,
+    "name": "cloud_storage_max_connection_idle_time_ms",
+    "needs_restart": true,
+    "nullable": false,
+    "type": "integer",
+    "visibility": "tunable"
+  },
+  "cloud_storage_max_connections": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": "20",
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Maximum simultaneous object storage connections per shard, applicable to upload and download activities.",
+    "maximum": 32767,
+    "minimum": -32768,
+    "name": "cloud_storage_max_connections",
+    "needs_restart": true,
+    "nullable": false,
+    "type": "integer",
+    "visibility": "user"
+  },
+  "cloud_storage_max_materialized_segments_per_shard": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": null,
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Maximum concurrent readers of remote data per CPU core.
If unset, value of `topic_partitions_per_shard` multiplied by 2 is used.", + "is_deprecated": true, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_max_materialized_segments_per_shard", + "needs_restart": true, + "nullable": true, + "type": null, + "visibility": "deprecated" + }, + "cloud_storage_max_partition_readers_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum partition readers per shard (deprecated)", + "is_deprecated": true, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_max_partition_readers_per_shard", + "needs_restart": false, + "nullable": true, + "type": null, + "visibility": "deprecated" + }, + "cloud_storage_max_segment_readers_per_shard": { + "aliases": [ + "cloud_storage_max_readers_per_shard" + ], + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. These readers are cached across Kafka consume requests and store a readahead buffer.", + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_max_segment_readers_per_shard", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_max_segments_pending_deletion_per_partition": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5000", + "defined_in": "src/v/config/configuration.cc", + "description": "The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually.", + "name": "cloud_storage_max_segments_pending_deletion_per_partition", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_max_throughput_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_GiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second.\nThis setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. 
In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities.", + "name": "cloud_storage_max_throughput_per_shard", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_metadata_sync_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_metadata_sync_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_min_chunks_per_segment_threshold": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum number of chunks per segment for trimming to be enabled. If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_min_chunks_per_segment_threshold", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_partial_scrub_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1h", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval between two partial scrubs of the same partition.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_partial_scrub_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_readreplica_manifest_sync_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout to check if new data is available for partitions in object storage for read replicas.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_readreplica_manifest_sync_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_reconciliation_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "cloud_storage_reconciliation_interval_ms", + "needs_restart": true, + "nullable": false, + "type": null + }, + "cloud_storage_recovery_temporary_retention_bytes_default": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_GiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Retention in bytes for topics created during automated recovery.", + "name": "cloud_storage_recovery_temporary_retention_bytes_default", + 
"needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_recovery_topic_validation_depth": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of metadata segments to validate, from newest to oldest, when <> is set to `check_manifest_and_segment_metadata`.", + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_recovery_topic_validation_depth", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_recovery_topic_validation_mode": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "check_manifest_existence", + "defined_in": "src/v/config/configuration.cc", + "description": "Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. For each topic, this reports errors for all partitions, but for each partition, only the first error is reported.\n\nThis property accepts the following parameters:\n\n- `no_check`: Skips the checks for topic recovery.\n- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. Fails if there are connection issues to the object storage.\n- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects.\n\nExample: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0:\n\n```bash\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error\n```\n\nEach failing partition error message has the following format:\n\n```bash\nERROR .... [... recovery validation of {}...] - , validation not ok\n```\n\nAt the end of the process, Redpanda outputs a final ERROR message: \n\n```bash\nERROR ... ... 
- Stopping recovery of {} due to validation error\n```", + "name": "cloud_storage_recovery_topic_validation_mode", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_region": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cloud provider region that houses the bucket or container used for storage.", + "gets_restored": false, + "name": "cloud_storage_region", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_roles_operation_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for IAM role related operations (ms).", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_roles_operation_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_scrubbing_interval_jitter_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10min", + "defined_in": "src/v/config/configuration.cc", + "description": "Jitter applied to the object storage scrubbing interval.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_scrubbing_interval_jitter_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_secret_key": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cloud provider secret key.", + "gets_restored": false, + "is_secret": true, + "name": "cloud_storage_secret_key", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_segment_max_upload_interval_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1h", + "defined_in": "src/v/config/configuration.cc", + "description": "Time that a segment can be kept locally without uploading it to the object storage, in seconds.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "cloud_storage_segment_max_upload_interval_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_segment_size_min": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Smallest acceptable segment size in the object storage. 
Default: `cloud_storage_segment_size_target`/2.",
+    "name": "cloud_storage_segment_size_min",
+    "needs_restart": false,
+    "nullable": true,
+    "type": "integer",
+    "visibility": "tunable"
+  },
+  "cloud_storage_segment_size_target": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": null,
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property.",
+    "name": "cloud_storage_segment_size_target",
+    "needs_restart": false,
+    "nullable": true,
+    "type": "integer",
+    "visibility": "tunable"
+  },
+  "cloud_storage_segment_upload_timeout_ms": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": "90s",
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Log segment upload timeout, in milliseconds.",
+    "maximum": 17592186044415,
+    "minimum": -17592186044416,
+    "name": "cloud_storage_segment_upload_timeout_ms",
+    "needs_restart": false,
+    "nullable": false,
+    "type": "integer",
+    "visibility": "tunable"
+  },
+  "cloud_storage_spillover_manifest_max_segments": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": null,
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Maximum number of segments in the spillover manifest that can be offloaded to the object storage. This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes.",
+    "name": "cloud_storage_spillover_manifest_max_segments",
+    "needs_restart": false,
+    "nullable": true,
+    "type": "integer",
+    "visibility": "tunable"
+  },
+  "cloud_storage_spillover_manifest_size": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": "64_KiB",
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "The size of the manifest that can be offloaded to the cloud. If the size of the local manifest stored in Redpanda exceeds `cloud_storage_spillover_manifest_size` by a factor of two, the spillover mechanism splits the manifest into two parts and uploads one of them to object storage.",
+    "name": "cloud_storage_spillover_manifest_size",
+    "needs_restart": false,
+    "nullable": true,
+    "type": "integer",
+    "visibility": "tunable"
+  },
+  "cloud_storage_throughput_limit_percent": {
+    "cloud_byoc_only": false,
+    "cloud_editable": false,
+    "cloud_readonly": false,
+    "cloud_supported": false,
+    "config_scope": "cluster",
+    "default": "50",
+    "defined_in": "src/v/config/configuration.cc",
+    "description": "Maximum throughput used by Tiered Storage per broker, expressed as a percentage of the disk bandwidth. If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. The actual usage depends on your workload and the state of the Tiered Storage cache.
This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob.", + "name": "cloud_storage_throughput_limit_percent", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_topic_purge_grace_period_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Grace period during which the purger refuses to purge the topic.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_topic_purge_grace_period_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_trust_file": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to certificate that should be used to validate server certificate during TLS handshake.", + "gets_restored": false, + "name": "cloud_storage_trust_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_upload_ctrl_d_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Derivative coefficient for upload PID controller.", + "name": "cloud_storage_upload_ctrl_d_coeff", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "cloud_storage_upload_ctrl_max_shares": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of I/O and CPU shares that archival upload can use.", + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_upload_ctrl_max_shares", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_ctrl_min_shares": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum number of I/O and CPU shares that archival upload can use.", + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_upload_ctrl_min_shares", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_ctrl_p_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "-2.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for upload PID controller.", + "name": "cloud_storage_upload_ctrl_p_coeff", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "cloud_storage_upload_ctrl_update_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60s", + 
"defined_in": "src/v/config/configuration.cc", + "description": "The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_upload_ctrl_update_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_loop_initial_backoff_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Initial backoff interval when there is nothing to upload for a partition, in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_upload_loop_initial_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_loop_max_backoff_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum backoff interval when there is nothing to upload for a partition, in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_upload_loop_max_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_url_style": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage.\n\nLeave this property unset (`null`) to use automatic configuration:\n\n* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed\n* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration\n\nSet this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format.", + "name": "cloud_storage_url_style", + "needs_restart": true, + "nullable": true, + "type": "s3_url_style", + "visibility": "user" + }, + "cluster_id": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "NOTE: This property is read-only in Redpanda Cloud.\n\nCluster identifier.", + "gets_restored": false, + "name": "cluster_id", + "needs_restart": false, + "nullable": true, + "type": "string" + }, + "compacted_log_segment_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "256_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size (in bytes) for each compacted log segment.", + "maximum": 
18446744073709551615, + "minimum": 0, + "name": "compacted_log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compaction.strategy": { + "acceptable_values": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "compaction_strategy", + "description": "Specifies the strategy used to determine which records to remove during log compaction. The compaction strategy controls how Redpanda identifies and removes duplicate records while preserving the latest value for each key.", + "is_deprecated": false, + "is_topic_property": true, + "name": "compaction.strategy", + "related_topics": [ + "xref:./cluster-properties.adoc#compaction_strategy[`compaction_strategy`]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "compaction_ctrl_backlog_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Target backlog size for compaction controller. If not set the max backlog size is configured to 80% of total disk space available.", + "name": "compaction_ctrl_backlog_size", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "compaction_ctrl_d_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.2", + "defined_in": "src/v/config/configuration.cc", + "description": "Derivative coefficient for compaction PID controller.", + "name": "compaction_ctrl_d_coeff", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "compaction_ctrl_i_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Integral coefficient for compaction PID controller.", + "name": "compaction_ctrl_i_coeff", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "compaction_ctrl_max_shares": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of I/O and CPU shares that compaction process can use.", + "maximum": 32767, + "minimum": -32768, + "name": "compaction_ctrl_max_shares", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compaction_ctrl_min_shares": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum number of I/O and CPU shares that compaction process can use.", + "maximum": 32767, + "minimum": -32768, + "name": "compaction_ctrl_min_shares", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compaction_ctrl_p_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + 
"cloud_supported": false, + "config_scope": "cluster", + "default": "-12.5", + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for compaction PID controller. This must be negative, because the compaction backlog should decrease when the number of compaction shares increases.", + "name": "compaction_ctrl_p_coeff", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "compaction_ctrl_update_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval (in milliseconds) for updating the controller responsible for compaction tasks. The controller uses this interval to decide how to prioritize background compaction work, which is essential for maintaining efficient storage use.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "compaction_ctrl_update_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compression.type": { + "acceptable_values": "[`none`, `gzip`, `snappy`, `lz4`, `zstd`]", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "log_compression_type", + "description": "Redpanda ignores this property and always uses producer compression semantics. If producers send compressed data, Redpanda stores and serves it as-is. If producers send uncompressed data, Redpanda stores it uncompressed.\n\nThis property exists for Apache Kafka compatibility. Configure compression in your producers instead of using this topic property.\n\nCompression reduces message size and improves throughput, but increases CPU utilization. Enable producer batching to increase compression efficiency.\n\nWhen set, this property overrides the cluster property xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] for the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "compression.type", + "related_topics": [ + "xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`]", + "xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`]", + "xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching]", + "xref:develop:produce-data/configure-producers.adoc#commonly-used-producer-configuration-options[Common producer configuration options]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "string" + }, + "confluent.key.schema.validation": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "Enable validation of the schema ID for keys on a record. This is a compatibility alias for `redpanda.key.schema.id.validation`. 
When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy.", + "is_deprecated": false, + "is_topic_property": true, + "name": "confluent.key.schema.validation", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "confluent.key.subject.name.strategy": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "The subject name strategy for keys when `confluent.key.schema.validation` is enabled. This is a compatibility alias for `redpanda.key.subject.name.strategy` that determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "is_deprecated": false, + "is_topic_property": true, + "name": "confluent.key.subject.name.strategy", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "confluent.value.schema.validation": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", + "is_deprecated": false, + "is_topic_property": true, + "name": "confluent.value.schema.validation", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "confluent.value.subject.name.strategy": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. 
This determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "is_deprecated": false, + "is_topic_property": true, + "name": "confluent.value.subject.name.strategy", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "consumer_group_lag_collection_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60s", + "defined_in": "src/v/config/configuration.cc", + "description": "How often to run the collection loop when enable_consumer_group_metrics contains consumer_lag", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "consumer_group_lag_collection_interval_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "consumer_group_lag_collection_interval_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "How often to run the collection loop when <> contains `consumer_lag`.\n\nReducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_group_lag_collection_interval_sec", + "type": "string", + "visibility": "user" + }, + "consumer_heartbeat_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "500ms", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Interval (in milliseconds) for consumer heartbeats.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_heartbeat_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_heartbeat_interval_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Interval (in milliseconds) for consumer heartbeats.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_heartbeat_interval_ms", + "type": "string", + "visibility": "user" + }, + "consumer_instance_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "minutes", + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_instance_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_instance_timeout_ms": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "How long to wait for an idle consumer before removing it. 
A consumer is considered idle when it's not making requests or heartbeats.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_instance_timeout_ms", + "type": "string", + "visibility": "user" + }, + "consumer_offsets_topic_batch_cache_enabled": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. The change is applied only after the restart.", + "name": "consumer_offsets_topic_batch_cache_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "consumer_rebalance_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "2s", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Timeout (in milliseconds) for consumer rebalance.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_rebalance_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_rebalance_timeout_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Timeout (in milliseconds) for consumer rebalance.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_rebalance_timeout_ms", + "type": "string", + "visibility": "user" + }, + "consumer_request_max_bytes": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "1_MiB", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Maximum bytes to fetch per request.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "consumer_request_max_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_request_min_bytes": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "1", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Minimum bytes to fetch per request.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "consumer_request_min_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_request_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "100ms", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Interval (in milliseconds) for consumer request timeout.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_request_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_request_timeout_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + 
"cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Interval (in milliseconds) for consumer request timeout.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_request_timeout_ms", + "type": "string", + "visibility": "user" + }, + "consumer_session_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "10s", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Timeout (in milliseconds) for consumer session.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_session_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_session_timeout_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Timeout (in milliseconds) for consumer session.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_session_timeout_ms", + "type": "string", + "visibility": "user" + }, + "controller_backend_housekeeping_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1s", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval between iterations of controller backend housekeeping loop.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "controller_backend_housekeeping_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_acls_and_users_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller ACLs and users operations limit.", + "name": "controller_log_accummulation_rps_capacity_acls_and_users_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_configuration_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller configuration operations limit.", + "name": "controller_log_accummulation_rps_capacity_configuration_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_move_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller move operations limit.", + "name": "controller_log_accummulation_rps_capacity_move_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + 
"controller_log_accummulation_rps_capacity_node_management_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller node management operations limit.", + "name": "controller_log_accummulation_rps_capacity_node_management_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_topic_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller topic operations limit.", + "name": "controller_log_accummulation_rps_capacity_topic_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_snapshot_max_age_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60s", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum amount of time before Redpanda attempts to create a controller snapshot after a new controller command appears.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "controller_snapshot_max_age_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "coproc_max_batch_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "coproc_max_batch_size", + "needs_restart": true, + "nullable": false, + "type": null + }, + "coproc_max_inflight_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "coproc_max_inflight_bytes", + "needs_restart": true, + "nullable": false, + "type": null + }, + "coproc_max_ingest_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "coproc_max_ingest_bytes", + "needs_restart": true, + "nullable": false, + "type": null + }, + "coproc_offset_flush_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "coproc_offset_flush_interval_ms", + "needs_restart": true, + "nullable": false, + "type": null + }, + "coproc_supervisor_server": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": null, + "is_deprecated": true, + "name": "coproc_supervisor_server", + "needs_restart": true, + "nullable": false, + 
"type": null + }, + "core_balancing_continuous": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "If set to `true`, move partitions between cores in runtime to maintain balanced partition distribution.", + "enterprise_value": true, + "is_enterprise": true, + "name": "core_balancing_continuous", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "core_balancing_debounce_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, between trigger and invocation of core balancing.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "core_balancing_debounce_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "core_balancing_on_core_count_change": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "If set to `true`, and if after a restart the number of cores changes, Redpanda will move partitions between cores to maintain balanced partition distribution.", + "name": "core_balancing_on_core_count_change", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cpu_profiler_enabled": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables CPU profiling for Redpanda.", + "name": "cpu_profiler_enabled", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cpu_profiler_sample_period_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100ms", + "defined_in": "src/v/config/configuration.cc", + "description": "The sample period for the CPU profiler.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cpu_profiler_sample_period_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "crash_loop_limit": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "5", + "defined_in": "src/v/config/node_config.cc", + "description": "A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. 
This limit prevents a broker from getting stuck in an infinite cycle of crashes.\n\nIf `null`, the property is disabled and no limit is applied.\n\nThe crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions:\n\n* The broker shuts down cleanly.\n* One hour passes since the last crash.\n* The `redpanda.yaml` broker configuration file is updated.\n* The `startup_log` file in the broker's <> broker property is manually deleted.", + "maximum": 4294967295, + "minimum": 0, + "name": "crash_loop_limit", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "crash_loop_sleep_sec": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <> broker property.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "crash_loop_sleep_sec", + "needs_restart": true, + "nullable": true, + "type": "integer", + "version": "v24.3.4", + "visibility": "user" + }, + "create_topic_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "2'000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout, in milliseconds, to wait for new topic creation.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "create_topic_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "dashboard_dir": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": null, + "is_deprecated": true, + "name": "dashboard_dir", + "needs_restart": true, + "nullable": false, + "type": null + }, + "data_directory": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the directory for storing Redpanda's streaming data files.", + "name": "data_directory", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "data_transforms_binary_max_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "10_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe maximum size for a deployable WebAssembly binary that the broker can store.", + "name": "data_transforms_binary_max_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_commit_interval_ms": { + "cloud_byoc_only": false, + 
"cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3s", + "defined_in": "src/v/config/configuration.cc", + "description": "The commit interval at which data transforms progress.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "data_transforms_commit_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_enabled": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables WebAssembly-powered data transforms directly in the broker. When `data_transforms_enabled` is set to `true`, Redpanda reserves memory for data transforms, even if no transform functions are currently deployed. This memory reservation ensures that adequate resources are available for transform functions when they are needed, but it also means that some memory is allocated regardless of usage.", + "name": "data_transforms_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "data_transforms_logging_buffer_capacity_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "500_KiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Buffer capacity for transform logs, per shard. Buffer occupancy is calculated as the total size of buffered log messages; that is, logs emitted but not yet produced.", + "name": "data_transforms_logging_buffer_capacity_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_logging_flush_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "500ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Flush interval for transform logs. When a timer expires, pending logs are collected and published to the `transform_logs` topic.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "data_transforms_logging_flush_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_logging_line_max_bytes": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "1_KiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Transform log lines truncate to this length. Truncation occurs after any character escaping.", + "name": "data_transforms_logging_line_max_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_per_core_memory_reservation": { + "aliases": [ + "wasm_per_core_memory_reservation" + ], + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "20_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. 
The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "name": "data_transforms_per_core_memory_reservation", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "data_transforms_per_function_memory_limit": { + "aliases": [ + "wasm_per_function_memory_limit" + ], + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "2_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "name": "data_transforms_per_function_memory_limit", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "data_transforms_read_buffer_memory_percentage": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "45", + "defined_in": "src/v/config/configuration.cc", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for read buffers.", + "name": "data_transforms_read_buffer_memory_percentage", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_runtime_limit_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3s", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum amount of runtime to start up a data transform, and the time it takes for a single record to be transformed.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "data_transforms_runtime_limit_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_write_buffer_memory_percentage": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "45", + "defined_in": "src/v/config/configuration.cc", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for write buffers.", + "name": "data_transforms_write_buffer_memory_percentage", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_coordinator_snapshot_max_delay_secs": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "15min seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum amount of time the coordinator waits to snapshot after a command appears in the log.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "datalake_coordinator_snapshot_max_delay_secs", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_disk_space_monitor_enable": { + 
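"example": [ + ".Example (illustrative only)", + "[,bash]", + "----", + "# Explicitly disable enforcement of datalake disk space usage", + "rpk cluster config set datalake_disk_space_monitor_enable false", + "----" + ], +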
"cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Option to explicitly disable enforcement of datalake disk space usage.", + "name": "datalake_disk_space_monitor_enable", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "datalake_disk_space_monitor_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "datalake_disk_space_monitor_interval", + "needs_restart": true, + "nullable": false, + "type": null + }, + "datalake_disk_usage_overage_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "2.0", + "defined_in": "src/v/config/configuration.cc", + "description": "The datalake disk usage monitor reclaims the overage multiplied by this this coefficient to compensate for data that is written during the idle period between control loop invocations.", + "name": "datalake_disk_usage_overage_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "datalake_scheduler_block_size_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "4_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size, in bytes, of each memory block reserved for record translation, as tracked by the datalake scheduler.", + "name": "datalake_scheduler_block_size_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scheduler_disk_reservation_block_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "50_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "The size, in bytes, of the block of disk reservation that the datalake manager will assign to each datalake scheduler when it runs out of local reservation.", + "name": "datalake_scheduler_disk_reservation_block_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scheduler_max_concurrent_translations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "4", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of translations that the datalake scheduler will allow to run at a given time. 
If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available.", + "name": "datalake_scheduler_max_concurrent_translations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scheduler_time_slice_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "datalake_scheduler_time_slice_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scratch_space_size_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5_GiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size, in bytes, of the amount of scratch space datalake should use.", + "name": "datalake_scratch_space_size_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scratch_space_soft_limit_size_percent": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "80.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value.", + "name": "datalake_scratch_space_soft_limit_size_percent", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "user" + }, + "datalake_translator_flush_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "32_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size, in bytes, of the amount of per translator data that may be flushed to disk before the translator will upload and remove its current on disk data.", + "name": "datalake_translator_flush_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "debug_bundle_auto_removal_seconds": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "If set, how long debug bundles are kept in the debug bundle storage directory after they are created. 
If not set, debug bundles are kept indefinitely.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "debug_bundle_auto_removal_seconds", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "debug_bundle_storage_dir": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to the debug bundle storage directory. Note: Changing this path does not clean up existing debug bundles. If not set, the debug bundle is stored in the Redpanda data directory specified in the redpanda.yaml broker configuration file.", + "name": "debug_bundle_storage_dir", + "needs_restart": false, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "debug_load_slice_warning_depth": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The recursion depth after which debug logging is enabled automatically for the log reader.", + "maximum": 4294967295, + "minimum": 0, + "name": "debug_load_slice_warning_depth", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "default_leaders_preference": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "leaders_preference", + "defined_in": "src/v/config/configuration.cc", + "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\n\nThe list can contain one or more rack IDs. 
If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\n\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.", + "enterprise_value": "Any rack preference (not `none`)", + "is_enterprise": true, + "name": "default_leaders_preference", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "string", + "visibility": "user" + }, + "default_num_windows": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10", + "defined_in": "src/v/config/configuration.cc", + "description": "Default number of quota tracking windows.", + "maximum": 32767, + "minimum": -32768, + "name": "default_num_windows", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "default_topic_partitions": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1", + "defined_in": "src/v/config/configuration.cc", + "description": "Default number of partitions per topic.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "default_topic_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "default_topic_replication": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1", + "defined_in": "src/v/config/configuration.cc", + "description": "Default replication factor for new topics.", + "maximum": 32767, + "minimum": -32768, + "name": "default_topic_replications", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "default_window_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Default quota tracking window size in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "default_window_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "delete.retention.ms": { + "acceptable_values": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "delete_retention_ms", + "description": "The retention time for tombstone records in a compacted topic. 
Redpanda removes tombstone records after the retention limit is exceeded.\n\nIf you have enabled Tiered Storage and set <> or <> for the topic, you cannot enable tombstone removal.\n\nIf both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "delete.retention.ms", + "related_topics": [ + "xref:./cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "developer_mode": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "false", + "defined_in": "src/v/config/node_config.cc", + "description": "CAUTION: Enabling `developer_mode` isn't recommended for production use.\n\nEnable developer mode, which skips most of the checks performed at startup.", + "name": "developer_mode", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "development_enable_cloud_topics": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable cloud topics.", + "is_experimental_property": true, + "name": "development_enable_cloud_topics", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "development_enable_cluster_link": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable cluster linking.", + "is_experimental_property": true, + "name": "development_enable_cluster_link", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "development_feature_property_testing_only": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Development feature property for testing only.", + "is_experimental_property": true, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "development_feature_property_testing_only", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "disable_batch_cache": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disable batch cache in log manager.", + "name": "disable_batch_cache", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "disable_cluster_recovery_loop_for_tests": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + 
"description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop. This property is used to simplify testing and should not be set in production.", + "name": "disable_cluster_recovery_loop_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "disable_metrics": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disable registering the metrics exposed on the internal `/metrics` endpoint.", + "name": "disable_metrics", + "needs_restart": true, + "nullable": false, + "type": "boolean" + }, + "disable_public_metrics": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disable registering the metrics exposed on the `/public_metrics` endpoint.", + "name": "disable_public_metrics", + "needs_restart": true, + "nullable": false, + "type": "boolean" + }, + "disk_reservation_percent": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "25.0", + "defined_in": "src/v/config/configuration.cc", + "description": "The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well \nas when cloud cache uses a dedicated disk. \n\nIt is recommended to not run disks near capacity to avoid blocking I/O due to low disk space, as well as avoiding performance issues associated with SSD garbage collection.", + "name": "disk_reservation_percent", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "election_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "Raft election timeout expressed in milliseconds.", + "is_deprecated": false, + "is_topic_property": false, + "name": "election_timeout_ms", + "type": "string", + "visibility": "user" + }, + "emergency_disable_data_transforms": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "false", + "defined_in": "src/v/config/node_config.cc", + "description": "Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. This is an emergency shutoff button.", + "name": "emergency_disable_data_transforms", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`]" + ], + "type": "boolean", + "visibility": "user" + }, + "empty_seed_starts_cluster": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "true", + "defined_in": "src/v/config/node_config.cc", + "description": "Controls how a new cluster is formed. 
All brokers in a cluster must have the same value.\n\n<> to form a cluster.\n\nTIP: For backward compatibility, `true` is the default. Redpanda recommends using `false` in production environments to prevent accidental cluster formation.", + "name": "empty_seed_starts_cluster", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_admin_api": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "enable_admin_api", + "needs_restart": true, + "nullable": false, + "type": null + }, + "enable_auto_rebalance_on_node_add": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable automatic partition rebalancing when new nodes are added", + "is_deprecated": true, + "name": "enable_auto_rebalance_on_node_add", + "needs_restart": false, + "nullable": false, + "type": null, + "visibility": "deprecated" + }, + "enable_central_config": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": null, + "is_deprecated": true, + "name": "enable_central_config", + "needs_restart": true, + "nullable": false, + "type": null + }, + "enable_cluster_metadata_upload_loop": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables cluster metadata uploads. 
Required for xref:manage:whole-cluster-restore.adoc[whole cluster restore].", + "name": "enable_cluster_metadata_upload_loop", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:manage:whole-cluster-restore.adoc[whole cluster restore]" + ], + "type": "boolean", + "visibility": "tunable" + }, + "enable_consumer_group_metrics": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [ + "group", + "partition" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "List of enabled consumer group metrics.\n\n*Accepted values:*\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.\n+\nifndef::env-cloud[]\nUse the xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`] property to control the frequency of consumer lag metric collection.\nendif::[]", + "items": { + "type": "string" + }, + "name": "enable_consumer_group_metrics", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`]", + "xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`]", + "xref:manage:monitoring.adoc#consumers[Monitor consumer group lag]" + ], + "type": "array" + }, + "enable_controller_log_rate_limiting": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Limits the write rate for the controller log.", + "name": "enable_controller_log_rate_limiting", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_coproc": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": 
"src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "enable_coproc", + "needs_restart": true, + "nullable": false, + "type": null + }, + "enable_developmental_unrecoverable_data_corrupting_features": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "", + "defined_in": "src/v/config/configuration.cc", + "description": "Development features should never be enabled in a production cluster, or any cluster where stability, data loss, or the ability to upgrade are a concern. To enable experimental features, set the value of this configuration option to the current unix epoch expressed in seconds. The value must be within one hour of the current time on the broker.Once experimental features are enabled they cannot be disabled", + "is_experimental_property": true, + "name": "enable_developmental_unrecoverable_data_corrupting_features", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "enable_host_metrics": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable exporting of some host metrics like `/proc/diskstats`, `/proc/snmp` and `/proc/net/netstat`.\n\nHost metrics are prefixed with xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] and are available on the `/metrics` endpoint.", + "name": "enable_host_metrics", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`]" + ], + "type": "boolean", + "visibility": "tunable" + }, + "enable_idempotence": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable idempotent producers.", + "name": "enable_idempotence", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_leader_balancer": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable automatic leadership rebalancing.", + "name": "enable_leader_balancer", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_metrics_reporter": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable the cluster metrics reporter. 
If `true`, the metrics reporter collects and exports to Redpanda Data a set of customer usage metrics at the interval set by <>.\n\n[NOTE]\n====\nThe cluster metrics of the metrics reporter are different from xref:manage:monitoring.adoc[monitoring metrics].\n\n* The metrics reporter exports customer usage metrics for consumption by Redpanda Data.\n* Monitoring metrics are exported for consumption by Redpanda users.\n====", + "name": "enable_metrics_reporter", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:monitoring.adoc[monitoring metrics]" + ], + "type": "boolean", + "visibility": "user" + }, + "enable_mpx_extensions": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable Redpanda extensions for MPX.", + "name": "enable_mpx_extensions", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "enable_pid_file": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable PID file. You should not need to change.", + "name": "enable_pid_file", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "enable_rack_awareness": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable rack-aware replica assignment.", + "name": "enable_rack_awareness", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_sasl": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <>.", + "name": "enable_sasl", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_schema_id_validation": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "none", + "defined_in": "src/v/config/configuration.cc", + "description": "Mode to enable server-side schema ID validation.\n\n*Accepted values:*\n\n* `none`: Schema validation is disabled (no schema ID checks are done). Associated topic properties cannot be modified.\n* `redpanda`: Schema validation is enabled. Only Redpanda topic properties are accepted.\n* `compat`: Schema validation is enabled. 
Both Redpanda and compatible topic properties are accepted.", + "enterprise_value": [ + "compat", + "redpanda" + ], + "is_enterprise": true, + "name": "enable_schema_id_validation", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "object", + "visibility": "user" + }, + "enable_transactions": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable transactions (atomic writes).", + "name": "enable_transactions", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_usage": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables the usage tracking mechanism, storing windowed history of kafka/cloud_storage metrics over time.", + "name": "enable_usage", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "features_auto_enable": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Whether new feature flags auto-activate after upgrades (true) or must wait for manual activation via the Admin API (false).", + "name": "features_auto_enable", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "fetch_max_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "55_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes returned in a fetch request.", + "name": "fetch_max_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "fetch_pid_d_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Derivative coefficient for fetch PID controller.", + "name": "fetch_pid_d_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_pid_i_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.01", + "defined_in": "src/v/config/configuration.cc", + "description": "Integral coefficient for fetch PID controller.", + "name": "fetch_pid_i_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_pid_max_debounce_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100ms", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum debounce time the fetch PID controller will apply, in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "fetch_pid_max_debounce_ms", + 
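"example": [ + ".Example (illustrative value)", + "[,bash]", + "----", + "# Allow the fetch PID controller to debounce for up to 200 ms", + "rpk cluster config set fetch_pid_max_debounce_ms 200", + "----" + ], +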
"needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "fetch_pid_p_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for fetch PID controller.", + "name": "fetch_pid_p_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_pid_target_utilization_fraction": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.2", + "defined_in": "src/v/config/configuration.cc", + "description": "A fraction, between 0 and 1, for the target reactor utilization of the fetch scheduling group.", + "name": "fetch_pid_target_utilization_fraction", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_read_strategy": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "non_polling", + "defined_in": "src/v/config/configuration.cc", + "description": "The strategy used to fulfill fetch requests.\n\n* `polling`: Repeatedly polls every partition in the request for new data. The polling interval is set by <> (deprecated).\n\n* `non_polling`: The backend is signaled when a partition has new data, so Redpanda doesn't need to repeatedly read from every partition in the fetch. Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization.\n\n* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by <> at the start of each fetch. By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency.", + "name": "fetch_read_strategy", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "fetch_reads_debounce_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Time to wait for the next read in fetch requests when the requested minimum bytes was not reached.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "fetch_reads_debounce_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "fetch_session_eviction_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60s", + "defined_in": "src/v/config/configuration.cc", + "description": "Time duration after which the inactive fetch session is removed from the fetch session cache. 
Fetch sessions are used to implement the incremental fetch requests where a consumer does not send all requested partitions to the server but the server tracks them for the consumer.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "fetch_session_eviction_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "find_coordinator_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "find_coordinator_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": null + }, + "fips_mode": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "disabled", + "defined_in": "src/v/config/node_config.cc", + "description": "Controls whether Redpanda starts in FIPS mode. This property allows for three values: \n\n* Disabled - Redpanda does not start in FIPS mode.\n\n* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods.\n\n* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. If the file does not exist or does not return `1`, Redpanda immediately exits.", + "name": "fips_mode", + "needs_restart": true, + "nullable": false, + "type": "fips_mode_flag", + "visibility": "user" + }, + "flush.bytes": { + "acceptable_values": "bytes (integer)", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "flush_bytes", + "description": "The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested.", + "is_deprecated": false, + "is_topic_property": true, + "name": "flush.bytes", + "related_topics": [ + "xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "flush.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "flush_ms", + "description": "The maximum delay (in ms) between two subsequent fsyncs. 
After this delay, the log is automatically fsynced.", + "is_deprecated": false, + "is_topic_property": true, + "name": "flush.ms", + "related_topics": [ + "xref:./cluster-properties.adoc#flush_ms[`flush_ms`]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "full_raft_configuration_recovery_pattern": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "full_raft_configuration_recovery_pattern", + "needs_restart": true, + "nullable": false, + "type": null + }, + "group_initial_rebalance_delay": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3s", + "defined_in": "src/v/config/configuration.cc", + "description": "Delay added to the rebalance phase to wait for new members.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_initial_rebalance_delay", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "group_max_session_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "300s", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_max_session_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer" + }, + "group_min_session_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "6000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum allowed session timeout for registered consumers. 
Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_min_session_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer" + }, + "group_new_member_join_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30'000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for new member joins.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_new_member_join_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "group_offset_retention_check_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10min", + "defined_in": "src/v/config/configuration.cc", + "description": "Frequency rate at which the system should check for expired group offsets.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_offset_retention_check_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "group_offset_retention_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "24h * 7", + "defined_in": "src/v/config/configuration.cc", + "description": "Consumer group offset retention seconds. To disable offset retention, set this to null.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "group_offset_retention_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "group_topic_partitions": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "16", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of partitions in the internal group membership topic.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "group_topic_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "health_manager_tick_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3min", + "defined_in": "src/v/config/configuration.cc", + "description": "How often the health manager runs.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "health_manager_tick_interval", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "health_monitor_max_metadata_age": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum age of the metadata cached in the health monitor of a non-controller broker.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "health_monitor_max_metadata_age", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "health_monitor_tick_interval": { + "cloud_byoc_only": false, + "cloud_editable": 
false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "How often health monitor refresh cluster state", + "is_deprecated": true, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "health_monitor_tick_interval", + "needs_restart": false, + "nullable": false, + "type": null, + "visibility": "deprecated" + }, + "http_authentication": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": [ + "BASIC" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of supported HTTP authentication mechanisms.\n\n*Accepted values:*\n\n* `BASIC`: Basic authentication\n* `OIDC`: OpenID Connect", + "enterprise_value": "OIDC", + "is_enterprise": true, + "items": { + "type": "string" + }, + "name": "http_authentication", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "array", + "visibility": "user" + }, + "iceberg_backlog_controller_i_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.005", + "defined_in": "src/v/config/configuration.cc", + "description": "Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation.", + "name": "iceberg_backlog_controller_i_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "iceberg_backlog_controller_p_coeff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.00001", + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for the Iceberg backlog controller. Number of shares assigned to the datalake scheduling group will be proportional to the backlog size error. A negative value means larger and faster changes in the number of shares in the datalake scheduling group.", + "name": "iceberg_backlog_controller_p_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "iceberg_catalog_base_location": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "redpanda-iceberg-catalog", + "defined_in": "src/v/config/configuration.cc", + "description": "Base path for the cloud-storage-object-backed Iceberg filesystem catalog. After Iceberg is enabled, do not change this value.", + "name": "iceberg_catalog_base_location", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_catalog_commit_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1min milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The frequency at which the Iceberg coordinator commits topic files to the catalog. 
This is the interval between commit transactions across all topics monitored by the coordinator, not the interval between individual commits.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_catalog_commit_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "iceberg_catalog_type": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "object_storage", + "defined_in": "src/v/config/configuration.cc", + "description": "Iceberg catalog type that Redpanda will use to commit table metadata updates. Supported types: `rest`, `object_storage`.\nNOTE: You must set <> at the same time that you set `iceberg_catalog_type` to `rest`.", + "name": "iceberg_catalog_type", + "needs_restart": true, + "nullable": false, + "type": "datalake_catalog_type", + "visibility": "user" + }, + "iceberg_default_partition_spec": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "(hour(redpanda.timestamp))", + "defined_in": "src/v/config/configuration.cc", + "description": "ifndef::env-cloud[]\nDefault value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`] topic property that determines the partition spec for the Iceberg table corresponding to the topic.\nendif::[]\n\nifdef::env-cloud[]\nDefault value for the `redpanda.iceberg.partition.spec` topic property that determines the partition spec for the Iceberg table corresponding to the topic.\nendif::[]", + "name": "iceberg_default_partition_spec", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_delete": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.", + "name": "iceberg_delete", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "iceberg_disable_automatic_snapshot_expiry": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own.", + "name": "iceberg_disable_automatic_snapshot_expiry", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "iceberg_disable_snapshot_tagging": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Whether to disable tagging of Iceberg snapshots. 
These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags.", + "name": "iceberg_disable_snapshot_tagging", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "iceberg_enabled": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "ifndef::env-cloud[]\nEnables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`] topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.\nendif::[]\nifdef::env-cloud[]\nEnables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the `redpanda.iceberg.enabled` topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.\nendif::[]", + "enterprise_value": true, + "is_enterprise": true, + "name": "iceberg_enabled", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`]" + ], + "type": "boolean", + "visibility": "user" + }, + "iceberg_invalid_record_action": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "dlq_table", + "defined_in": "src/v/config/configuration.cc", + "description": "ifndef::env-cloud[]\nDefault value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`] topic property.\nendif::[]\nifdef::env-cloud[]\nDefault value for the `redpanda.iceberg.invalid.record.action` topic property.\nendif::[]", + "name": "iceberg_invalid_record_action", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`]", + "self-managed-only: xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_latest_schema_cache_ttl_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5min milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The TTL for caching the latest schema during translation when using the xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`] iceberg mode. 
This setting controls how long the latest schema remains cached during translation, which affects schema refresh behavior and performance.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_latest_schema_cache_ttl_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`]" + ], + "type": "integer", + "visibility": "tunable" + }, + "iceberg_rest_catalog_authentication_mode": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "none", + "defined_in": "src/v/config/configuration.cc", + "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditionally, and no attempts are made to refresh the token. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to the AWS Glue catalog with SigV4.", + "name": "iceberg_rest_catalog_authentication_mode", + "needs_restart": true, + "nullable": false, + "type": "datalake_catalog_auth_mode", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_access_key": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS access key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] when using aws_sigv4 authentication mode.", + "name": "iceberg_rest_catalog_aws_access_key", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_credentials_source": { + "aliases": [ + "iceberg_rest_catalog_aws_credentials_source" + ], + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Source of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to `cloud_storage_credentials_source` when using aws_sigv4 authentication mode.
Accepted values: config_file, aws_instance_metadata, sts, gcp_instance_metadata, azure_vm_instance_metadata, azure_aks_oidc_federation.", + "name": "iceberg_rest_catalog_credentials_source", + "needs_restart": true, + "nullable": true, + "type": "object", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_region": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS region for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] when using aws_sigv4 authentication mode.", + "name": "iceberg_rest_catalog_aws_region", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_secret_key": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS secret key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`] when using aws_sigv4 authentication mode.", + "is_secret": true, + "name": "iceberg_rest_catalog_aws_secret_key", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_service_name": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "glue", + "defined_in": "src/v/config/configuration.cc", + "description": "AWS service name for SigV4 signing when using aws_sigv4 authentication mode. Defaults to 'glue' for AWS Glue Data Catalog. Can be changed to support other AWS services that provide Iceberg REST catalog APIs.", + "name": "iceberg_rest_catalog_aws_service_name", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_base_location": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": "Base URI for the Iceberg REST catalog. If unset, the REST catalog server determines the location. Some REST catalogs, like AWS Glue, require the client to set this. After Iceberg is enabled, do not change this value.", + "name": "iceberg_rest_catalog_base_location", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_client_id": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. 
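As a minimal sketch only, the related `oauth2` settings might appear together in a cluster configuration like the following (the endpoint and credential values are placeholders, not defaults):

.Example
[,yaml]
----
iceberg_catalog_type: rest
iceberg_rest_catalog_endpoint: https://catalog.example.com
iceberg_rest_catalog_authentication_mode: oauth2
iceberg_rest_catalog_client_id: <client-id>
iceberg_rest_catalog_client_secret: <client-secret>
----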
Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "name": "iceberg_rest_catalog_client_id", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_client_secret": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Secret used with the client ID to query the OAuth token endpoint for Iceberg REST catalog authentication. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "is_secret": true, + "name": "iceberg_rest_catalog_client_secret", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_credentials_source": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", + "is_deprecated": false, + "is_topic_property": false, + "name": "iceberg_rest_catalog_credentials_source", + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_crl": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The contents of a certificate revocation list for `iceberg_rest_catalog_trust`. 
Takes precedence over `iceberg_rest_catalog_crl_file`.", + "is_secret": true, + "name": "iceberg_rest_catalog_crl", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_crl_file": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to certificate revocation list for `iceberg_rest_catalog_trust_file`.", + "name": "iceberg_rest_catalog_crl_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_endpoint": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "URL of Iceberg REST catalog endpoint.\nNOTE: If you set <> to `rest`, you must also set this property at the same time.", + "name": "iceberg_rest_catalog_endpoint", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_oauth2_scope": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "PRINCIPAL_ROLE:ALL", + "defined_in": "src/v/config/configuration.cc", + "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`", + "name": "iceberg_rest_catalog_oauth2_scope", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_oauth2_server_uri": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", + "name": "iceberg_rest_catalog_oauth2_server_uri", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_request_timeout_ms": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_rest_catalog_request_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "iceberg_rest_catalog_token": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Token used to access the REST Iceberg catalog. 
If the token is present, Redpanda ignores credentials stored in the properties <> and <>.\n\nRequired if <> is set to `bearer`.", + "is_secret": true, + "name": "iceberg_rest_catalog_token", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_trust": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The contents of a certificate chain to trust for the REST Iceberg catalog.\nifndef::env-cloud[]\nTakes precedence over <>.\nendif::[]", + "is_secret": true, + "name": "iceberg_rest_catalog_trust", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_trust_file": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to a file containing a certificate chain to trust for the REST Iceberg catalog.", + "name": "iceberg_rest_catalog_trust_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_warehouse": { + "aliases": [ + "iceberg_rest_catalog_prefix" + ], + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Warehouse to use for the Iceberg REST catalog. Redpanda queries the catalog to retrieve warehouse-specific configurations and automatically configures settings like the appropriate prefix. The prefix is appended to the catalog path (for example, `/v1/\\{prefix}/namespaces`).", + "name": "iceberg_rest_catalog_warehouse", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_target_backlog_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Average size per partition of the datalake translation backlog that the backlog controller tries to maintain. When the backlog size is larger than the set point, the backlog controller will increase the translation scheduling group priority.", + "maximum": 4294967295, + "minimum": 0, + "name": "iceberg_target_backlog_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "iceberg_target_lag_ms": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Default value for the redpanda.iceberg.target.lag.ms topic property, which controls how often data in an Iceberg table is refreshed with new data from the corresponding Redpanda topic. 
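As an illustrative sketch only, the cluster-wide default could be set alongside enabling the feature; the `60000` value below is an example, not a recommendation:

.Example
[,yaml]
----
iceberg_enabled: true
iceberg_target_lag_ms: 60000
----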
Redpanda attempts to commit all the data produced to the topic within the lag target in a best-effort fashion, subject to resource availability.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_target_lag_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-target-lag-ms[`redpanda.iceberg.target.lag.ms`]" + ], + "type": "integer", + "visibility": "user" + }, + "iceberg_throttle_backlog_size_ratio": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.3", + "defined_in": "src/v/config/configuration.cc", + "description": "Ratio of the total backlog size to the disk space at which throttling is applied to Iceberg producers.", + "name": "iceberg_throttle_backlog_size_ratio", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "iceberg_topic_name_dot_replacement": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Optional replacement string for dots in topic names when deriving Iceberg table names, useful when downstream systems do not permit dots in table names. The replacement string cannot contain dots. Be careful to avoid table name collisions caused by the replacement. If an Iceberg topic with dots in the name exists in the cluster, the value of this property should not be changed.", + "name": "iceberg_topic_name_dot_replacement", + "needs_restart": false, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "id_allocator_batch_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "The ID allocator allocates messages in batches (each batch is one log record) and then serves requests from memory without touching the log until the batch is exhausted.", + "maximum": 32767, + "minimum": -32768, + "name": "id_allocator_batch_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "id_allocator_log_capacity": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100", + "defined_in": "src/v/config/configuration.cc", + "description": "Capacity of the `id_allocator` log in number of batches.
Once the log reaches this capacity, `id_allocator_stm` truncates the log's prefix.", + "maximum": 32767, + "minimum": -32768, + "name": "id_allocator_log_capacity", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "id_allocator_replication": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "id_allocator_replication", + "needs_restart": true, + "nullable": false, + "type": null + }, + "initial.retention.local.target.bytes": { + "acceptable_values": "bytes (integer)", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "initial_retention_local_target_bytes", + "description": "A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.", + "is_deprecated": false, + "is_topic_property": true, + "name": "initial.retention.local.target.bytes", + "related_topics": [ + "xref:./cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`]", + "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "integer" + }, + "initial.retention.local.target.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "initial_retention_local_target_ms", + "description": "A time-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.", + "is_deprecated": false, + "is_topic_property": true, + "name": "initial.retention.local.target.ms", + "related_topics": [ + "xref:./cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`]", + "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "integer" + }, + "initial_retention_local_target_bytes_default": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Initial local retention size target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled.
If no initial local target retention is configured, then all locally-retained data will be delivered to the learner when it joins the partition replica set.", + "name": "initial_retention_local_target_bytes_default", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "integer", + "visibility": "user" + }, + "initial_retention_local_target_ms_default": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Initial local retention time target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data will be delivered to the learner when it joins the partition replica set.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "initial_retention_local_target_ms_default", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "integer", + "visibility": "user" + }, + "internal_topic_replication_factor": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3", + "defined_in": "src/v/config/configuration.cc", + "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "internal_topic_replication_factor", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "join_retry_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5s", + "defined_in": "src/v/config/configuration.cc", + "description": "Time between cluster join retries in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "join_retry_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_admin_topic_api_rate": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_admin_topic_api_rate", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_api": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [ + { + "address": "127.0.0.1", + "port": 9092 + } + ], + "defined_in": "src/v/config/node_config.cc", + "description": "IP address and port of the Kafka API endpoint that handles requests.
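For example, a broker configuration sketch defining two listeners might look like the following (the listener names, addresses, and ports are illustrative):

.Example
[,yaml]
----
redpanda:
  kafka_api:
    - name: internal
      address: 0.0.0.0
      port: 9092
    - name: external
      address: 0.0.0.0
      port: 9093
----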
Supports multiple listeners with different configurations.", + "items": { + "type": "object" + }, + "name": "kafka_api", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]", + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_api_tls": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "Transport Layer Security (TLS) configuration for the Kafka API endpoint.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n kafka_api_tls:\n - name: \n enabled: true\n cert_file: \n key_file: \n truststore_file: \n require_client_auth: false\n----", + "items": { + "type": "object" + }, + "name": "kafka_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "kafka_batch_max_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of a batch processed by the server. If the batch is compressed, the limit applies to the compressed batch size.", + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_batch_max_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_client_group_byte_rate_quota": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_client_group_byte_rate_quota", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_client_group_fetch_byte_rate_quota": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_client_group_fetch_byte_rate_quota", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_connection_rate_limit": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum connections per second for one core. 
If `null` (the default), then the number of connections per second is unlimited.", + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_connection_rate_limit", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "kafka_connection_rate_limit_overrides": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "Overrides the maximum connections per second for one core for the specified IP addresses (for example, `['127.0.0.1:90', '50.20.1.1:40']`)", + "items": { + "type": "string" + }, + "name": "kafka_connection_rate_limit_overrides", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_connections_max": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Kafka client connections per broker. If `null`, the property is disabled.", + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_connections_max", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_connections_max_overrides": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of IP addresses for which Kafka client connection limits are overridden and don't apply. For example, `(['127.0.0.1:90', '50.20.1.1:40']).`.", + "items": { + "type": "string" + }, + "name": "kafka_connections_max_overrides", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_connections_max_per_ip": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Kafka client connections per IP address, per broker. If `null`, the property is disabled.", + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_connections_max_per_ip", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_enable_authorization": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>.\n\n* `null`: Ignored. 
Authorization is enabled with `enable_sasl`: `true`\n* `true`: authorization is required.\n* `false`: authorization is disabled.", + "name": "kafka_enable_authorization", + "needs_restart": false, + "nullable": true, + "type": "boolean", + "visibility": "user" + }, + "kafka_enable_describe_log_dirs_remote_storage": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Whether to include Tiered Storage as a special remote:// directory in `DescribeLogDirs Kafka` API requests.", + "name": "kafka_enable_describe_log_dirs_remote_storage", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "kafka_enable_partition_reassignment": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable the Kafka partition reassignment API.", + "name": "kafka_enable_partition_reassignment", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "kafka_group_recovery_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30'000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Kafka group recovery timeout.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_group_recovery_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "kafka_max_bytes_per_fetch": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "64_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Limit fetch responses to this many bytes, even if the total of partition bytes limits is higher.", + "name": "kafka_max_bytes_per_fetch", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_memory_batch_size_estimate_for_fetch": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "The size of the batch used to estimate memory consumption for fetch requests, in bytes. Smaller sizes allow more concurrent fetch requests per shard. 
Larger sizes prevent running out of memory because of too many concurrent fetch requests.", + "name": "kafka_memory_batch_size_estimate_for_fetch", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "kafka_memory_share_for_fetch": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.5", + "defined_in": "src/v/config/configuration.cc", + "description": "The share of Kafka subsystem memory that can be used for fetch read buffers, as a fraction of the Kafka subsystem memory amount.", + "name": "kafka_memory_share_for_fetch", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "user" + }, + "kafka_mtls_principal_mapping_rules": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Principal mapping rules for mTLS authentication on the Kafka API. If `null`, the property is disabled.", + "items": { + "type": "string" + }, + "name": "kafka_mtls_principal_mapping_rules", + "needs_restart": false, + "nullable": true, + "type": "array", + "visibility": "user" + }, + "kafka_nodelete_topics": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "_redpanda.audit_log", + "__consumer_offsets", + "_schemas" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of topics that are protected from deletion and configuration changes by Kafka clients. Set by default to a list of Redpanda internal topics.", + "items": { + "type": "string" + }, + "name": "kafka_nodelete_topics", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:consume-data/consumer-offsets.adoc[Consumer Offsets]", + "xref:manage:schema-registry.adoc[Schema Registry]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_noproduce_topics": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of topics that are protected from being produced to by Kafka clients. Set by default to a list of Redpanda internal topics.", + "items": { + "type": "string" + }, + "name": "kafka_noproduce_topics", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "kafka_produce_batch_validation": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "relaxed", + "defined_in": "src/v/config/configuration.cc", + "description": "Controls the level of validation performed on batches produced to Redpanda. When set to `legacy`, there is minimal validation performed on the produce path. When set to `relaxed`, full validation is performed on uncompressed batches and on compressed batches with the `max_timestamp` value left unset. When set to `strict`, full validation of uncompressed and compressed batches is performed. 
This should be the default in environments where producing clients are not trusted.", + "name": "kafka_produce_batch_validation", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "kafka_qdc_depth_alpha": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.8", + "defined_in": "src/v/config/configuration.cc", + "description": "Smoothing factor for Kafka queue depth control depth tracking.", + "name": "kafka_qdc_depth_alpha", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "kafka_qdc_depth_update_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "7s", + "defined_in": "src/v/config/configuration.cc", + "description": "Update frequency for Kafka queue depth control.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_qdc_depth_update_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_enable": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable kafka queue depth control.", + "name": "kafka_qdc_enable", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "kafka_qdc_idle_depth": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10", + "defined_in": "src/v/config/configuration.cc", + "description": "Queue depth when idleness is detected in Kafka queue depth control.", + "name": "kafka_qdc_idle_depth", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_latency_alpha": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.002", + "defined_in": "src/v/config/configuration.cc", + "description": "Smoothing parameter for Kafka queue depth control latency tracking.", + "name": "kafka_qdc_latency_alpha", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "kafka_qdc_max_depth": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum queue depth used in Kafka queue depth control.", + "name": "kafka_qdc_max_depth", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_max_latency_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "80ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum latency threshold for Kafka queue depth control depth tracking.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_qdc_max_latency_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "kafka_qdc_min_depth": { + "cloud_byoc_only": false, + 
"cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum queue depth used in Kafka queue depth control.", + "name": "kafka_qdc_min_depth", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_window_count": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "12", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of windows used in Kafka queue depth control latency tracking.", + "name": "kafka_qdc_window_count", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_window_size_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1500ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Window size for Kafka queue depth control latency tracking.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_qdc_window_size_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_quota_balancer_min_shard_throughput_bps": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_quota_balancer_min_shard_throughput_bps", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_quota_balancer_min_shard_throughput_ratio": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_quota_balancer_min_shard_throughput_ratio", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_quota_balancer_node_period": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_quota_balancer_node_period_ms", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_quota_balancer_window": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_quota_balancer_window_ms", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_request_max_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of a single request processed using the Kafka API.", + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_request_max_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_rpc_server_stream_recv_buf": { + "cloud_byoc_only": false, + 
"cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of the user-space receive buffer. If `null`, this limit is not applied.", + "name": "kafka_rpc_server_stream_recv_buf", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "kafka_rpc_server_tcp_recv_buf": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the Kafka server TCP receive buffer. If `null`, the property is disabled.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "kafka_rpc_server_tcp_recv_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "kafka_rpc_server_tcp_send_buf": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the Kafka server TCP transmit buffer. If `null`, the property is disabled.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "kafka_rpc_server_tcp_send_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "kafka_sasl_max_reauth_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum time between Kafka client reauthentications. If a client has not reauthenticated a connection within this time frame, that connection is torn down.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_sasl_max_reauth_ms", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "kafka_schema_id_validation_cache_capacity": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "128", + "defined_in": "src/v/config/configuration.cc", + "description": "Per-shard capacity of the cache for validating schema IDs.", + "name": "kafka_schema_id_validation_cache_capacity", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_tcp_keepalive_idle_timeout_seconds": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "120s", + "defined_in": "src/v/config/configuration.cc", + "description": "TCP keepalive idle timeout in seconds for Kafka connections. This describes the timeout between TCP keepalive probes that the remote site successfully acknowledged. Refers to the TCP_KEEPIDLE socket option. 
When changed, applies to new connections only.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "kafka_tcp_keepalive_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_tcp_keepalive_probe_interval_seconds": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60s", + "defined_in": "src/v/config/configuration.cc", + "description": "TCP keepalive probe interval in seconds for Kafka connections. This describes the timeout between unacknowledged TCP keepalives. Refers to the TCP_KEEPINTVL socket option. When changed, applies to new connections only.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "kafka_tcp_keepalive_probe_interval_seconds", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_tcp_keepalive_probes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3", + "defined_in": "src/v/config/configuration.cc", + "description": "TCP keepalive unacknowledged probes until the connection is considered dead for Kafka connections. Refers to the TCP_KEEPCNT socket option. When changed, applies to new connections only.", + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_tcp_keepalive_probes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_throughput_control": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "List of throughput control groups that define exclusions from broker-wide throughput limits. Clients excluded from broker-wide throughput limits are still potentially subject to client-specific throughput limits.\n\nEach throughput control group consists of:\n\n* `name` (optional) - any unique group name\n* `client_id` - regex to match client_id\n\nExample values:\n\n* `[{'name': 'first_group','client_id': 'client1'}, {'client_id': 'consumer-\\d+'}]`\n* `[{'name': 'catch all'}]`\n* `[{'name': 'missing_id', 'client_id': '+empty'}]`\n\nA connection is assigned the first matching group and is then excluded from throughput control. A `name` is not required, but can help you categorize the exclusions. Specifying `+empty` for the `client_id` will match on clients that opt not to send a `client_id`. You can also optionally omit the `client_id` and specify only a `name`, as shown. 
In this situation, all clients will match the rule and Redpanda will exclude them all from broker-wide throughput control.", + "items": { + "type": "object" + }, + "name": "kafka_throughput_control", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage throughput]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_throughput_controlled_api_keys": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "produce", + "fetch" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "List of Kafka API keys that are subject to cluster-wide and node-wide throughput limit control.", + "items": { + "type": "string" + }, + "name": "kafka_throughput_controlled_api_keys", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "kafka_throughput_limit_node_in_bps": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum rate of all ingress Kafka API traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). If `null`, the property is disabled, and traffic is not limited.", + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_throughput_limit_node_in_bps", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_throughput_limit_node_out_bps": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum rate of all egress Kafka traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). If `null`, the property is disabled, and traffic is not limited.", + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_throughput_limit_node_out_bps", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_throughput_replenish_threshold": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold for refilling the token bucket as part of enforcing throughput limits.\n\nThis threshold is evaluated with each request for data. When the number of tokens to replenish exceeds this threshold, then tokens are added to the token bucket. This ensures that the atomic token counter is not updated on every request.
The range for this threshold is automatically clamped to the corresponding throughput limit for ingress and egress.", + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_throughput_replenish_threshold", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_in_bps[`kafka_throughput_limit_node_in_bps`]", + "xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_out_bps[`kafka_throughput_limit_node_out_bps`]", + "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage Throughput]" + ], + "type": "integer", + "visibility": "tunable" + }, + "kafka_throughput_throttling_v2": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "kafka_throughput_throttling_v2", + "needs_restart": true, + "nullable": false, + "type": null + }, + "kafka_topics_max": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Kafka user topics that can be created. If `null`, then no limit is enforced.", + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_topics_max", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "kvstore_flush_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Key-value store flush interval (in milliseconds).", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kvstore_flush_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kvstore_max_segment_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "16_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Key-value maximum segment size (in bytes).", + "name": "kvstore_max_segment_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "leader_balancer_idle_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "2min", + "defined_in": "src/v/config/configuration.cc", + "description": "Leadership rebalancing idle timeout.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "leader_balancer_idle_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "leader_balancer_mode": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "leader_balancer_mode", + "needs_restart": true, + "nullable": false, + "type": null + }, + "leader_balancer_mute_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + 
"config_scope": "cluster", + "default": "5min", + "defined_in": "src/v/config/configuration.cc", + "description": "The length of time that a glossterm:Raft[] group is muted after a leadership rebalance operation. Any group that has been moved, regardless of whether the move succeeded or failed, undergoes a cooling-off period. This prevents Raft groups from repeatedly experiencing leadership rebalance operations in a short time frame, which can lead to instability in the cluster.\n\nThe leader balancer maintains a list of muted groups and reevaluates muted status at the start of each balancing iteration. Muted groups still contribute to overall cluster balance calculations although they can't themselves be moved until the mute period is over.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "leader_balancer_mute_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "leader_balancer_node_mute_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "20s", + "defined_in": "src/v/config/configuration.cc", + "description": "The duration after which a broker that hasn't sent a heartbeat is considered muted. This timeout sets a threshold for identifying brokers that shouldn't be targeted for leadership transfers when the cluster rebalances, for example, because of unreliable network connectivity.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "leader_balancer_mute_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "leader_balancer_transfer_limit_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "512", + "defined_in": "src/v/config/configuration.cc", + "description": "Per shard limit for in-progress leadership transfers.", + "name": "leader_balancer_transfer_limit_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "legacy_group_offset_retention_enabled": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Group offset retention is enabled by default starting in Redpanda version 23.1. To enable offset retention after upgrading from an older version, set this option to true.", + "name": "legacy_group_offset_retention_enabled", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "legacy_permit_unsafe_log_operation": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Flag to enable a Redpanda cluster operator to use unsafe control characters within strings, such as consumer group names or user names. This flag applies only for Redpanda clusters that were originally on version 23.1 or earlier and have been upgraded to version 23.2 or later. 
Starting in version 23.2, newly-created Redpanda clusters ignore this property.", + "name": "legacy_permit_unsafe_log_operation", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "legacy_unsafe_log_warning_interval_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "300s", + "defined_in": "src/v/config/configuration.cc", + "description": "Period at which to log a warning about using unsafe strings containing control characters. If unsafe strings are permitted by `legacy_permit_unsafe_log_operation`, a warning will be logged at an interval specified by this property.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "legacy_unsafe_log_warning_interval_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "log_cleanup_policy": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "deletion", + "defined_in": "src/v/config/configuration.cc", + "description": "Default cleanup policy for topic logs.\n\nThe topic property xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`] overrides the value of `log_cleanup_policy` at the topic level.", + "name": "log_cleanup_policy", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`]" + ], + "type": "object", + "visibility": "user" + }, + "log_compaction_adjacent_merge_self_compaction_count": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "log_compaction_adjacent_merge_self_compaction_count", + "needs_restart": true, + "nullable": false, + "type": null + }, + "log_compaction_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "How often to trigger background compaction.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_compaction_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "log_compaction_merge_max_ranges": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum range of segments that can be processed in a single round of adjacent segment compaction. If `null` (the default value), no maximum is imposed on the number of ranges that can be processed at once. 
A value below 1 effectively disables adjacent merge compaction.", + "maximum": 4294967295, + "minimum": 0, + "name": "log_compaction_merge_max_ranges", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "log_compaction_merge_max_segments_per_range": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of segments that can be combined into a single segment during an adjacent merge operation. If `null` (the default value), no maximum is imposed on the number of segments that can be combined at once. A value below 2 effectively disables adjacent merge compaction.", + "maximum": 4294967295, + "minimum": 0, + "name": "log_compaction_merge_max_segments_per_range", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "log_compaction_pause_use_sliding_window": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Pause use of sliding window compaction. Toggle to `true` _only_ when you want to force adjacent segment compaction. The memory reserved by `storage_compaction_key_map_memory` is not freed when this is set to `true`.", + "name": "log_compaction_pause_use_sliding_window", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "log_compaction_use_sliding_window": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Use sliding window compaction.", + "name": "log_compaction_use_sliding_window", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "log_compression_type": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "producer", + "defined_in": "src/v/config/configuration.cc", + "description": "IMPORTANT: This property is ignored regardless of the value specified. The behavior is always the same as the `producer` value. Redpanda brokers do not compress or recompress data based on this property. If producers send compressed data, Redpanda stores it as-is; if producers send uncompressed data, Redpanda stores it uncompressed. Other listed values are accepted for Apache Kafka compatibility but are ignored by the broker. This property may appear in Admin API and `rpk topic describe` outputs for compatibility.\n\nDefault for the Kafka-compatible compression.type property. 
Redpanda does not recompress data.\n\nThe topic property xref:./topic-properties.adoc#compressiontype[`compression.type`] overrides the value of `log_compression_type` at the topic level.", + "name": "log_compression_type", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:./topic-properties.adoc#compressiontype[`compression.type`]" + ], + "type": "object", + "visibility": "user" + }, + "log_disable_housekeeping_for_tests": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disables the housekeeping loop for local storage. This property is used to simplify testing, and should not be set in production.", + "name": "log_disable_housekeeping_for_tests", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "log_message_timestamp_alert_after_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "2h", + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold in milliseconds for alerting on messages with a timestamp after the broker's time, meaning the messages are in the future relative to the broker's clock.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_message_timestamp_alert_after_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_message_timestamp_alert_before_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold in milliseconds for alerting on messages with a timestamp before the broker's time, meaning the messages are in the past relative to the broker's clock. To disable this check, set to `null`.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_message_timestamp_alert_before_ms", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "log_message_timestamp_type": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "create_time", + "defined_in": "src/v/config/configuration.cc", + "description": "Default timestamp type for topic messages (CreateTime or LogAppendTime).\n\nThe topic property xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`] overrides the value of `log_message_timestamp_type` at the topic level.", + "name": "log_message_timestamp_type", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`]" + ], + "type": "object", + "visibility": "user" + }, + "log_retention_ms": { + "aliases": [ + "delete_retention_ms" + ], + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "7 * 24h", + "defined_in": "src/v/config/configuration.cc", + "description": "The amount of time to keep a log file before deleting it (in milliseconds). If set to `-1`, no time limit is applied. 
This is a cluster-wide default when a topic does not set or disable `retention.ms`.", + "name": "log_retention_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "log_segment_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "weeks", + "defined_in": "src/v/config/configuration.cc", + "description": "Default lifetime of log segments. If `null`, the property is disabled, and no default lifetime is set. Any value under 60 seconds (60000 ms) is rejected. This property can also be set in the Kafka API using the Kafka-compatible alias, `log.roll.ms`. The topic property `segment.ms` overrides the value of `log_segment_ms` at the topic level.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_segment_ms", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "log_segment_ms_max": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "24h * 365", + "defined_in": "src/v/config/configuration.cc", + "description": "Upper bound on topic `segment.ms`: higher values will be clamped to this value.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_segment_ms_max", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_ms_min": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10min", + "defined_in": "src/v/config/configuration.cc", + "description": "Lower bound on topic `segment.ms`: lower values will be clamped to this value.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_segment_ms_min", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "128_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Default log segment size in bytes for topics which do not set `segment.bytes`.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_size_jitter_percent": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5", + "defined_in": "src/v/config/configuration.cc", + "description": "Random variation to the segment size limit used for each partition.", + "maximum": 65535, + "minimum": 0, + "name": "log_segment_size_jitter_percent", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_size_max": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Upper bound on topic `segment.bytes`: higher values will be clamped to this limit.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "log_segment_size_max", + "needs_restart": false, + "nullable": true, + "type": 
"integer", + "visibility": "tunable" + }, + "log_segment_size_min": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Lower bound on topic `segment.bytes`: lower values will be clamped to this limit.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "log_segment_size_min", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "lz4_decompress_reusable_buffers_disabled": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Disable reusable preallocated buffers for LZ4 decompression.", + "name": "lz4_decompress_reusable_buffers_disabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "max.compaction.lag.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "max_compaction_lag_ms", + "description": "The maximum amount of time (in ms) that a log segment can remain unaltered before it is eligible for compaction in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] for the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "max.compaction.lag.ms", + "related_topics": [ + "xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`]", + "xref:./cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#configuration-options[Configure maximum compaction lag]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "max.message.bytes": { + "acceptable_values": "bytes (integer)", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "kafka_batch_max_bytes", + "description": "The maximum size of a message or batch of a topic. 
If a compression type is enabled, `max.message.bytes` sets the maximum size of the compressed message or batch.\n\nIf `max.message.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] for the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "max.message.bytes", + "related_topics": [ + "xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`]", + "xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`]", + "xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "max_compacted_log_segment_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "512_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum compacted segment size after consolidation.", + "name": "max_compacted_log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_compaction_lag_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "max_serializable_ms", + "defined_in": "src/v/config/configuration.cc", + "description": "For a compacted topic, the maximum time a message remains ineligible for compaction. The topic property `max.compaction.lag.ms` overrides this property.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "max_compaction_lag_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#max.compaction.lag.ms[`max.compaction.lag.ms`]" + ], + "type": "integer", + "visibility": "user" + }, + "max_concurrent_producer_ids": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "Maximum value", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "max_concurrent_producer_ids", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_in_flight_pandaproxy_requests_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "500", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of in-flight HTTP requests to HTTP Proxy permitted per shard. 
Any additional requests above this limit will be rejected with a 429 error.", + "name": "max_in_flight_pandaproxy_requests_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_in_flight_schema_registry_requests_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "500", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of in-flight HTTP requests to Schema Registry permitted per shard. Any additional requests above this limit will be rejected with a 429 error.", + "name": "max_in_flight_schema_registry_requests_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_kafka_throttle_delay_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30'000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Fail-safe maximum throttle delay on Kafka requests.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "max_kafka_throttle_delay_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_transactions_per_coordinator": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "Maximum value", + "defined_in": "src/v/config/configuration.cc", + "description": "Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase).\n\nFor details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips].", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "max_transactions_per_coordinator", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:transactions#transaction-usage-tips[Transaction usage tips]" + ], + "type": "integer", + "visibility": "tunable" + }, + "max_version": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "max_version", + "needs_restart": true, + "nullable": false, + "type": null + }, + "members_backend_retry_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5s", + "defined_in": "src/v/config/configuration.cc", + "description": "Time between members backend reconciliation loop retries.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "members_backend_retry_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "memory_abort_on_alloc_failure": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "If `true`, the Redpanda 
process will terminate immediately when an allocation cannot be satisfied due to memory exhaustion. If `false`, an exception is thrown.", + "name": "memory_abort_on_alloc_failure", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "memory_allocation_warning_threshold": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "128_KiB + 1", + "defined_in": "src/v/config/node_config.cc", + "description": "Threshold above which a memory allocation triggers a warning log message.", + "name": "memory_allocation_warning_threshold", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "message.timestamp.type": { + "acceptable_values": "[`CreateTime`, `LogAppendTime`]", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "log_message_timestamp_type", + "description": "The source of a message's timestamp: either the message's creation time or its log append time.\n\nWhen `message.timestamp.type` is set, it overrides the cluster property xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] for the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "message.timestamp.type", + "related_topics": [ + "xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "metadata_dissemination_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3'000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval for metadata dissemination batching.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metadata_dissemination_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metadata_dissemination_retries": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of attempts to look up a topic's metadata, such as its shard, before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for topics such as transactions and consumer offsets) is missing. These topics are usually created on demand when users try to use the cluster for the first time, and it may take some time for the creation to happen and the metadata to propagate to all the brokers (particularly the broker handling the request). In the meantime, Redpanda waits and retries.
This configuration controls the number of retries.", + "maximum": 32767, + "minimum": -32768, + "name": "metadata_dissemination_retries", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metadata_dissemination_retry_delay_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0'500ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Delay before retrying a topic lookup in a shard or other meta tables.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metadata_dissemination_retry_delay_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metadata_status_wait_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "2s", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum time to wait in metadata request for cluster health to be refreshed.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metadata_status_wait_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metrics_reporter_report_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "24h", + "defined_in": "src/v/config/configuration.cc", + "description": "Cluster metrics reporter report interval.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metrics_reporter_report_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metrics_reporter_tick_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1min", + "defined_in": "src/v/config/configuration.cc", + "description": "Cluster metrics reporter tick interval.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metrics_reporter_tick_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metrics_reporter_url": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "https://m.rp.vectorized.io/v2", + "defined_in": "src/v/config/configuration.cc", + "description": "URL of the cluster metrics reporter.", + "name": "metrics_reporter_url", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "min.cleanable.dirty.ratio": { + "acceptable_values": "[`0`, `1.0`]", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "min_cleanable_dirty_ratio", + "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "min.cleanable.dirty.ratio", + "related_topics": [ + "xref:./cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`]" +
], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "number" + }, + "min.compaction.lag.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "min_compaction_lag_ms", + "description": "The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] for the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "min.compaction.lag.ms", + "related_topics": [ + "xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`]", + "xref:./cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#configure-min-compaction-lag[Configure minimum compaction lag]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "min_cleanable_dirty_ratio": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.2", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. The topic property `min.cleanable.dirty.ratio` overrides this value at the topic level.", + "name": "min_cleanable_dirty_ratio", + "needs_restart": false, + "nullable": true, + "type": "number", + "visibility": "user" + }, + "min_compaction_lag_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0ms", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "min_compaction_lag_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#min.compaction.lag.ms[`min.compaction.lag.ms`]" + ], + "type": "integer", + "visibility": "user" + }, + "min_version": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "min_version", + "needs_restart": true, + "nullable": false, + "type": null + }, + "minimum_topic_replication": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum allowable replication factor for topics in this cluster. The set value must be positive, odd, and equal to or less than the number of available brokers. Changing this parameter only restricts newly-created topics. Redpanda returns an `INVALID_REPLICATION_FACTOR` error on any attempt to create a topic with a replication factor less than this property. 
If you change the `minimum_topic_replications` setting, the replication factor of existing topics remains unchanged. However, Redpanda will log a warning on start-up with a list of any topics that have fewer replicas than this minimum. For example, you might see a message such as `Topic X has a replication factor less than specified minimum: 1 < 3`.", + "maximum": 32767, + "minimum": -32768, + "name": "minimum_topic_replications", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "mode_mutability": { + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "true", + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects.", + "name": "mode_mutability", + "needs_restart": true, + "nullable": false, + "type": "boolean" + }, + "node_id": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "A number that uniquely identifies the broker within the cluster. If `null` (the default value), Redpanda automatically assigns an ID. If set, it must be non-negative value.\n\n.Do not set `node_id` manually.\n[WARNING]\n====\nRedpanda assigns unique IDs automatically to prevent issues such as:\n\n- Brokers with empty disks rejoining the cluster.\n- Conflicts during recovery or scaling.\n\nManually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures.\n====\n\nBroker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed.", + "name": "node_id", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "node_id_overrides": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID.\n\nRemove this property after the cluster restarts successfully and operates normally. 
This prevents reapplication and maintains consistent configuration across brokers.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n node_id_overrides:\n - current_uuid: \"\"\n new_id: \n new_uuid: \"\"\n ignore_existing_node_id: \n - current_uuid: \"\"\n new_id: \n new_uuid: \"\"\n ignore_existing_node_id: \n----", + "items": { + "type": "config::node_id_override" + }, + "name": "node_id_overrides", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "node_isolation_heartbeat_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3000", + "defined_in": "src/v/config/configuration.cc", + "description": "How long after the last heartbeat request a node will wait before considering itself to be isolated.", + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "node_isolation_heartbeat_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "node_management_operation_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for executing node management operations.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "node_management_operation_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "node_status_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval between two node status messages. 
Node status messages establish liveness status outside of the Raft protocol.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "node_status_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "node_status_reconnect_max_backoff_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "15s", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum backoff (in milliseconds) to reconnect to an unresponsive peer during node status liveness checks.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "node_status_reconnect_max_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "oidc_clock_skew_tolerance": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.\n\n*Unit*: seconds", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "oidc_clock_skew_tolerance", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "oidc_discovery_url": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "https://auth.prd.cloud.redpanda.com/.well-known/openid-configuration", + "defined_in": "src/v/config/configuration.cc", + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nThe URL pointing to the well-known discovery endpoint for the OIDC provider.", + "name": "oidc_discovery_url", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "oidc_keys_refresh_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1h", + "defined_in": "src/v/config/configuration.cc", + "description": "The frequency of refreshing the JSON Web Keys (JWKS) used to validate access tokens.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "oidc_keys_refresh_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "oidc_principal_mapping": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "$.sub", + "defined_in": "src/v/config/configuration.cc", + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nRule for mapping JWT payload claim to a Redpanda user principal.", + "name": "oidc_principal_mapping", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:security/authentication.adoc#oidc[OpenID Connect authentication]", + "self-managed-only: xref:manage:kubernetes/security/authentication/k-authentication.adoc[OpenID Connect authentication in Kubernetes]" + ], + "type": "string", + "visibility": "user" + }, + "oidc_token_audience": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "redpanda", + "defined_in": 
"src/v/config/configuration.cc", + "description": "ifdef::env-cloud[]\nNOTE: This property is read-only in Redpanda Cloud.\nendif::[]\n\nA string representing the intended recipient of the token.", + "name": "oidc_token_audience", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "openssl_config_file": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the configuration file used by OpenSSL to properly load the FIPS-compliant module.", + "name": "openssl_config_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "openssl_module_directory": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the directory that contains the OpenSSL FIPS-compliant module. The filename that Redpanda looks for is `fips.so`.", + "name": "openssl_module_directory", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "pandaproxy_api": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [ + { + "address": "0.0.0.0", + "port": 8082 + } + ], + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Rest API listener address and port.", + "example": ".Example\n[,yaml]\n----\npandaproxy:\n pandaproxy_api:\n address: 0.0.0.0\n port: 8082\n authentication_method: http_basic\n----", + "items": { + "type": "object" + }, + "name": "pandaproxy_api", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "pandaproxy_api_tls": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "TLS configuration for Pandaproxy API.", + "items": { + "type": "object" + }, + "name": "pandaproxy_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "partition_autobalancing_concurrent_moves": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "50", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of partitions that can be reassigned at once.", + "name": "partition_autobalancing_concurrent_moves", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "partition_autobalancing_max_disk_usage_percent": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "80", + "defined_in": "src/v/config/configuration.cc", + "description": "When the disk usage of a node exceeds this threshold, it triggers Redpanda to move partitions off of the node. 
This property applies only when partition_autobalancing_mode is set to `continuous`.", + "maximum": 4294967295, + "minimum": 0, + "name": "partition_autobalancing_max_disk_usage_percent", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "type": "integer", + "visibility": "user" + }, + "partition_autobalancing_min_size_threshold": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum size of partition that is going to be prioritized when rebalancing a cluster due to the disk size threshold being breached. This value is calculated automatically by default.", + "name": "partition_autobalancing_min_size_threshold", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "partition_autobalancing_mode": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "node_add", + "defined_in": "src/v/config/configuration.cc", + "description": "Mode of xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing] for a cluster.\n\n*Accepted values:*\n\n* `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option is customized by <> and <> properties.\n* `node_add`: partition balancing happens when a node is added.\n* `off`: partition balancing is disabled. This option is not recommended for production clusters.", + "enterprise_value": "continuous", + "is_enterprise": true, + "name": "partition_autobalancing_mode", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing]", + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "type": "object", + "visibility": "user" + }, + "partition_autobalancing_movement_batch_size_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5_GiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Total size of partitions that autobalancer is going to move in one batch (deprecated, use partition_autobalancing_concurrent_moves to limit the autobalancer concurrency)", + "is_deprecated": true, + "name": "partition_autobalancing_movement_batch_size_bytes", + "needs_restart": false, + "nullable": false, + "type": null, + "visibility": "deprecated" + }, + "partition_autobalancing_node_availability_timeout_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "15min", + "defined_in": "src/v/config/configuration.cc", + "description": "When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`. 
", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "partition_autobalancing_node_availability_timeout_sec", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "type": "integer", + "visibility": "user" + }, + "partition_autobalancing_tick_interval_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Partition autobalancer tick interval.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "partition_autobalancing_tick_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "partition_autobalancing_tick_moves_drop_threshold": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0.2", + "defined_in": "src/v/config/configuration.cc", + "description": "If the number of scheduled tick moves drops by this ratio, a new tick is scheduled immediately. Valid values are (0, 1]. For example, with a value of 0.2 and 100 scheduled moves in a tick, a new tick is scheduled when the in-progress moves are fewer than 80.", + "name": "partition_autobalancing_tick_moves_drop_threshold", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "partition_autobalancing_topic_aware": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "If `true`, Redpanda prioritizes balancing a topic\u2019s partition replica count evenly across all brokers while it\u2019s balancing the cluster\u2019s overall partition count. Because different topics in a cluster can have vastly different load profiles, this better distributes the workload of the most heavily-used topics evenly across brokers.", + "name": "partition_autobalancing_topic_aware", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "partition_manager_shutdown_watchdog_timeout": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "A threshold value to detect partitions which might have been stuck while shutting down. After this threshold, a watchdog in partition manager will log information about partition shutdown not making progress.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "partition_manager_shutdown_watchdog_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "pp_sr_smp_max_non_local_requests": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Cross-core(Inter-shard communication) requests pending in HTTP Proxy and Schema Registry seastar::smp group. 
(For more details, see the `seastar::smp_service_group` documentation).\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", + "maximum": 4294967295, + "minimum": 0, + "name": "pp_sr_smp_max_non_local_requests", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "produce_ack_level": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "-1", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Number of acknowledgments the producer requires the leader to have received before considering a request complete.", + "maximum": 32767, + "minimum": -32768, + "name": "produce_ack_level", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_batch_delay": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "100ms", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Delay (in milliseconds) to wait before sending batch.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "produce_batch_delay_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_batch_delay_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Configuration property: produce_batch_delay_ms", + "is_deprecated": false, + "is_topic_property": false, + "name": "produce_batch_delay_ms", + "type": "string", + "visibility": "user" + }, + "produce_batch_record_count": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "1000", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Number of records to batch before sending to broker.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "produce_batch_record_count", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_batch_size_bytes": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "1048576", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Number of bytes to batch before sending to broker.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "produce_batch_size_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_compression_type": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "none", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Enable or disable compression by the Kafka client. 
Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd].", + "name": "produce_compression_type", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "produce_shutdown_delay": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "0ms", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Delay (in milliseconds) to allow for final flush of buffers before shutting down.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "produce_shutdown_delay_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_shutdown_delay_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Delay (in milliseconds) to allow for final flush of buffers before shutting down.", + "is_deprecated": false, + "is_topic_property": false, + "name": "produce_shutdown_delay_ms", + "type": "string", + "visibility": "user" + }, + "quota_manager_gc_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30000 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Quota manager GC frequency in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "quota_manager_gc_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rack": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones.", + "name": "rack", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness]" + ], + "type": "string", + "visibility": "user" + }, + "raft_election_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1'500ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Election timeout expressed in milliseconds.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "election_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_enable_longest_log_detection": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables an additional step in leader election where a candidate is allowed to wait for all the replies from the broker it requested votes from. 
This may introduce a small delay when recovering from failure, but it prevents truncation if any of the replicas have more data than the majority.",
+      "name": "raft_enable_longest_log_detection",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "boolean",
+      "visibility": "tunable"
+    },
+    "raft_enable_lw_heartbeat": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "true",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Enables Raft optimization of heartbeats.",
+      "name": "raft_enable_lw_heartbeat",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "boolean",
+      "visibility": "tunable"
+    },
+    "raft_flush_timer_interval_ms": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "100ms",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Interval for checking a partition against `raft_replica_max_pending_flush_bytes`. Deprecated since 24.1. Use `raft_replica_max_flush_delay_ms` instead.",
+      "is_deprecated": true,
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "raft_flush_timer_interval_ms",
+      "needs_restart": true,
+      "nullable": false,
+      "type": null,
+      "visibility": "deprecated"
+    },
+    "raft_heartbeat_disconnect_failures": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "3",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "The number of failed heartbeats after which an unresponsive TCP connection is forcibly closed. To disable forced disconnection, set to 0.",
+      "name": "raft_heartbeat_disconnect_failures",
+      "needs_restart": true,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "raft_heartbeat_interval_ms": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "150 milliseconds",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Number of milliseconds for Raft leader heartbeats.",
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "raft_heartbeat_interval_ms",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "raft_heartbeat_timeout_ms": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "3s",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Raft heartbeat RPC (remote procedure call) timeout. Raft uses a heartbeat mechanism to maintain leadership authority and to trigger leader elections. The partition leader sends a periodic heartbeat to all followers, at the interval set by `raft_heartbeat_interval_ms`, to declare its leadership. 
If a follower does not receive a heartbeat within the `raft_heartbeat_timeout_ms`, then it triggers an election to choose a new partition leader.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_heartbeat_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_io_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10'000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Raft I/O timeout.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_io_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_learner_recovery_rate": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "100_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Raft learner recovery rate limit. Throttles the rate of data communicated to nodes (learners) that need to catch up to leaders. This rate limit is placed on a node sending data to a recovering node. Each sending node is limited to this rate. The recovering node accepts data as fast as possible according to the combined limits of all healthy nodes in the cluster. For example, if two nodes are sending data to the recovering node, and `raft_learner_recovery_rate` is 100 MB/sec, then the recovering node will recover at a rate of 200 MB/sec.", + "name": "raft_learner_recovery_rate", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_max_buffered_follower_append_entries_bytes_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "0", + "defined_in": "src/v/config/configuration.cc", + "description": "The total size of append entry requests that may be cached per shard, using the Raft-buffered protocol. When an entry is cached, the leader can continue serving requests because the ordering of the cached requests cannot change. When the total size of cached requests reaches the set limit, back pressure is applied to throttle producers.", + "name": "raft_max_buffered_follower_append_entries_bytes_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_max_concurrent_append_requests_per_follower": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "raft_max_concurrent_append_requests_per_follower", + "needs_restart": true, + "nullable": false, + "type": null + }, + "raft_max_inflight_follower_append_entries_requests_per_shard": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1024", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. 
This property replaces `raft_max_concurrent_append_requests_per_follower`.",
+      "name": "raft_max_inflight_follower_append_entries_requests_per_shard",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "raft_max_recovery_memory": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": null,
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Maximum memory that can be used for reads in the Raft recovery process. If `null`, the limit defaults to 15% of total memory.",
+      "name": "raft_max_recovery_memory",
+      "needs_restart": false,
+      "nullable": true,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "raft_recovery_concurrency_per_shard": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "64",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Number of partitions that may simultaneously recover data to a particular shard. This number is limited to avoid overwhelming nodes when they come back online after an outage.",
+      "name": "raft_recovery_concurrency_per_shard",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "raft_recovery_default_read_size": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "512_KiB",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Specifies the default size of a read issued during Raft follower recovery.",
+      "name": "raft_recovery_default_read_size",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "raft_recovery_throttle_disable_dynamic_mode": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "false",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables cross-shard sharing used to throttle recovery traffic. Use only to debug unexpected problems.",
+      "name": "raft_recovery_throttle_disable_dynamic_mode",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "boolean",
+      "visibility": "tunable"
+    },
+    "raft_replica_max_flush_delay_ms": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "100ms",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Maximum delay between two subsequent flushes. After this delay, the log is automatically force-flushed.",
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "raft_replica_max_flush_delay_ms",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "raft_replica_max_pending_flush_bytes": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "256_KiB",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Maximum number of bytes that are not flushed per partition. 
If the configured threshold is reached, the log is automatically flushed even if it has not been explicitly requested.", + "name": "raft_replica_max_pending_flush_bytes", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "raft_replicate_batch_window_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of requests cached for replication.", + "name": "raft_replicate_batch_window_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_smp_max_non_local_requests": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Cross-core(Inter-shard communication) requests pending in Raft seastar::smp group. For details, refer to the `seastar::smp_service_group` documentation).\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", + "maximum": 4294967295, + "minimum": 0, + "name": "raft_smp_max_non_local_requests", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "raft_timeout_now_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for Raft's timeout_now RPC. This RPC is used to force a follower to dispatch a round of votes immediately.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_timeout_now_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_transfer_leader_recovery_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Follower recovery timeout waiting period when transferring leadership.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_transfer_leader_recovery_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "readers_cache_eviction_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30s", + "defined_in": "src/v/config/configuration.cc", + "description": "Duration after which inactive readers are evicted from cache.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "readers_cache_eviction_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "readers_cache_target_max_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "200", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum desired number of readers cached per NTP. 
This is a soft limit, meaning that the number of readers in the cache may temporarily increase while cleanup is performed in the background.",
+      "name": "readers_cache_target_max_size",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "reclaim_batch_cache_min_free": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "64_MiB",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Minimum amount of free memory maintained by the batch cache background reclaimer.",
+      "name": "reclaim_batch_cache_min_free",
+      "needs_restart": true,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "reclaim_growth_window": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "3'000ms",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Starting from the last point in time when memory was reclaimed from the batch cache, this is the duration during which the amount of memory to reclaim grows at a significant rate, based on heuristics about the amount of available memory.",
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "reclaim_growth_window",
+      "needs_restart": true,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "reclaim_max_size": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "4_MiB",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Maximum batch cache reclaim size.",
+      "name": "reclaim_max_size",
+      "needs_restart": true,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "reclaim_min_size": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "128_KiB",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Minimum batch cache reclaim size.",
+      "name": "reclaim_min_size",
+      "needs_restart": true,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "reclaim_stable_window": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "10'000ms",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed.",
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "reclaim_stable_window",
+      "needs_restart": true,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "recovery_append_timeout_ms": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "5s",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Timeout for append entry requests issued while updating a stale follower.",
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "recovery_append_timeout_ms",
+      "needs_restart": true,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "recovery_mode_enabled": {
+      "category": 
"redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "false", + "defined_in": "src/v/config/node_config.cc", + "description": "If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed.", + "name": "recovery_mode_enabled", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:manage:recovery-mode.adoc[recovery mode]" + ], + "type": "boolean", + "visibility": "user" + }, + "redpanda.cloud_topic.enabled": { + "acceptable_values": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.cloud_topic.enabled", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.iceberg.delete": { + "acceptable_values": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "Whether the corresponding Iceberg table is deleted upon deleting the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.iceberg.delete", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.iceberg.invalid.record.action": { + "acceptable_values": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "Whether to write invalid records to a dead-letter queue (DLQ).", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.iceberg.invalid.record.action", + "related_topics": [ + "xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.iceberg.mode": { + "acceptable_values": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "Enable the Iceberg integration for the topic. 
You can choose one of four modes.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.iceberg.mode", + "related_topics": [ + "xref:manage:iceberg/choose-iceberg-mode.adoc#override-value-schema-latest-default[Choose an Iceberg Mode]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.iceberg.partition.spec": { + "acceptable_values": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "The link:https://iceberg.apache.org/docs/nightly/partitioning/[partitioning^] specification for the Iceberg table.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.iceberg.partition.spec", + "related_topics": [ + "xref:manage:iceberg/about-iceberg-topics.adoc#use-custom-partitioning[Use custom partitioning]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.iceberg.target.lag.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "Controls how often the data in the Iceberg table is refreshed with new data from the topic. Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.iceberg.target.lag.ms", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "redpanda.key.schema.id.validation": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.key.schema.id.validation", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.key.subject.name.strategy": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.key.subject.name.strategy", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.leaders.preference": { + "acceptable_values": "", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "The preferred location (rack) for partition leaders of a topic.\n\nThis property inherits the value from the config_ref:default_leaders_preference,true,properties/cluster-properties[] cluster configuration property. 
You may override the cluster-wide setting by specifying the value for individual topics.\n\nIf the cluster configuration property config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, Leader Pinning is disabled across the cluster.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.leaders.preference", + "related_topics": [ + "xref:develop:produce-data/leader-pinning.adoc[Leader pinning]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.remote.allowgaps": { + "acceptable_values": "", + "category": "other", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "", + "exclude_from_docs": true, + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.remote.allowgaps", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.remote.delete": { + "acceptable_values": "[`true`, `false`]", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "A flag that enables deletion of data from object storage for Tiered Storage when it's deleted from local storage for a topic.\n\nNOTE: `redpanda.remote.delete` doesn't apply to Remote Read Replica topics: a Remote Read Replica topic isn't deleted from object storage when this flag is `true`.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.remote.delete", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "boolean" + }, + "redpanda.remote.read": { + "acceptable_values": "[`true`, `false`]", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "A flag for enabling Redpanda to fetch data for a topic from object storage to local storage. 
When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.remote.read", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "boolean" + }, + "redpanda.remote.readreplica": { + "acceptable_values": "[`true`, `false`]", + "category": "remote-read-replica", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "The name of the object storage bucket for a Remote Read Replica topic.\n\nCAUTION: Setting `redpanda.remote.readreplica` together with either `redpanda.remote.read` or `redpanda.remote.write` results in an error.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.remote.readreplica", + "related_topics": [ + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "boolean" + }, + "redpanda.remote.recovery": { + "acceptable_values": "[`true`, `false`]", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "A flag that enables the recovery or reproduction of a topic from object storage for Tiered Storage. The recovered data is saved in local storage, and the maximum amount of recovered data is determined by the local storage retention limits of the topic.\n\nTIP: You can only configure `redpanda.remote.recovery` when you create a topic. You cannot apply this setting to existing topics.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.remote.recovery", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "boolean" + }, + "redpanda.remote.write": { + "acceptable_values": "[`true`, `false`]", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "A flag for enabling Redpanda to upload data for a topic from local storage to object storage. 
When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.remote.write", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "boolean" + }, + "redpanda.value.schema.id.validation": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.value.schema.id.validation", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.value.subject.name.strategy": { + "acceptable_values": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "", + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.value.subject.name.strategy", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "redpanda.virtual.cluster.id": { + "acceptable_values": "", + "category": "other", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": null, + "description": "", + "exclude_from_docs": true, + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.virtual.cluster.id", + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "string" + }, + "release_cache_on_segment_roll": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Flag for specifying whether or not to release cache when a full segment is rolled.", + "name": "release_cache_on_segment_roll", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "replicate_append_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3s", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for append entry requests issued while replicating entries.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "replicate_append_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "replication.factor": { + "acceptable_values": "integer (1 or greater)", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "replication_factor", + "description": "The number of replicas of a topic to save in different nodes (brokers) of a cluster.\n\nIf `replication.factor` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] for the topic.\n\nNOTE: Although 
`replication.factor` isn't returned or displayed by xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] as a valid Kafka property, you can set it using xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]. When the `replication.factor` of a topic is altered, it isn't simply a property value that's updated, but rather the actual replica sets of topic partitions that are changed.", + "is_deprecated": false, + "is_topic_property": true, + "name": "replication.factor", + "related_topics": [ + "xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication]", + "xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`]", + "xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]", + "xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`]", + "xref:develop:config-topics.adoc#choose-the-replication-factor[Choose the replication factor]", + "xref:develop:config-topics.adoc#change-the-replication-factor[Change the replication factor]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "retention.bytes": { + "acceptable_values": "bytes (integer)", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "retention_bytes", + "description": "A size-based retention limit that configures the maximum size that a topic partition can grow before becoming eligible for cleanup.\n\nIf `retention.bytes` is set to a positive value, it overrides the cluster property xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] for the topic, and the total retained size for the topic is `retention.bytes` multiplied by the number of partitions for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, cleanup occurs when either limit is reached.", + "is_deprecated": false, + "is_topic_property": true, + "name": "retention.bytes", + "related_topics": [ + "xref:cluster-properties.adoc#retention_bytes[`retention_bytes`]", + "xref:./cluster-properties.adoc#retention_bytes[`retention_bytes`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "integer" + }, + "retention.local.target.bytes": { + "acceptable_values": "bytes (integer)", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "retention_local_target_bytes", + "description": "A size-based retention limit for Tiered Storage that configures the maximum size that a topic partition in local storage can grow before becoming eligible for cleanup. 
It applies per partition and is equivalent to <> without Tiered Storage.", + "is_deprecated": false, + "is_topic_property": true, + "name": "retention.local.target.bytes", + "related_topics": [ + "xref:./cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "integer" + }, + "retention.local.target.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "retention_local_target_ms", + "description": "A time-based retention limit for Tiered Storage that sets the maximum duration that a log's segment file for a topic is retained in local storage before it's eligible for cleanup. This property is equivalent to <> without Tiered Storage.", + "is_deprecated": false, + "is_topic_property": true, + "name": "retention.local.target.ms", + "related_topics": [ + "xref:./cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`]", + "xref:manage:tiered-storage.adoc[Tiered Storage]", + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "integer" + }, + "retention.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "retention_ms", + "description": "A time-based retention limit that configures the maximum duration that a log's segment file for a topic is retained before it becomes eligible to be cleaned up. To consume all data, a consumer of the topic must read from a segment before its `retention.ms` elapses, otherwise the segment may be compacted and/or deleted. If a non-positive value, no per-topic limit is applied.\n\nIf `retention.ms` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, the earliest occurring limit applies.", + "is_deprecated": false, + "is_topic_property": true, + "name": "retention.ms", + "related_topics": [ + "xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`]", + "xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "source_file": "src/v/kafka/protocol/topic_properties.h", + "type": "integer" + }, + "retention_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. 
If `null` (the default value), no limit is applied.\n\nThe topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level.",
+      "name": "retention_bytes",
+      "needs_restart": false,
+      "nullable": true,
+      "related_topics": [
+        "xref:./topic-properties.adoc#retentionbytes[`retention.bytes`]"
+      ],
+      "type": "integer",
+      "visibility": "user"
+    },
+    "retention_local_strict": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "false",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Flag to allow Tiered Storage topics to expand to consumable retention policy limits. When this flag is enabled, non-local retention settings are used, and local retention settings are used to inform data removal policies in low-disk space scenarios.",
+      "name": "retention_local_strict",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "boolean",
+      "visibility": "user"
+    },
+    "retention_local_strict_override": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "true",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Trim log data when a cloud topic reaches its local retention limit. When this option is disabled, Redpanda allows partitions to grow past the local retention limit, and data is trimmed automatically as storage reaches the configured target size.",
+      "name": "retention_local_strict_override",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "boolean",
+      "visibility": "user"
+    },
+    "retention_local_target_bytes_default": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": null,
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Local retention size target for partitions of topics with object storage write enabled. If `null`, the property is disabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.bytes` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention].",
+      "name": "retention_local_target_bytes_default",
+      "needs_restart": false,
+      "nullable": true,
+      "related_topics": [
+        "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]"
+      ],
+      "type": "integer",
+      "visibility": "user"
+    },
+    "retention_local_target_capacity_bytes": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": null,
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "The target capacity (in bytes) that log storage will try to use before additional retention rules take over to trim data to meet the target. When no target is specified, storage usage is unbounded.\n\nNOTE: Redpanda Data recommends setting only one of <> or <>. 
If both are set, the minimum of the two is used as the effective target capacity.",
+      "maximum": 18446744073709551615,
+      "minimum": 0,
+      "name": "retention_local_target_capacity_bytes",
+      "needs_restart": false,
+      "nullable": true,
+      "type": "integer",
+      "visibility": "user"
+    },
+    "retention_local_target_capacity_percent": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "80.0",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "The target capacity, as a percentage of unreserved space (<>), that log storage will try to use before additional retention rules take over to trim data to meet the target. When no target is specified, storage usage is unbounded.\n\nNOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity.",
+      "name": "retention_local_target_capacity_percent",
+      "needs_restart": false,
+      "nullable": true,
+      "type": "number",
+      "visibility": "user"
+    },
+    "retention_local_target_ms_default": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "24h",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "Local retention time target for partitions of topics with object storage write enabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.ms` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention].",
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "retention_local_target_ms_default",
+      "needs_restart": false,
+      "nullable": false,
+      "related_topics": [
+        "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]"
+      ],
+      "type": "integer",
+      "visibility": "user"
+    },
+    "retention_local_trim_interval": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "30s",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "The interval at which disk usage is checked for disk pressure, and data is optionally trimmed to meet the target.",
+      "maximum": 17592186044415,
+      "minimum": -17592186044416,
+      "name": "retention_local_trim_interval",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "integer",
+      "visibility": "tunable"
+    },
+    "retention_local_trim_overage_coeff": {
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "cluster",
+      "default": "2.0",
+      "defined_in": "src/v/config/configuration.cc",
+      "description": "The space management control loop reclaims the overage multiplied by this coefficient to compensate for data that is written during the idle period between control loop invocations.",
+      "name": "retention_local_trim_overage_coeff",
+      "needs_restart": false,
+      "nullable": false,
+      "type": "number",
+      "visibility": "tunable"
+    },
+    "retries": {
+      "category": "pandaproxy-client",
+      "cloud_byoc_only": false,
+      "cloud_editable": false,
+      "cloud_readonly": false,
+      "cloud_supported": false,
+      "config_scope": "broker",
+      "default": "5",
+      "defined_in": "src/v/kafka/client/configuration.cc",
+      "description": "Number of times to retry a request to a broker.",
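+      "example": [
+        ".Example",
+        "[,yaml]",
+        "----",
+        "pandaproxy_client:",
+        "  retries: 5",
+        "----",
+        "\n",
+        "This snippet is illustrative: it shows `retries` set to its default value of `5` under the `pandaproxy_client` section of `redpanda.yaml`. Replace `5` with the number of retry attempts you want the HTTP Proxy's internal Kafka client to make."
+      ],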
+ "name": "retries", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "retry_base_backoff": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "100ms", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Delay (in milliseconds) for initial retry backoff.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "retry_base_backoff_ms", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "retry_base_backoff_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Configuration property: retry_base_backoff_ms", + "is_deprecated": false, + "is_topic_property": false, + "name": "retry_base_backoff_ms", + "type": "string", + "visibility": "user" + }, + "rm_sync_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Resource manager's synchronization timeout. Specifies the maximum time for this node to wait for the internal state machine to catch up with all events written by previous leaders before rejecting a request.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "rm_sync_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "rm_violation_recovery_policy": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "rm_violation_recovery_policy", + "needs_restart": true, + "nullable": false, + "type": null + }, + "rpc_client_connections_per_peer": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "128", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of connections a broker will open to each of its peers.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_client_connections_per_peer", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "rpc_server": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": { + "address": "127.0.0.1", + "port": 33145 + }, + "defined_in": "src/v/config/node_config.cc", + "description": "IP address and port for the Remote Procedure Call (RPC) server.", + "name": "rpc_server", + "needs_restart": true, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "rpc_server_compress_replies": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable compression for internal RPC (remote procedure call) server replies.", + "name": "rpc_server_compress_replies", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + 
"rpc_server_listen_backlog": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum TCP connection queue length for Kafka server and internal RPC server. If `null` (the default value), no queue length is set.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_server_listen_backlog", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "rpc_server_tcp_recv_buf": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Internal RPC TCP receive buffer size. If `null` (the default value), no buffer size is set by Redpanda.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_server_tcp_recv_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "rpc_server_tcp_send_buf": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Internal RPC TCP send buffer size. If `null` (the default value), then no buffer size is set by Redpanda.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_server_tcp_send_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "rpc_server_tls": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": { + "cert_file": null, + "enabled": null, + "key_file": null, + "require_client_auth": null, + "truststore_file": null + }, + "defined_in": "src/v/config/node_config.cc", + "description": "TLS configuration for the RPC server.", + "example": ".Example\n[,yaml]\n----\nredpanda:\n rpc_server_tls:\n enabled: true\n cert_file: \"\"\n key_file: \"\"\n truststore_file: \"\"\n require_client_auth: true\n----", + "name": "rpc_server_tls", + "needs_restart": true, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "rpk_path": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "/usr/bin/rpk", + "defined_in": "src/v/config/configuration.cc", + "description": "Path to RPK binary.", + "name": "rpk_path", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "rps_limit_acls_and_users_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller ACLs and user's operations.", + "name": "rps_limit_acls_and_users_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_configuration_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller configuration operations.", + "name": 
"rps_limit_configuration_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_move_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller move operations.", + "name": "rps_limit_move_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_node_management_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller node management operations.", + "name": "rps_limit_node_management_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_topic_operations": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000", + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller topic operations.", + "name": "rps_limit_topic_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "sampled_memory_profile": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "When `true`, memory allocations are sampled and tracked. A sampled live set of allocations can then be retrieved from the Admin API. 
Additionally, Redpanda will periodically log the top-n allocation sites.", + "name": "memory_enable_memory_sampling", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "sasl_kerberos_config": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "/etc/krb5.conf", + "defined_in": "src/v/config/configuration.cc", + "description": "The location of the Kerberos `krb5.conf` file for Redpanda.", + "name": "sasl_kerberos_config", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "sasl_kerberos_keytab": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "/var/lib/redpanda/redpanda.keytab", + "defined_in": "src/v/config/configuration.cc", + "description": "The location of the Kerberos keytab file for Redpanda.", + "name": "sasl_kerberos_keytab", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "sasl_kerberos_principal": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "redpanda", + "defined_in": "src/v/config/configuration.cc", + "description": "The primary of the Kerberos Service Principal Name (SPN) for Redpanda.", + "name": "sasl_kerberos_principal", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "sasl_kerberos_principal_mapping": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "DEFAULT" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "Rules for mapping Kerberos principal names to Redpanda user principals.", + "items": { + "type": "string" + }, + "name": "sasl_kerberos_principal_mapping", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "sasl_mechanism": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. 
These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "name": "sasl_mechanism", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" + ], + "type": "string" + }, + "sasl_mechanisms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": [ + "SCRAM" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of supported SASL mechanisms.\n\n*Accepted values:*\n\n* `SCRAM`\n* `GSSAPI`\n* `OAUTHBEARER`\n* `PLAIN`\n\nNote that in order to enable PLAIN, you must also enable SCRAM.", + "enterprise_value": [ + "GSSAPI", + "OAUTHBEARER" + ], + "is_enterprise": true, + "items": { + "type": "string" + }, + "name": "sasl_mechanisms", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "array", + "visibility": "user" + }, + "sasl_mechanisms_overrides": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of overrides for SASL mechanisms, defined by listener. SASL mechanisms defined here will replace the ones set in `sasl_mechanisms`. The same limitations apply as for `sasl_mechanisms`.", + "enterprise_value": "Any override containing enterprise mechanisms (GSSAPI, OAUTHBEARER).", + "is_enterprise": true, + "items": { + "type": "object" + }, + "name": "sasl_mechanisms_overrides", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "array", + "visibility": "user" + }, + "schema_registry_always_normalize": { + "aliases": [ + "schema_registry_normalize_on_startup" + ], + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Always normalize schemas. 
If set, this overrides the `normalize` parameter in requests to the Schema Registry API.", + "name": "schema_registry_always_normalize", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "schema_registry_api": { + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [ + { + "address": "0.0.0.0", + "port": 8081 + } + ], + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "Schema Registry API listener address and port", + "example": ".Example\n[,yaml]\n----\nschema_registry:\n schema_registry_api:\n address: 0.0.0.0\n port: 8081\n authentication_method: http_basic\n----", + "items": { + "type": "object" + }, + "name": "schema_registry_api", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "schema_registry_api_tls": { + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "TLS configuration for Schema Registry API.", + "items": { + "type": "object" + }, + "name": "schema_registry_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "schema_registry_enable_authorization": { + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables ACL-based authorization for Schema Registry requests. When `true`, Schema Registry\nuses ACL-based authorization instead of the default `public/user/superuser` authorization model. \nifdef::env-cloud[]\nRequires authentication to be enabled using the `authentication_method` property in the `schema_registry_api` broker configuration.\nendif::[]", + "enterprise_value": true, + "is_enterprise": true, + "name": "schema_registry_enable_authorization", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "schema_registry_protobuf_renderer_v2": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "schema_registry_protobuf_renderer_v2", + "needs_restart": true, + "nullable": false, + "type": null + }, + "schema_registry_replication_factor": { + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "Replication factor for internal `_schemas` topic. 
If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property.", + "maximum": 32767, + "minimum": -32768, + "name": "schema_registry_replication_factor", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`]" + ], + "type": "integer" + }, + "scram_password": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "is_secret": true, + "name": "scram_password", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "scram_username": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "name": "scram_username", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "seed_server_meta_topic_partitions": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "seed_server_meta_topic_partitions", + "needs_restart": true, + "nullable": false, + "type": null + }, + "seed_servers": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "List of the seed servers used to join current cluster. If the `seed_servers` list is empty the broker will be a cluster root and it will form a new cluster.\n\n* When `empty_seed_starts_cluster` is `true`, Redpanda enables one broker with an empty `seed_servers` list to initiate a new cluster. The broker with an empty `seed_servers` becomes the cluster root, to which other brokers must connect to join the cluster. Brokers looking to join the cluster should have their `seed_servers` populated with the cluster root's address, facilitating their connection to the cluster.\n+\n[IMPORTANT]\n====\nOnly one broker, the designated cluster root, should have an empty `seed_servers` list during the initial cluster bootstrapping. This ensures a single initiation point for cluster formation.\n====\n\n* When `empty_seed_starts_cluster` is `false`, Redpanda requires all brokers to start with a known set of brokers listed in `seed_servers`. 
The `seed_servers` list must not be empty and should be identical across these initial seed brokers, containing the addresses of all seed brokers. Brokers not included in the `seed_servers` list use it to discover and join the cluster, allowing for expansion beyond the foundational members.\n+\n[NOTE]\n====\nThe `seed_servers` list must be consistent across all seed brokers to prevent cluster fragmentation and ensure stable cluster formation.\n====", + "example": ".Example with `empty_seed_starts_cluster: true`\n[,yaml]\n----\n# Cluster root broker (seed starter)\nredpanda:\n empty_seed_starts_cluster: true\n seed_servers: []\n----\n\n[,yaml]\n----\n# Additional brokers joining the cluster\nredpanda:\n empty_seed_starts_cluster: true\n seed_servers:\n - host:\n address: \n port: \n----\n\n.Example with `empty_seed_starts_cluster: false`\n[,yaml]\n----\n# All initial seed brokers use the same configuration\nredpanda:\n empty_seed_starts_cluster: false\n seed_servers:\n - host:\n address: \n port: \n - host:\n address: \n port: \n - host:\n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: IP address of the cluster root broker\n* ``: IP addresses of each seed broker in the cluster\n* ``: RPC port for brokers (default: `33145`)", + "items": { + "type": "object" + }, + "name": "seed_servers", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "segment.bytes": { + "acceptable_values": "bytes (integer)", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "log_segment_size", + "description": "The maximum size of an active log segment for a topic. When the size of an active segment exceeds `segment.bytes`, the segment is closed and a new active segment is created. The closed, inactive segment is then eligible to be cleaned up according to retention properties.\n\nWhen `segment.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] for the topic.", + "is_deprecated": false, + "is_topic_property": true, + "name": "segment.bytes", + "related_topics": [ + "xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`]", + "xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]", + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "segment.ms": { + "acceptable_values": "milliseconds (integer)", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "segment_ms", + "description": "The maximum duration that a log segment of a topic is active (open for writes and not deletable). A periodic event, with `segment.ms` as its period, forcibly closes the active segment and transitions, or rolls, to a new active segment. 
The closed (inactive) segment is then eligible to be cleaned up according to cleanup and retention properties.\n\nIf set to a positive duration, `segment.ms` overrides the cluster property xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]. Values are automatically clamped between the cluster bounds set by xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] (default: 10 minutes) and xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] (default: 1 year). If your configured value exceeds these bounds, Redpanda uses the bound value and logs a warning. Check current cluster bounds with `rpk cluster config get log_segment_ms_min log_segment_ms_max`.", + "is_deprecated": false, + "is_topic_property": true, + "name": "segment.ms", + "related_topics": [ + "xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]", + "xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`]", + "xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`]", + "xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#log-rolling[Log rolling]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "integer" + }, + "segment_appender_flush_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1s milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum delay until buffered data is written.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "segment_appender_flush_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "segment_fallocation_step": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "32_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size for segments fallocation.", + "name": "segment_fallocation_step", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "seq_table_min_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "seq_table_min_size", + "needs_restart": true, + "nullable": false, + "type": null + }, + "space_management_enable": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Option to explicitly disable automatic disk space management. If this property was explicitly disabled while using v23.2, it will remain disabled following an upgrade.", + "name": "space_management_enable", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "space_management_enable_override": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enable automatic space management. 
This option is ignored and deprecated in versions >= v23.3.", + "name": "space_management_enable_override", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "space_management_max_log_concurrency": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "20", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum parallel logs inspected during space management process.", + "maximum": 65535, + "minimum": 0, + "name": "space_management_max_log_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "space_management_max_segment_concurrency": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum parallel segments inspected during space management process.", + "maximum": 65535, + "minimum": 0, + "name": "space_management_max_segment_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_compaction_index_memory": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "128_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes that may be used on each shard by compaction index writers.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "storage_compaction_index_memory", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_compaction_key_map_memory": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "128_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes that may be used on each shard by compaction key-offset maps. Only applies when <> is set to `true`.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "storage_compaction_key_map_memory", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_compaction_key_map_memory_limit_percent": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "12.0", + "defined_in": "src/v/config/configuration.cc", + "description": "Limit on <>, expressed as a percentage of memory per shard, that bounds the amount of memory used by compaction key-offset maps. 
\n\nNOTE: Memory per shard is computed after <>, and only applies when <> is set to `true`.", + "name": "storage_compaction_key_map_memory_limit_percent", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "storage_failure_injection_config_path": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the configuration file used for low level storage failure injection.", + "name": "storage_failure_injection_config_path", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "tunable" + }, + "storage_failure_injection_enabled": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "false", + "defined_in": "src/v/config/node_config.cc", + "description": "If `true`, inject low level storage failures on the write path. Do _not_ use for production instances.", + "name": "storage_failure_injection_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "storage_ignore_cstore_hints": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "When set, cstore hints are ignored and not used for data access (but are otherwise generated).", + "name": "storage_ignore_cstore_hints", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "storage_ignore_timestamps_in_future_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of seconds that a record's timestamp can be ahead of a Redpanda broker's clock and still be used when deciding whether to clean up the record for data retention. This property makes possible the timely cleanup of records from clients with clocks that are drastically unsynchronized relative to Redpanda.\n\nWhen determining whether to clean up a record with timestamp more than `storage_ignore_timestamps_in_future_sec` seconds ahead of the broker, Redpanda ignores the record's timestamp and instead uses a valid timestamp of another record in the same segment, or (if another record's valid timestamp is unavailable) the timestamp of when the segment file was last modified (mtime).\n\nBy default, `storage_ignore_timestamps_in_future_sec` is disabled (null).\n\n[TIP]\n====\nTo figure out whether to set `storage_ignore_timestamps_in_future_sec` for your system:\n\n. Look for logs with segments that are unexpectedly large and not being cleaned up.\n. In the logs, search for records with unsynchronized timestamps that are further into the future than tolerable by your data retention and storage settings. For example, timestamps 60 seconds or more into the future can be considered to be too unsynchronized.\n. 
If you find unsynchronized timestamps throughout your logs, determine the number of seconds that the timestamps are ahead of their actual time, and set `storage_ignore_timestamps_in_future_sec` to that value so data retention can proceed.\n. If you only find unsynchronized timestamps that are the result of transient behavior, you can disable `storage_ignore_timestamps_in_future_sec`.\n====", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "storage_ignore_timestamps_in_future_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "storage_max_concurrent_replay": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1024", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of partitions' logs that will be replayed concurrently at startup, or flushed concurrently on shutdown.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "storage_max_concurrent_replay", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_min_free_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5_GiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold of minimum bytes free space before rejecting producers.", + "name": "storage_min_free_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_read_buffer_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "128_KiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size of each read buffer (one per in-flight read, per log segment).", + "name": "storage_read_buffer_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_read_readahead_count": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1", + "defined_in": "src/v/config/configuration.cc", + "description": "How many additional reads to issue ahead of current read location.", + "maximum": 32767, + "minimum": -32768, + "name": "storage_read_readahead_count", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_reserve_min_segments": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "2", + "defined_in": "src/v/config/configuration.cc", + "description": "The number of segments per partition that the system will attempt to reserve disk capacity for. 
For example, if the maximum segment size is configured to be 100 MB, and the value of this option is 2, then in a system with 10 partitions Redpanda will attempt to reserve at least 2 GB of disk space.",
+                "maximum": 32767,
+                "minimum": -32768,
+                "name": "storage_reserve_min_segments",
+                "needs_restart": false,
+                "nullable": false,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "storage_space_alert_free_threshold_bytes": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "0",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Threshold of minimum bytes free space before setting storage space alert.",
+                "name": "storage_space_alert_free_threshold_bytes",
+                "needs_restart": false,
+                "nullable": false,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "storage_space_alert_free_threshold_percent": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "5",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Threshold of minimum percent free space before setting storage space alert.",
+                "maximum": 4294967295,
+                "minimum": 0,
+                "name": "storage_space_alert_free_threshold_percent",
+                "needs_restart": false,
+                "nullable": false,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "storage_strict_data_init": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "false",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Requires that an empty file named `.redpanda_data_dir` be present in the xref:reference:properties/broker-properties.adoc#data_directory[`data_directory`]. 
If set to `true`, Redpanda will refuse to start if the file is not found in the data directory.",
+                "name": "storage_strict_data_init",
+                "needs_restart": false,
+                "nullable": false,
+                "related_topics": [
+                    "xref:reference:properties/broker-properties.adoc#data_directory[`data_directory`]"
+                ],
+                "type": "boolean",
+                "visibility": "user"
+            },
+            "storage_target_replay_bytes": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "10_GiB",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Target bytes to replay from disk on startup after clean shutdown: controls frequency of snapshots and checkpoints.",
+                "maximum": 18446744073709551615,
+                "minimum": 0,
+                "name": "storage_target_replay_bytes",
+                "needs_restart": false,
+                "nullable": false,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "superusers": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": [],
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "List of superuser usernames.",
+                "items": {
+                    "type": "string"
+                },
+                "name": "superusers",
+                "needs_restart": false,
+                "nullable": false,
+                "type": "array",
+                "visibility": "user"
+            },
+            "target_fetch_quota_byte_rate": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": null,
+                "is_deprecated": true,
+                "name": "target_fetch_quota_byte_rate",
+                "needs_restart": true,
+                "nullable": false,
+                "type": null
+            },
+            "target_quota_byte_rate": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": null,
+                "is_deprecated": true,
+                "name": "target_quota_byte_rate",
+                "needs_restart": true,
+                "nullable": false,
+                "type": null
+            },
+            "tls_certificate_name_format": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "legacy",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "The format of the certificate's distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'.",
+                "name": "tls_certificate_name_format",
+                "needs_restart": false,
+                "nullable": false,
+                "type": "tls_name_format",
+                "visibility": "user"
+            },
+            "tls_enable_renegotiation": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "false",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "TLS client-initiated renegotiation is considered unsafe and is disabled by default. Only re-enable it if you are experiencing issues with your TLS-enabled client. 
This option has no effect on TLSv1.3 connections as client-initiated renegotiation was removed.", + "name": "tls_enable_renegotiation", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "tls_min_version": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "v1_2", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum TLS version that Redpanda clusters support. This property prevents client applications from negotiating a downgrade to the TLS version when they make a connection to a Redpanda cluster.", + "name": "tls_min_version", + "needs_restart": true, + "nullable": false, + "type": "tls_version", + "visibility": "user" + }, + "tm_sync_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "Transaction manager's synchronization timeout. Maximum time to wait for internal state machine to catch up before rejecting a request.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "tm_sync_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "tm_violation_recovery_policy": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "tm_violation_recovery_policy", + "needs_restart": true, + "nullable": false, + "type": null + }, + "tombstone_retention_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The retention time for tombstone records in a compacted topic. Cannot be enabled at the same time as any of `cloud_storage_enabled`, `cloud_storage_enable_remote_read`, or `cloud_storage_enable_remote_write`. A typical default setting is `86400000`, or 24 hours.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "tombstone_retention_ms", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]" + ], + "type": "integer", + "visibility": "user" + }, + "topic_fds_per_partition": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5", + "defined_in": "src/v/config/configuration.cc", + "description": "File descriptors required per partition replica. 
If topic creation results in the ratio of file descriptor limit to partition replicas being lower than this value, creation of new topics fails.",
+                "maximum": 2147483647,
+                "minimum": -2147483648,
+                "name": "topic_fds_per_partition",
+                "needs_restart": false,
+                "nullable": true,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "topic_label_aggregation_limit": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": null,
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "When the number of topics exceeds this limit, the topic label in generated metrics will be aggregated. If `null`, then there is no limit.",
+                "name": "topic_label_aggregation_limit",
+                "needs_restart": false,
+                "nullable": true,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "topic_memory_per_partition": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "DEFAULT_TOPIC_MEMORY_PER_PARTITION",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Required memory in bytes per partition replica when creating or altering topics. The total size of the memory pool for partitions is the total memory available to Redpanda times `topic_partitions_memory_allocation_percent`. Each partition created requires `topic_memory_per_partition` bytes from that pool. If insufficient memory is available, creating or altering topics fails.",
+                "name": "topic_memory_per_partition",
+                "needs_restart": false,
+                "nullable": true,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "topic_partitions_memory_allocation_percent": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "10",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Percentage of total memory to reserve for topic partitions. See <> for details.",
+                "maximum": 4294967295,
+                "minimum": 0,
+                "name": "topic_partitions_memory_allocation_percent",
+                "needs_restart": true,
+                "nullable": false,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "topic_partitions_per_shard": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "5000",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Maximum number of partition replicas per shard. If topic creation results in the ratio of partition replicas to shards being higher than this value, creation of new topics fails.",
+                "maximum": 4294967295,
+                "minimum": 0,
+                "name": "topic_partitions_per_shard",
+                "needs_restart": false,
+                "nullable": false,
+                "type": "integer",
+                "visibility": "tunable"
+            },
+            "topic_partitions_reserve_shard0": {
+                "cloud_byoc_only": false,
+                "cloud_editable": false,
+                "cloud_readonly": false,
+                "cloud_supported": false,
+                "config_scope": "cluster",
+                "default": "0",
+                "defined_in": "src/v/config/configuration.cc",
+                "description": "Reserved partition slots on shard (CPU core) 0 on each node. 
If this is greater than or equal to <>, no data partitions will be scheduled on shard 0.", + "maximum": 4294967295, + "minimum": 0, + "name": "topic_partitions_reserve_shard0", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transaction_coordinator_cleanup_policy": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "deletion", + "defined_in": "src/v/config/configuration.cc", + "description": "Cleanup policy for a transaction coordinator topic.\n\n*Accepted values:*\n\n* `compact`\n* `delete`\n* `[\"compact\",\"delete\"]`\n* `none`", + "name": "transaction_coordinator_cleanup_policy", + "needs_restart": false, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "transaction_coordinator_delete_retention_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10080min", + "defined_in": "src/v/config/configuration.cc", + "description": "Delete segments older than this age. To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to <>.\n\nFor example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "transaction_coordinator_delete_retention_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "transaction_coordinator_log_segment_size": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1_GiB", + "defined_in": "src/v/config/configuration.cc", + "description": "The size (in bytes) each log segment should be.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "transaction_coordinator_log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transaction_coordinator_partitions": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "50", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of partitions for transactions coordinator.", + "maximum": 2147483647, + "minimum": -2147483648, + "name": "transaction_coordinator_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transaction_coordinator_replication": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "transaction_coordinator_replication", + "needs_restart": true, + "nullable": false, + "type": null + }, + "transaction_max_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "15min", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum allowed timeout for transactions. 
If a client-requested transaction timeout exceeds this configuration, the broker returns an error during transactional producer initialization. This guardrail prevents hanging transactions from blocking consumer progress.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "transaction_max_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transactional_id_expiration_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10080min", + "defined_in": "src/v/config/configuration.cc", + "description": "Expiration time of producer IDs. Measured starting from the time of the last write until now for a given ID.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "transactional_id_expiration_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "tx_log_stats_interval_s": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10s", + "defined_in": "src/v/config/configuration.cc", + "description": "How often to log per partition tx stats, works only with debug logging enabled.", + "is_deprecated": true, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "tx_log_stats_interval_s", + "needs_restart": false, + "nullable": false, + "type": null, + "visibility": "deprecated" + }, + "tx_registry_log_capacity": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "tx_registry_log_capacity", + "needs_restart": true, + "nullable": false, + "type": null + }, + "tx_registry_sync_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "tx_registry_sync_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": null + }, + "tx_timeout_delay_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Delay before scheduling the next check for timed out transactions.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "tx_timeout_delay_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "unsafe_enable_consumer_offsets_delete_retention": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "Enables delete retention of consumer offsets topic. 
This is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "name": "unsafe_enable_consumer_offsets_delete_retention", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "upgrade_override_checks": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "false", + "defined_in": "src/v/config/node_config.cc", + "description": "Whether to violate safety checks when starting a Redpanda version newer than the cluster's consensus version.", + "name": "upgrade_override_checks", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "usage_disk_persistance_interval_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "60 * 5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval in which all usage stats are written to disk.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "usage_disk_persistance_interval_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "usage_num_windows": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "24", + "defined_in": "src/v/config/configuration.cc", + "description": "The number of windows to persist in memory and disk.", + "name": "usage_num_windows", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "usage_window_width_interval_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3600 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The width of a usage window, tracking cloud and kafka ingress/egress traffic each interval.", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "usage_window_width_interval_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "use_fetch_scheduler_group": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Use a separate scheduler group for fetch processing.", + "name": "use_fetch_scheduler_group", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "use_kafka_handler_scheduler_group": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Use a separate scheduler group to handle parsing Kafka protocol requests.", + "name": "use_kafka_handler_scheduler_group", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "use_produce_scheduler_group": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "true", + "defined_in": "src/v/config/configuration.cc", + "description": "Use a separate 
scheduler group to process Kafka produce requests.", + "name": "use_produce_scheduler_group", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "use_scheduling_groups": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "name": "use_scheduling_groups", + "needs_restart": true, + "nullable": false, + "type": null + }, + "verbose_logging_timeout_sec_max": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis.\n\n*Unit:* seconds", + "example": ".Example\n[,yaml]\n----\nschema_registry:\n schema_registry_api:\n address: 0.0.0.0\n port: 8081\n authentication_method: http_basic\n schema_registry_replication_factor: 3\n mode_mutability: true\n----", + "maximum": 17179869183, + "minimum": -17179869184, + "name": "verbose_logging_timeout_sec_max", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]", + "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]" + ], + "type": "integer", + "visibility": "tunable" + }, + "virtual_cluster_min_producer_ids": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "Maximum value", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum number of active producers per virtual cluster.", + "maximum": 18446744073709551615, + "minimum": 0, + "name": "virtual_cluster_min_producer_ids", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "wait_for_leader_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5'000ms", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout to wait for leadership in metadata cache.", + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "wait_for_leader_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "write.caching": { + "acceptable_values": "[`true`, `false`]", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "corresponding_cluster_property": "write_caching", + "description": "The write caching mode to apply to a topic.\n\nWhen `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. 
Fsyncs follow <> and <>, whichever is reached first.", + "is_deprecated": false, + "is_topic_property": true, + "name": "write.caching", + "related_topics": [ + "xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]", + "xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`]", + "xref:develop:config-topics.adoc#configure-write-caching[Write caching]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "source_file": "src/v/kafka/server/handlers/topics/types.h", + "type": "boolean" + }, + "write_caching_default": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "default_false", + "defined_in": "src/v/config/configuration.cc", + "description": "The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. \n\nFsyncs follow <> and <>, whichever is reached first.\n\nThe `write_caching_default` cluster property can be overridden with the xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] topic property.", + "name": "write_caching_default", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`]", + "xref:develop:config-topics.adoc#configure-write-caching[Write caching]" + ], + "type": "string", + "visibility": "user" + }, + "zstd_decompress_workspace_bytes": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "8_MiB", + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the zstd decompression workspace.", + "name": "zstd_decompress_workspace_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + } + } +} \ No newline at end of file diff --git a/modules/reference/pages/properties/broker-properties.adoc b/modules/reference/pages/properties/broker-properties.adoc index 90457644f8..83245b4a68 100644 --- a/modules/reference/pages/properties/broker-properties.adoc +++ b/modules/reference/pages/properties/broker-properties.adoc @@ -44,926 +44,58 @@ redpanda: port: 33145 ---- -=== admin +include::reference:partial$properties/broker-properties.adoc[tags=category-redpanda,!deprecated,!exclude-from-docs] -Network address for the glossterm:Admin API[] server. - -*Visibility:* `user` - -*Type:* array - -*Default:* `[{ address: "127.0.0.1", port: 9644 }]` - -.Example -[,yaml] ----- -redpanda: - admin: - - name: - address: - port: ----- - -Replace the following placeholders with your values: - -* ``: Name for the Admin API listener (TLS configuration is handled separately in the <> broker property) -* ``: The externally accessible hostname or IP address that clients use to connect to this broker -* ``: The port number for the Admin API endpoint - ---- - -=== admin_api_doc_dir - -Path to the API specifications for the Admin API. - -*Visibility:* `user` - -*Type:* string - -*Default:* `/usr/share/redpanda/admin-api-doc` - ---- - -=== admin_api_tls - -Specifies the TLS configuration for the HTTP Admin API. 
- -*Visibility:* `user` - -*Default:* `[]` - -.Example -[,yaml] ----- -redpanda: - admin_api_tls: - - name: - enabled: true - cert_file: - key_file: - truststore_file: - require_client_auth: true ----- - -Replace the following placeholders with your values: - -* ``: Name that matches your Admin API listener (defined in the <> broker property) -* ``: Full path to the TLS certificate file -* ``: Full path to the TLS private key file -* ``: Full path to the Certificate Authority file - ---- - -=== advertised_kafka_api - -Address of the Kafka API published to the clients. If not set, the <> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect. - -*Visibility:* `user` - -*Type:* array - -*Default:* `[]` - -.Example -[,yaml] ----- -redpanda: - advertised_kafka_api: - - name: - address: - port: ----- - -Replace the following placeholders with your values: - -* ``: Name that matches your Kafka API listener (defined in the <> broker property) -* ``: The externally accessible hostname or IP address that clients use to connect to this broker -* ``: The port number for the Kafka API endpoint - ---- - -=== advertised_rpc_api - -Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker. - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - -.Example -[,yaml] ----- -redpanda: - advertised_rpc_api: - address: - port: ----- - -Replace the following placeholders with your values: - -* ``: The externally accessible hostname or IP address that other brokers use to communicate with this broker -* ``: The port number for the RPC endpoint (default is 33145) - ---- - -=== cloud_storage_cache_directory - -Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. If not specified, Redpanda uses a default path within the data directory. - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - -.Example -[,yaml] ----- -redpanda: - cloud_storage_cache_directory: ----- - -Replace `` with the full path to your desired cache directory. - ---- - -=== cloud_storage_inventory_hash_store - -Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory. - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - -.Example -[,yaml] ----- -redpanda: - cloud_storage_inventory_hash_store: ----- - -Replace `` with the full path to your desired inventory hash storage directory. - ---- - -=== crash_loop_limit - -A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. This limit prevents a broker from getting stuck in an infinite cycle of crashes. - -If `null`, the property is disabled and no limit is applied. - -The crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions: - -* The broker shuts down cleanly. -* One hour passes since the last crash. -* The `redpanda.yaml` broker configuration file is updated. -* The `startup_log` file in the broker's <> broker property is manually deleted. 
- -*Unit*: number of consecutive crashes of a broker - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `5` - ---- - -=== crash_loop_sleep_sec - -*Introduced in v24.3.4* - -The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments. - -If `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit. - -For information about how to reset the crash loop limit, see the <> broker property. - -*Unit:* seconds - -*Visibility:* `user` - -*Type:* integer or null - -*Accepted values:* [`0`, `4294967295`] or `null` - -*Default:* `null` - ---- - -=== data_directory - -Path to the directory for storing Redpanda's streaming data files. - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== developer_mode - -CAUTION: Enabling `developer_mode` isn't recommended for production use. - -Enable developer mode, which skips most of the checks performed at startup. - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== emergency_disable_data_transforms - -Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. This is an emergency shutoff button. - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== empty_seed_starts_cluster - -Controls how a new cluster is formed. All brokers in a cluster must have the same value. - -<> to form a cluster. - -TIP: For backward compatibility, `true` is the default. Redpanda recommends using `false` in production environments to prevent accidental cluster formation. - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== fips_mode - -Controls whether Redpanda starts in FIPS mode. This property allows for three values: - -* Disabled - Redpanda does not start in FIPS mode. - -* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods. - -* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. If the file does not exist or does not return `1`, Redpanda immediately exits. - -*Visibility:* `user` - -*Accepted values:* `0` (disabled), `1` (permissive), `2` (enabled) - -*Default:* `0` (disabled) - ---- - -=== kafka_api - -IP address and port of the Kafka API endpoint that handles requests. Supports multiple listeners with different configurations. 
- -*Visibility:* `user` - -*Type:* array - -*Default:* `[{ address: "127.0.0.1", port: 9092 }]` - -.Basic example -[,yaml] ----- -redpanda: - kafka_api: - - address: - port: - authentication_method: sasl ----- - -.Multiple listeners example (for different networks or authentication methods) -[,yaml] ----- -redpanda: - kafka_api: - - name: - address: - port: - authentication_method: none - - name: - address: - port: - authentication_method: sasl - - name: - address: - port: - authentication_method: mtls_identity ----- - -Replace the following placeholders with your values: - -* ``: The IP address to bind the listener to (typically `0.0.0.0` for all interfaces) -* ``: The port number for the Kafka API endpoint -* ``: Name for internal network connections (for example, `internal`) -* ``: Name for external network connections (for example, `external`) -* ``: Name for mTLS connections (for example, `mtls`) -* ``: The IP address for internal connections -* ``: The port number for internal Kafka API connections -* ``: The IP address for external connections -* ``: The port number for external Kafka API connections -* ``: The IP address for mTLS connections -* ``: The port number for mTLS Kafka API connections - -[[kafka_api_auth_method]] -==== Authentication - -The `authentication_method` property configures authentication for Kafka API listeners. - -*Accepted values:* - -* `none` - No authentication required -* `sasl` - SASL authentication (specific mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property) -* `mtls_identity` - Mutual TLS authentication using client certificates - -*Default:* `none` - -When using `authentication_method: sasl`, you must also configure the available SASL mechanisms (such as SCRAM, PLAIN, GSSAPI, or OAUTHBEARER) using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property. - -For detailed authentication configuration, see xref:manage:security/authentication.adoc[]. - ---- - -=== kafka_api_tls - -Transport Layer Security (TLS) configuration for the Kafka API endpoint. - -*Visibility:* `user` - -*Default:* `[]` - -.Example -[,yaml] ----- -redpanda: - kafka_api_tls: - - name: - enabled: true - cert_file: - key_file: - truststore_file: - require_client_auth: false ----- - -Replace the following placeholders with your values: - -* ``: Name that matches your Kafka API listener (defined in the <> broker property) -* ``: Full path to the TLS certificate file -* ``: Full path to the TLS private key file -* ``: Full path to the Certificate Authority file - -NOTE: Set `require_client_auth: true` for mutual TLS (mTLS) authentication, or `false` for server-side TLS only. - ---- - -=== memory_allocation_warning_threshold - -Threshold for log messages that contain a larger memory allocation than specified. - -*Unit:* bytes -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `131073` (128_kib + 1) - ---- - -=== node_id - -A number that uniquely identifies the broker within the cluster. If `null` (the default value), Redpanda automatically assigns an ID. If set, it must be non-negative value. - -.Do not set `node_id` manually. -[WARNING] -==== -Redpanda assigns unique IDs automatically to prevent issues such as: - -- Brokers with empty disks rejoining the cluster. -- Conflicts during recovery or scaling. - -Manually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures. 
-==== - -Broker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed. - -*Accepted values:* [`0`, `4294967295`] - -*Type:* integer - -*Visibility:* `user` - -*Default:* `null` - ---- - -=== node_id_overrides - -List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID. - -Remove this property after the cluster restarts successfully and operates normally. This prevents reapplication and maintains consistent configuration across brokers. - -*Visibility:* `user` - -*Type:* array - -*Default:* `[]` - -.Example -[,yaml] ----- -redpanda: - node_id_overrides: - - current_uuid: "" - new_id: - new_uuid: "" - ignore_existing_node_id: - - current_uuid: "" - new_id: - new_uuid: "" - ignore_existing_node_id: ----- - -Replace the following placeholders with your values: - -* ``: The current UUID of the broker to override -* ``: The new broker ID to assign -* ``: The new UUID to assign to the broker -* ``: Set to `true` to force override on brokers that already have a node ID, or `false` to apply override only to brokers without existing node IDs -* ``: Additional broker UUID for multiple overrides -* ``: Additional new broker ID -* ``: Additional new UUID -* ``: Additional ignore existing node ID flag - ---- - -=== openssl_config_file - -Path to the configuration file used by OpenSSL to properly load the FIPS-compliant module. - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== openssl_module_directory - -Path to the directory that contains the OpenSSL FIPS-compliant module. The filename that Redpanda looks for is `fips.so`. - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== rack - -A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones. - -*Visibility:* `user` - -*Default:* `null` - ---- - -=== recovery_mode_enabled - -If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed. - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== rpc_server - -IP address and port for the Remote Procedure Call (RPC) server. - -*Visibility:* `user` - -*Default:* `127.0.0.1:33145` - ---- - -=== rpc_server_tls - -TLS configuration for the RPC server. - -*Visibility:* `user` - -*Default:* `{}` - -.Example -[,yaml] ----- -redpanda: - rpc_server_tls: - enabled: true - cert_file: "" - key_file: "" - truststore_file: "" - require_client_auth: true ----- - -Replace the following placeholders with your values: - -* ``: Full path to the RPC TLS certificate file -* ``: Full path to the RPC TLS private key file -* ``: Full path to the certificate authority file - ---- - -=== seed_servers - -List of the seed servers used to join current cluster. If the `seed_servers` list is empty the node will be a cluster root and it will form a new cluster. - -* When `empty_seed_starts_cluster` is `true`, Redpanda enables one broker with an empty `seed_servers` list to initiate a new cluster. The broker with an empty `seed_servers` becomes the cluster root, to which other brokers must connect to join the cluster. 
Brokers looking to join the cluster should have their `seed_servers` populated with the cluster root's address, facilitating their connection to the cluster. -+ -[IMPORTANT] -==== -Only one broker, the designated cluster root, should have an empty `seed_servers` list during the initial cluster bootstrapping. This ensures a single initiation point for cluster formation. -==== - -* When `empty_seed_starts_cluster` is `false`, Redpanda requires all brokers to start with a known set of brokers listed in `seed_servers`. The `seed_servers` list must not be empty and should be identical across these initial seed brokers, containing the addresses of all seed brokers. Brokers not included in the `seed_servers` list use it to discover and join the cluster, allowing for expansion beyond the foundational members. -+ -[NOTE] -==== -The `seed_servers` list must be consistent across all seed brokers to prevent cluster fragmentation and ensure stable cluster formation. -==== - -*Visibility:* `user` - -*Type:* array - -*Default:* `[]` - -.Example with `empty_seed_starts_cluster: true` -[,yaml] ----- -# Cluster root broker (seed starter) -redpanda: - empty_seed_starts_cluster: true - seed_servers: [] ----- - -[,yaml] ----- -# Additional brokers joining the cluster -redpanda: - empty_seed_starts_cluster: true - seed_servers: - - host: - address: - port: ----- - -.Example with `empty_seed_starts_cluster: false` -[,yaml] ----- -# All initial seed brokers use the same configuration -redpanda: - empty_seed_starts_cluster: false - seed_servers: - - host: - address: - port: - - host: - address: - port: - - host: - address: - port: ----- - -Replace the following placeholders with your values: - -* ``: IP address of the cluster root broker -* ``: IP addresses of each seed broker in the cluster -* ``: RPC port for brokers (default: `33145`) - ---- - -=== storage_failure_injection_config_path - -Path to the configuration file used for low level storage failure injection. - -*Visibility:* `tunable` - -*Type:* string - -*Default:* `null` - ---- - -=== storage_failure_injection_enabled - -If `true`, inject low level storage failures on the write path. Do _not_ use for production instances. - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== upgrade_override_checks - -Whether to violate safety checks when starting a Redpanda version newer than the cluster's consensus version. - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== verbose_logging_timeout_sec_max - -Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis. - -*Unit:* seconds - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `null` - ---- - - - -[[http_based_auth_method]] -== HTTP-Based Authentication - -The `authentication_method` property configures authentication for HTTP-based API listeners (Schema Registry and HTTP Proxy). - -*Accepted values:* -- `none` - No authentication required (allows anonymous access). -- `http_basic` - Authentication required. The specific authentication method (Basic vs OIDC) depends on the xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`] cluster property and the client's Authorization header type. 
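For example, a broker whose Schema Registry and HTTP Proxy listeners both require authentication could be sketched as follows (the addresses and ports shown are the documented defaults). Whether clients then authenticate with Basic credentials or OIDC tokens is still controlled by the `http_authentication` cluster property:

.Example
[,yaml]
----
schema_registry:
  schema_registry_api:
    address: 0.0.0.0
    port: 8081
    authentication_method: http_basic
pandaproxy:
  pandaproxy_api:
    address: 0.0.0.0
    port: 8082
    authentication_method: http_basic
----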
- -*Default:* `none` - -This property works together with the cluster property xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]: - -* `authentication_method` (broker property): Controls whether a specific listener requires authentication (`http_basic`) or allows anonymous access (`none`) -* `http_authentication` (cluster property): Controls which authentication methods are available globally (`["BASIC"]`, `["OIDC"]`, or `["BASIC", "OIDC"]`) - -When `authentication_method: http_basic` is set on a listener, clients can use any authentication method that is enabled in the `http_authentication` cluster property. - -For detailed authentication configuration, see xref:manage:security/authentication.adoc[]. - -== Schema Registry - -The Schema Registry provides configuration properties to help you enable producers and consumers to share information needed to serialize and deserialize producer and consumer messages. - -For information on how to edit broker properties for the Schema Registry, see xref:manage:cluster-maintenance/node-property-configuration.adoc[]. - -Schema Registry shares some configuration property patterns with HTTP Proxy (such as API listeners and authentication methods), but also has additional schema-specific properties like managing schema storage and validation behavior. - -**Shared properties:** - -* <> - API documentation directory (independent from HTTP Proxy's same-named property) -* <> - API listener configuration (similar to HTTP Proxy's <>) -* <> - TLS configuration (similar to HTTP Proxy's <>) - -.Example -[,yaml] ----- -schema_registry: - schema_registry_api: - address: 0.0.0.0 - port: 8081 - authentication_method: http_basic - schema_registry_replication_factor: 3 - mode_mutability: true ----- - ---- - -=== mode_mutability - -Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects. - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== schema_registry_api - -Schema Registry API listener address and port. - -*Visibility:* `user` - -*Type:* array - -*Default:* `[{ address: "0.0.0.0", port: 8081 }]` - -.Example -[,yaml] ----- -schema_registry: - schema_registry_api: - address: 0.0.0.0 - port: 8081 - authentication_method: http_basic ----- - -[[schema_registry_auth_method]] -==== Authentication - -For authentication configuration options, see <>. - ---- - -=== schema_registry_api_tls - -TLS configuration for Schema Registry API. - -*Visibility:* `user` - -*Default:* `[]` - ---- - -=== schema_registry_replication_factor - -Replication factor for internal `_schemas` topic. If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property. - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `null` - -*Related topics:* - -- Cluster property xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] -- Topic property xref:../topic-properties.adoc#default_topic_replication[`default_topic_replication`] - ---- - -== HTTP Proxy (pandaproxy) - -Redpanda HTTP Proxy (formerly called Pandaproxy) allows access to your data through a REST API. 
For example, you can list topics or brokers, get events, produce events, subscribe to events from topics using consumer groups, and commit offsets for a consumer. - -These properties configure the HTTP Proxy **server** - the REST API endpoint that external clients connect to. Configure these settings to control how clients authenticate to your HTTP Proxy, which network interfaces it listens on, and how it manages client connections. - -See xref:develop:http-proxy.adoc[] - -HTTP Proxy shares some configuration property patterns with Schema Registry (such as API listeners and authentication methods), but focuses on client management and proxy functionality. - -.Example -[,yaml] ----- -pandaproxy: - pandaproxy_api: - address: 0.0.0.0 - port: 8082 - authentication_method: http_basic - client_cache_max_size: 10 - client_keep_alive: 300000 - consumer_instance_timeout_ms: 300000 ----- - -=== api_doc_dir - -Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `/usr/share/redpanda/proxy-api-doc` - -NOTE: Both HTTP Proxy and Schema Registry have independent `api_doc_dir` properties that can be configured separately. However, they both default to the same path (`/usr/share/redpanda/proxy-api-doc`) since they typically use the same API documentation directory. - -=== advertised_pandaproxy_api - -Network address for the HTTP Proxy API server to publish to clients. - -*Visibility:* `user` - -*Default:* `null` - ---- - -=== client_cache_max_size - -The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage. - -*Visibility:* `user` - -*Type:* integer - -*Default:* `10` - ---- - -=== client_keep_alive - -Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API. - -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `300000` (5min) - ---- +[[http_based_auth_method]] +== HTTP-Based Authentication -=== consumer_instance_timeout_ms +The `authentication_method` property configures authentication for HTTP-based API listeners (Schema Registry and HTTP Proxy). -How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats. +*Accepted values:* +- `none` - No authentication required (allows anonymous access). +- `http_basic` - Authentication required. The specific authentication method (Basic vs OIDC) depends on the xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`] cluster property and the client's Authorization header type. 
-*Unit:* milliseconds +*Default:* `none` -*Visibility:* `user` +This property works together with the cluster property xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]: -*Type:* integer +* `authentication_method` (broker property): Controls whether a specific listener requires authentication (`http_basic`) or allows anonymous access (`none`) +* `http_authentication` (cluster property): Controls which authentication methods are available globally (`["BASIC"]`, `["OIDC"]`, or `["BASIC", "OIDC"]`) -*Accepted values:* [`-17592186044416`, `17592186044415`] +When `authentication_method: http_basic` is set on a listener, clients can use any authentication method that is enabled in the `http_authentication` cluster property. -*Default:* `300000` +For detailed authentication configuration, see xref:manage:security/authentication.adoc[]. ---- +== Schema Registry -=== pandaproxy_api +The Schema Registry provides configuration properties to help you enable producers and consumers to share information needed to serialize and deserialize producer and consumer messages. -Rest API listener address and port. +For information on how to edit broker properties for the Schema Registry, see xref:manage:cluster-maintenance/node-property-configuration.adoc[]. -*Visibility:* `user` +Schema Registry shares some configuration property patterns with HTTP Proxy (such as API listeners and authentication methods), but also has additional schema-specific properties like managing schema storage and validation behavior. -*Type:* array +**Shared properties:** -*Default:* `[{ address: "0.0.0.0", port: 8082 }]` +* <> - API documentation directory (independent from HTTP Proxy's same-named property) +* <> - API listener configuration (similar to HTTP Proxy's <>) +* <> - TLS configuration (similar to HTTP Proxy's <>) .Example [,yaml] ---- -pandaproxy: - pandaproxy_api: +schema_registry: + schema_registry_api: address: 0.0.0.0 - port: 8082 + port: 8081 authentication_method: http_basic + schema_registry_replication_factor: 3 + mode_mutability: true ---- -[[http_proxy_auth_method]] -==== Authentication - -For authentication configuration options, see <>. - --- -=== pandaproxy_api_tls - -TLS configuration for Pandaproxy API. - -*Visibility:* `user` +include::reference:partial$properties/broker-properties.adoc[tags=category-schema-registry,!deprecated,!exclude-from-docs] -*Default:* `[]` - ---- == HTTP Proxy Client @@ -996,297 +128,7 @@ Replace the following placeholders with your values: * ``: SCRAM username for authentication * ``: SCRAM password for authentication -=== broker_tls - -TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect. - -*Visibility:* `user` - ---- - -=== brokers - -Network addresses of the Kafka API servers to which the HTTP Proxy client should connect. - -*Visibility:* `user` - -*Type:* array - -*Default:* `['127.0.0.1:9092']` - ---- - -=== client_identifier - -Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities. - -*Visibility:* `user` - -*Type:* string - -*Default:* `test_client` - ---- - -=== consumer_heartbeat_interval_ms - -Interval (in milliseconds) for consumer heartbeats. - -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `500` - ---- - -=== consumer_rebalance_timeout_ms - -Timeout (in milliseconds) for consumer rebalance. 
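A minimal sketch of the consumer-group timing properties for the proxy's internal Kafka client, assuming they are set under the `pandaproxy_client` section of `redpanda.yaml` (the values shown are the documented defaults):

.Example
[,yaml]
----
pandaproxy_client:
  consumer_heartbeat_interval_ms: 500
  consumer_rebalance_timeout_ms: 2000
  consumer_session_timeout_ms: 10000
----

As with regular Kafka consumers, the heartbeat interval should stay comfortably shorter than the session timeout.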
- -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `2000` - ---- - -=== consumer_request_max_bytes - -Maximum bytes to fetch per request. - -*Unit:* bytes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `1048576` - ---- - -=== consumer_request_min_bytes - -Minimum bytes to fetch per request. - -*Unit:* bytes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `1` - ---- - -=== consumer_request_timeout_ms - -Interval (in milliseconds) for consumer request timeout. - -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== consumer_session_timeout_ms - -Timeout (in milliseconds) for consumer session. - -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== produce_ack_level - -Number of acknowledgments the producer requires the leader to have received before considering a request complete. - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* `-1`,`0`,`1` - -*Default:* `-1` - ---- - -=== produce_batch_delay_ms - -Delay (in milliseconds) to wait before sending batch. - -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== produce_batch_record_count - -Number of records to batch before sending to broker. - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `1000` - ---- - -=== produce_batch_size_bytes - -Number of bytes to batch before sending to broker. - -*Unit:* bytes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `1048576` - ---- - -=== produce_compression_type - -Enable or disable compression by the Kafka client. Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd]. - -*Visibility:* `user` - -*Type:* string - -*Default:* `none` - ---- - -=== produce_shutdown_delay_ms - -Delay (in milliseconds) to allow for final flush of buffers before shutting down. - -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `0` - ---- - -=== retries - -Number of times to retry a request to a broker. - -*Visibility:* `user` - -*Type:* integer - -*Default:* `5` - ---- - -=== retry_base_backoff_ms - -Delay (in milliseconds) for initial retry backoff. - -*Unit:* milliseconds - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== sasl_mechanism - -The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API. - -This property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property. 
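For example, a proxy client that authenticates to the Kafka API with SCRAM might be configured as in the following sketch, assuming these properties live under the `pandaproxy_client` section of `redpanda.yaml`. Replace the placeholders with an existing SCRAM user:

.Example
[,yaml]
----
pandaproxy_client:
  sasl_mechanism: SCRAM-SHA-256
  scram_username: <username>
  scram_password: <password>
----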
- -include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] - -*Visibility:* `user` - -*Type:* string - -*Accepted values:* `SCRAM-SHA-256`, `SCRAM-SHA-512` - -NOTE: While the cluster-wide xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] property may support additional mechanisms (PLAIN, GSSAPI, OAUTHBEARER), HTTP Proxy client connections only support SCRAM mechanisms. - -*Default:* `null` - ---- - -=== scram_password - -Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API. - -include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== scram_username - -Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API. - -include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- +include::reference:partial$properties/broker-properties.adoc[tags=category-pandaproxy-client,!deprecated,!exclude-from-docs] == Schema Registry Client diff --git a/modules/reference/pages/properties/cluster-properties.adoc b/modules/reference/pages/properties/cluster-properties.adoc index 0dd8017630..451d1cce9a 100644 --- a/modules/reference/pages/properties/cluster-properties.adoc +++ b/modules/reference/pages/properties/cluster-properties.adoc @@ -10,6560 +10,4 @@ NOTE: Some cluster properties require that you restart the cluster for any updat == Cluster configuration -=== abort_index_segment_size - -Capacity (in number of txns) of an abort index segment. - -Each partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `50000` - ---- - -=== abort_timed_out_transactions_interval_ms - -Interval, in milliseconds, at which Redpanda looks for inactive transactions and aborts them. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` (10 s) - ---- - -=== admin_api_require_auth - -Whether Admin API clients must provide HTTP basic authentication headers. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== aggregate_metrics - -Enable aggregation of metrics returned by the xref:reference:internal-metrics-reference.adoc[`/metrics`] endpoint. Aggregation can simplify monitoring by providing summarized data instead of raw, per-instance metrics. Metric aggregation is performed by summing the values of samples by labels and is done when it makes sense by the shard and/or partition labels. 
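Cluster properties such as this one are typically changed at runtime, for example with `rpk cluster config set aggregate_metrics true` or through `rpk cluster config edit`, rather than in `redpanda.yaml`. A minimal sketch of the key as it might appear in the editor:

.Example
[,yaml]
----
aggregate_metrics: true
----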
- -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== alive_timeout_ms - -The amount of time since the last broker status heartbeat. After this time, a broker is considered offline and not alive. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` - ---- - -=== alter_topic_cfg_timeout_ms - -The duration, in milliseconds, that Redpanda waits for the replication of entries in the controller log when executing a request to alter topic configurations. This timeout ensures that configuration changes are replicated across the cluster before the alteration request is considered complete. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` (5 s) - ---- - -=== append_chunk_size - -Size of direct write operations to disk in bytes. A larger chunk size can improve performance for write-heavy workloads, but increase latency for these writes as more data is collected before each write operation. A smaller chunk size can decrease write latency, but potentially increase the number of disk I/O operations. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `16384` - ---- - -=== audit_client_max_buffer_size - -Defines the number of bytes allocated by the internal audit client for audit messages. When changing this, you must disable audit logging and then re-enable it for the change to take effect. Consider increasing this if your system generates a very large number of audit records in a short amount of time. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Default:* `16777216` - ---- - -// tag::audit_enabled[] -=== audit_enabled - -ifndef::env-cloud[] -include::reference:partial$enterprise-licensed-property.adoc[] -endif::[] - -Enables or disables audit logging. When you set this to true, Redpanda checks for an existing topic named `_redpanda.audit_log`. If none is found, Redpanda automatically creates one for you. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -ifndef::env-cloud[] -*Enterprise license required*: `true` - -*Default:* `false` -endif::[] - ---- - -// end::audit_enabled[] - - -=== audit_enabled_event_types - -List of strings in JSON style identifying the event types to include in the audit log. This may include any of the following: `management, produce, consume, describe, heartbeat, authenticate, schema_registry, admin`. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -*Default:* `[management, authenticate, admin]` - ---- - - -// tag::audit_excluded_principals[] -=== audit_excluded_principals - -List of user principals to exclude from auditing. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - ---- - -// end::audit_excluded_principals[] - - -// tag::audit_excluded_topics[] -=== audit_excluded_topics - -List of topics to exclude from auditing. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - ---- - -// end::audit_excluded_topics[] - ---- - -=== audit_failure_policy - -Defines the policy for rejecting audit log messages when the audit log queue is full. If set to 'permit', then new audit messages are dropped and the operation is permitted. 
If set to 'reject', then the operation is rejected. - -*Requires restart:* No - -*Visibility:* `user` - -*Default:* `audit_failure_policy::reject` - ---- - -// tag::audit_log_num_partitions[] -=== audit_log_num_partitions - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -Defines the number of partitions used by a newly-created audit topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics. - -*Unit:* number of partitions per topic - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -ifndef::env-cloud[] -*Default:* `12` -endif::[] - ---- - -// end::audit_log_num_partitions[] - -=== audit_log_replication_factor - -Defines the replication factor for a newly-created audit log topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics. Setting this value is optional. If a value is not provided, Redpanda will use the value specified for `internal_topic_replication_factor`. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `null` - ---- - -=== audit_queue_drain_interval_ms - -Interval, in milliseconds, at which Redpanda flushes the queued audit log messages to the audit log topic. Longer intervals may help prevent duplicate messages, especially in high throughput scenarios, but they also increase the risk of data loss during shutdowns where the queue is lost. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `500` - ---- - -=== audit_queue_max_buffer_size_per_shard - -Defines the maximum amount of memory in bytes used by the audit buffer in each shard. Once this size is reached, requests to log additional audit messages will return a non-retryable error. Limiting the buffer size per shard helps prevent any single shard from consuming excessive memory due to audit log messages. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1048576` - ---- - - -// tag::auto_create_topics_enabled[] -=== auto_create_topics_enabled - -Allow automatic topic creation. To prevent excess topics, this property is not supported on Redpanda Cloud BYOC and Dedicated clusters. You should explicitly manage topic creation for these Redpanda Cloud clusters. - -If you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -ifndef::env-cloud[] -*Default:* `false` -endif::[] - ---- - -// end::auto_create_topics_enabled[] - -=== cluster_id - -NOTE: This property is read-only in Redpanda Cloud. - -Cluster identifier. - -*Requires restart:* No - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - - -// tag::compacted_log_segment_size[] -=== compacted_log_segment_size - -Size (in bytes) for each compacted log segment. 
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `268435456` - ---- - -// end::compacted_log_segment_size[] - -=== compaction_ctrl_backlog_size - -Target backlog size for compaction controller. If not set the max backlog size is configured to 80% of total disk space available. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== compaction_ctrl_d_coeff - -Derivative coefficient for compaction PID controller. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.2` - ---- - -=== compaction_ctrl_i_coeff - -Integral coefficient for compaction PID controller. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.0` - ---- - -=== compaction_ctrl_max_shares - -Maximum number of I/O and CPU shares that compaction process can use. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `1000` - ---- - -=== compaction_ctrl_min_shares - -Minimum number of I/O and CPU shares that compaction process can use. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `10` - ---- - -=== compaction_ctrl_p_coeff - -Proportional coefficient for compaction PID controller. This must be negative, because the compaction backlog should decrease when the number of compaction shares increases. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `-12.5` - ---- - -=== compaction_ctrl_update_interval_ms - -The interval (in milliseconds) for updating the controller responsible for compaction tasks. The controller uses this interval to decide how to prioritize background compaction work, which is essential for maintaining efficient storage use. - -This is an internal-only configuration and should be enabled only after consulting with Redpanda support. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `30000` (30 s) - ---- - -=== consumer_group_lag_collection_interval_sec - -How often to run the collection loop when <> contains `consumer_lag`. - -Reducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `60` - ---- - -=== consumer_offsets_topic_batch_cache_enabled - -This property lets you enable batch caching for the consumer offsets topic. By default, the cache for this topic is disabled. Changing this property is not recommended in production systems as it may affect performance. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== controller_backend_housekeeping_interval_ms - -Interval between iterations of controller backend housekeeping loop. 
- -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1000` (1 s) - ---- - -=== controller_log_accummulation_rps_capacity_acls_and_users_operations - -Maximum capacity of rate limit accumulation in controller ACLs and users operations limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== controller_log_accummulation_rps_capacity_configuration_operations - -Maximum capacity of rate limit accumulation in controller configuration operations limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== controller_log_accummulation_rps_capacity_move_operations - -Maximum capacity of rate limit accumulation in controller move operations limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== controller_log_accummulation_rps_capacity_node_management_operations - -Maximum capacity of rate limit accumulation in controller node management operations limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== controller_log_accummulation_rps_capacity_topic_operations - -Maximum capacity of rate limit accumulation in controller topic operations limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== controller_snapshot_max_age_sec - -Maximum amount of time before Redpanda attempts to create a controller snapshot after a new controller command appears. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `60` - ---- - -=== core_balancing_continuous - -include::reference:partial$enterprise-licensed-property.adoc[] - -If set to `true`, move partitions between cores in runtime to maintain balanced partition distribution. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Enterprise license required*: `true` - -*Default:* `false` - ---- - -=== core_balancing_debounce_timeout - -Interval, in milliseconds, between trigger and invocation of core balancing. - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` (10 s) - ---- - -=== core_balancing_on_core_count_change - -If set to `true`, and if after a restart the number of cores changes, Redpanda will move partitions between cores to maintain balanced partition distribution. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== cpu_profiler_enabled - -Enables CPU profiling for Redpanda. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cpu_profiler_sample_period_ms - -The sample period for the CPU profiler. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== create_topic_timeout_ms - -Timeout, in milliseconds, to wait for new topic creation. 
- -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `2000` - ---- - - -// tag::data_transforms_binary_max_size[] -=== data_transforms_binary_max_size - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -The maximum size for a deployable WebAssembly binary that the broker can store. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `10485760` - ---- - -// end::data_transforms_binary_max_size[] - -=== data_transforms_commit_interval_ms - -The commit interval at which data transforms progress. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3000` - ---- - - -// tag::data_transforms_enabled[] -=== data_transforms_enabled - -Enables WebAssembly-powered data transforms directly in the broker. When `data_transforms_enabled` is set to `true`, Redpanda reserves memory for data transforms, even if no transform functions are currently deployed. This memory reservation ensures that adequate resources are available for transform functions when they are needed, but it also means that some memory is allocated regardless of usage. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -ifndef::env-cloud[] -*Default:* `false` -endif::[] - ---- - -// end::data_transforms_enabled[] - - -=== data_transforms_logging_buffer_capacity_bytes - -Buffer capacity for transform logs, per shard. Buffer occupancy is calculated as the total size of buffered log messages; that is, logs emitted but not yet produced. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `512000` - ---- - -=== data_transforms_logging_flush_interval_ms - -Flush interval for transform logs. When a timer expires, pending logs are collected and published to the `transform_logs` topic. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `500` - ---- - -// tag::data_transforms_logging_line_max_bytes[] -=== data_transforms_logging_line_max_bytes - -Transform log lines truncate to this length. Truncation occurs after any character escaping. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -ifndef::env-cloud[] -*Default:* `1024` -endif::[] - ---- - -// end::data_transforms_logging_line_max_bytes[] - -// tag::data_transforms_per_core_memory_reservation[] -=== data_transforms_per_core_memory_reservation - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -The amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Aliases:* `wasm_per_core_memory_reservation` - -ifndef::env-cloud[] -*Default:* `20971520` -endif::[] - ---- - -// end::data_transforms_per_core_memory_reservation[] - -// tag::data_transforms_per_function_memory_limit[] -=== data_transforms_per_function_memory_limit - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. 
-endif::[] - -The amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Aliases:* `wasm_per_function_memory_limit` - -*Default:* `2097152` - ---- - -// end::data_transforms_per_function_memory_limit[] - - -=== data_transforms_read_buffer_memory_percentage - -include::reference:partial$internal-use-property.adoc[] - -The percentage of available memory in the transform subsystem to use for read buffers. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `45` - ---- - -=== data_transforms_runtime_limit_ms - -The maximum amount of runtime to start up a data transform, and the time it takes for a single record to be transformed. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3000` - ---- - -=== data_transforms_write_buffer_memory_percentage - -include::reference:partial$internal-use-property.adoc[] - -The percentage of available memory in the transform subsystem to use for write buffers. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `45` - ---- - -=== datalake_coordinator_snapshot_max_delay_secs - -Maximum amount of time the coordinator waits to snapshot after a command appears in the log. - -*Unit*: seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `900` - ---- - -=== datalake_disk_space_monitor_enable - -Option to explicitly disable enforcement of datalake disk space usage. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== datalake_disk_usage_overage_coeff - -The datalake disk usage monitor reclaims the overage multiplied by this this coefficient to compensate for data that is written during the idle period between control loop invocations. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `2.0` - ---- - -=== datalake_scheduler_disk_reservation_block_size - -The size, in bytes, of the block of disk reservation that the datalake manager will assign to each datalake scheduler when it runs out of local reservation. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `50_mib` - ---- - -=== datalake_scratch_space_size_bytes - -Size, in bytes, of the amount of scratch space datalake should use. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `5_gib` - ---- - -=== datalake_scratch_space_soft_limit_size_percent - -Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value. - -*Unit:* percent - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* number - -*Default:* `80.0` - ---- - -=== datalake_translator_flush_bytes - -Size, in bytes, of the amount of per translator data that may be flushed to disk before the translator will upload and remove its current on disk data. 
- -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `32_mib` - ---- - -=== datalake_scheduler_block_size_bytes - -Size, in bytes, of each memory block reserved for record translation, as tracked by the datalake scheduler. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `4_mib` - ---- - -=== datalake_scheduler_max_concurrent_translations - -The maximum number of translations that the datalake scheduler will allow to run at a given time. If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `4` - ---- - -=== datalake_scheduler_time_slice_ms - -Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== debug_bundle_auto_removal_seconds - -If set, how long debug bundles are kept in the debug bundle storage directory after they are created. If not set, debug bundles are kept indefinitely. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `null` - ---- - -=== debug_bundle_storage_dir - -Path to the debug bundle storage directory. Note: Changing this path does not clean up existing debug bundles. If not set, the debug bundle is stored in the Redpanda data directory specified in the redpanda.yaml broker configuration file. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== debug_load_slice_warning_depth - -The recursion depth after which debug logging is enabled automatically for the log reader. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== default_leaders_preference - -include::reference:partial$enterprise-licensed-property.adoc[] - -Default settings for preferred location of topic partition leaders. It can be either "none" (no preference), or "racks:,,..." (prefer brokers with rack ID from the list). - -The list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks. - -If config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster. - -*Requires restart:* No - -*Visibility:* `user` - -*Enterprise license required*: Any value other than the default `none` - -*Default:* `none` - -*Related topics*: - -- xref:develop:produce-data/leader-pinning.adoc[Leader pinning] - ---- - -=== default_num_windows - -Default number of quota tracking windows. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `10` - ---- - - -=== default_topic_partitions - -Default number of partitions per topic. 
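For example, a cluster that should create new topics with three partitions and three replicas by default might set this property together with `default_topic_replications` (described later in this section). The values are illustrative and are shown as the keys might appear in `rpk cluster config edit`:

.Example
[,yaml]
----
default_topic_partitions: 3
default_topic_replications: 3
----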
- -*Unit:* number of partitions per topic - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `1` - ---- - - -// tag::default_topic_replications[] -=== default_topic_replications - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. In Redpanda Cloud, all new topics are created with a replication factor of 3. -endif::[] - -Default replication factor for new topics. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -ifndef::env-cloud[] -*Default:* `1` -endif::[] - ---- - -// end::default_topic_replications[] - -=== default_window_sec - -Default quota tracking window size in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1000` - ---- - -=== development_enable_cloud_topics - -Enable cloud topics. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== disable_batch_cache - -Disable batch cache in log manager. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== disable_cluster_recovery_loop_for_tests - -include::reference:partial$internal-use-property.adoc[] - -Disables the cluster recovery loop. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - - -=== disable_metrics - -Disable registering the metrics exposed on the internal `/metrics` endpoint. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - - -=== disable_public_metrics - -Disable registering the metrics exposed on the `/public_metrics` endpoint. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - - -=== disk_reservation_percent - -The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well -as when cloud cache uses a dedicated disk. - -It is recommended to not run disks near capacity to avoid blocking I/O due to low disk space, as well as avoiding performance issues associated with SSD garbage collection. - -*Unit*: percentage of total disk size. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `25.0` - ---- - -=== enable_cluster_metadata_upload_loop - -Enables cluster metadata uploads. Required for xref:manage:whole-cluster-restore.adoc[whole cluster restore]. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -// tag::enable_consumer_group_metrics[] -=== enable_consumer_group_metrics - -List of enabled consumer group metrics. Accepted values include: - -- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics. -- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric. 
-- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics -+ -Enabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions. -+ -ifndef::env-cloud[] -Use the xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`] property to control the frequency of consumer lag metric collection. -endif::[] - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -ifndef::env-cloud[] -*Default:* `["group", "partition"]` - -*Related topics*: - -- xref:manage:monitoring.adoc#consumers[Monitor consumer group lag] -endif::[] - -ifdef::env-cloud[] -*Related topics*: - -- xref:manage:monitor-cloud.adoc#consumers[Monitor consumer group lag] -endif::[] - ---- -// end::enable_consumer_group_metrics[] - -=== enable_controller_log_rate_limiting - -Limits the write rate for the controller log. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - - -=== enable_idempotence - -Enable idempotent producers. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - - -=== enable_host_metrics - -Enable exporting of some host metrics like `/proc/diskstats`, `/proc/snmp` and `/proc/net/netstat`. - -Host metrics are prefixed with xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] and are available on the `/metrics` endpoint. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== enable_leader_balancer - -Enable automatic leadership rebalancing. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== enable_metrics_reporter - -Enable the cluster metrics reporter. If `true`, the metrics reporter collects and exports to Redpanda Data a set of customer usage metrics at the interval set by <>. - -[NOTE] -==== -The cluster metrics of the metrics reporter are different from xref:manage:monitoring.adoc[monitoring metrics]. - -* The metrics reporter exports customer usage metrics for consumption by Redpanda Data. -* Monitoring metrics are exported for consumption by Redpanda users. -==== - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== enable_mpx_extensions - -Enable Redpanda extensions for MPX. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== enable_pid_file - -Enable PID file. You should not need to change. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== enable_rack_awareness - -Enable rack-aware replica assignment. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== enable_sasl - -Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <>. 
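Before enabling this, make sure administrative clients can still authenticate. The following is a minimal sketch, assuming a SCRAM user named `admin` already exists and the cluster's `superusers` list includes it (the user name is a placeholder for illustration); the keys are shown as they might appear in `rpk cluster config edit`:

.Example
[,yaml]
----
enable_sasl: true
superusers:
  - admin
----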
- -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - - -=== enable_schema_id_validation - -ifndef::env-cloud[] -include::reference:partial$enterprise-licensed-property.adoc[] -endif::[] - -Mode to enable server-side schema ID validation. - -*Related topics*: - -* xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation] - -*Requires restart:* No - -*Visibility:* `user` - -*Accepted Values*: - -* `none`: Schema validation is disabled (no schema ID checks are done). Associated topic properties cannot be modified. -* `redpanda`: Schema validation is enabled. Only Redpanda topic properties are accepted. -* `compat`: Schema validation is enabled. Both Redpanda and compatible topic properties are accepted. - -*Enterprise license required*: `compat` , `redpanda` - -*Default:* `none` - ---- - - -=== enable_transactions - -Enable transactions (atomic writes). - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - - -=== enable_usage - -Enables the usage tracking mechanism, storing windowed history of kafka/cloud_storage metrics over time. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== features_auto_enable - -Whether new feature flags auto-activate after upgrades (true) or must wait for manual activation via the Admin API (false). - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - - -=== fetch_max_bytes - -Maximum number of bytes returned in a fetch request. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Default:* `57671680` - ---- - - -=== fetch_pid_d_coeff - -Derivative coefficient for fetch PID controller. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.0` - ---- - -=== fetch_pid_i_coeff - -Integral coefficient for fetch PID controller. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.01` - ---- - - -=== fetch_pid_max_debounce_ms - -The maximum debounce time the fetch PID controller will apply, in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - - -=== fetch_pid_p_coeff - -Proportional coefficient for fetch PID controller. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `100.0` - ---- - -=== fetch_pid_target_utilization_fraction - -A fraction, between 0 and 1, for the target reactor utilization of the fetch scheduling group. - -*Unit:* fraction - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.2` - ---- - - -=== fetch_read_strategy - -The strategy used to fulfill fetch requests. - -* `polling`: Repeatedly polls every partition in the request for new data. The polling interval is set by <> (deprecated). - -* `non_polling`: The backend is signaled when a partition has new data, so Redpanda doesn’t need to repeatedly read from every partition in the fetch. Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization. - -* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by <> at the start of each fetch. 
By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Accepted Values:* `polling`, `non_polling`, `non_polling_with_debounce` - -*Default:* `non_polling` - ---- - - -=== fetch_reads_debounce_timeout - -Time to wait for the next read in fetch requests when the requested minimum bytes was not reached. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1` - ---- - -// end::fetch_reads_debounce_timeout[] - -=== fetch_session_eviction_timeout_ms - -Time duration after which the inactive fetch session is removed from the fetch session cache. Fetch sessions are used to implement the incremental fetch requests where a consumer does not send all requested partitions to the server but the server tracks them for the consumer. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `60000` - ---- - - -=== group_initial_rebalance_delay - -Delay added to the rebalance phase to wait for new members. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3000` - ---- - - -=== group_max_session_timeout_ms - -The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `300000` - ---- - - -=== group_min_session_timeout_ms - -The minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `6000` - ---- - - -=== group_new_member_join_timeout - -Timeout for new member joins. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - - -=== group_offset_retention_check_ms - -Frequency rate at which the system should check for expired group offsets. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `600000` (10 min) - ---- - - -=== group_offset_retention_sec - -Consumer group offset retention seconds. To disable offset retention, set this to null. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `604800` (one week) - ---- - - -=== group_topic_partitions - -Number of partitions in the internal group membership topic. 
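For illustration only, the property is adjusted like any other cluster property; the `3` below is an arbitrary value for a small development cluster, not a recommendation:

[,bash]
----
# Set the partition count of the internal group membership topic (example value).
rpk cluster config set group_topic_partitions 3

# Confirm the new value.
rpk cluster config get group_topic_partitions
----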
- -*Unit:* number of partitions per topic - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `16` - ---- - - -=== health_manager_tick_interval - -How often the health manager runs. - -*Unit:* milliseconds -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `180000` (3 min) - ---- - -=== health_monitor_max_metadata_age - -Maximum age of the metadata cached in the health monitor of a non-controller broker. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -// tag::http_authentication[] -=== http_authentication - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -ifndef::env-cloud[] -include::reference:partial$enterprise-licensed-property.adoc[] -endif::[] - -A list of supported HTTP authentication mechanisms. Accepted Values: `BASIC`, `OIDC`. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -*Accepted Values:* `BASIC`, `OIDC` - -ifndef::env-cloud[] -*Enterprise license required*: `OIDC` - -*Default:* `[basic]` -endif::[] - ---- - -// end::http_authentication[] - ---- - -=== iceberg_backlog_controller_i_coeff - -Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.005` - ---- - -// tag::iceberg_backlog_controller_p_coeff[] -=== iceberg_backlog_controller_p_coeff - -Proportional coefficient for the Iceberg backlog controller. Number of shares assigned to the datalake scheduling group will be proportional to the backlog size error. A negative value means larger and faster changes in the number of shares in the datalake scheduling group. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `-0.0003` - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- - -// end::iceberg_backlog_controller_p_coeff[] - -// tag::iceberg_catalog_base_location[] -=== iceberg_catalog_base_location - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -Base path for the object-storage-backed Iceberg catalog. After Iceberg is enabled, do not change this value. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `redpanda-iceberg-catalog` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- - -// end::iceberg_catalog_base_location[] - - -=== iceberg_catalog_commit_interval_ms - -The frequency at which the Iceberg coordinator commits topic files to the catalog. This is the interval between commit transactions across all topics monitored by the coordinator, not the interval between individual commits. 
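For example, lowering the commit interval makes Iceberg table updates visible sooner at the cost of more frequent catalog transactions; the `30000` below is an illustrative value, not a recommendation:

[,bash]
----
# Commit Iceberg topic files to the catalog every 30 seconds instead of the 60-second default.
rpk cluster config set iceberg_catalog_commit_interval_ms 30000
----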
- -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `60000` - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - - -// tag::iceberg_catalog_type[] -=== iceberg_catalog_type - -Iceberg catalog type that Redpanda will use to commit table metadata updates. Supported types: `rest`, `object_storage`. - -NOTE: You must set <> at the same time that you set `iceberg_catalog_type` to `rest`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Accepted values:* `rest`, `object_storage` - -ifndef::env-cloud[] -*Default:* `object_storage` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_catalog_type[] - -// tag::iceberg_default_partition_spec[] -=== iceberg_default_partition_spec - -ifndef::env-cloud[] -Default value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`] topic property that determines the partition spec for the Iceberg table corresponding to the topic. -endif::[] - -ifdef::env-cloud[] -Default value for the `redpanda.iceberg.partition.spec` topic property that determines the partition spec for the Iceberg table corresponding to the topic. -endif::[] - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `(hour(redpanda.timestamp))` - -Partitions the topic by extracting the hour from `redpanda.timestamp`, grouping records by hour to optimize queries. -endif::[] - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc#enable-iceberg-integration[Enable Iceberg integration] - ---- - -// end::iceberg_default_partition_spec[] - -// tag::iceberg_delete[] -=== iceberg_delete - -Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -ifndef::env-cloud[] -*Default:* `true` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- - -// end::iceberg_delete[] - -=== iceberg_disable_automatic_snapshot_expiry - -Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -// tag::iceberg_disable_snapshot_tagging[] -=== iceberg_disable_snapshot_tagging - -Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -ifndef::env-cloud[] -*Default:* `false` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- - -// end::iceberg_disable_snapshot_tagging[] - -// tag::iceberg_enabled[] -=== iceberg_enabled - -ifndef::env-cloud[] -Enables the translation of topic data into Iceberg tables. 
Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`] topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings. -endif::[] - -ifdef::env-cloud[] -Enables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the `redpanda.iceberg.enabled` topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings. -endif::[] - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -ifndef::env-cloud[] -*Default:* `false` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- - -// end::iceberg_enabled[] - -// tag::iceberg_invalid_record_action[] -=== iceberg_invalid_record_action - -ifndef::env-cloud[] -Default value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`] topic property. -endif::[] - -ifdef::env-cloud[] -Default value for the `redpanda.iceberg.invalid.record.action` topic property. -endif::[] - -*Requires restart:* No - -*Visibility:* `user` - -ifndef::env-cloud[] -*Default:* `dlq_table` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors] - ---- - -// end::iceberg_invalid_record_action[] - -=== iceberg_latest_schema_cache_ttl_ms - -The TTL for caching the latest schema during translation when using the xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`] iceberg mode. This setting controls how long the latest schema remains cached during translation, which affects schema refresh behavior and performance. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `300000` - ---- - -// tag::iceberg_rest_catalog_authentication_mode[] -=== iceberg_rest_catalog_authentication_mode - -The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, and `oauth2`. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri.`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditionally, and no attempts are made to refresh the token. - -ifdef::env-cloud[] -Redpanda recommends using `oauth2`. -endif::[] - -*Requires restart:* Yes - -*Visibility:* `user` - -ifndef::env-cloud[] -*Default:* `none` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -=== iceberg_rest_catalog_aws_access_key - -AWS access key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] when using aws_sigv4 authentication mode. 
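As a sketch of supplying explicit SigV4 credentials rather than relying on the `cloud_storage_*` fallbacks (the key, secret, and region values are placeholders; confirm that your Redpanda version accepts an `aws_sigv4` authentication mode before relying on it):

[,bash]
----
# Placeholder credentials for SigV4-signed requests to an AWS-hosted REST catalog (for example, AWS Glue).
rpk cluster config set iceberg_rest_catalog_aws_access_key AKIAEXAMPLE
rpk cluster config set iceberg_rest_catalog_aws_secret_key EXAMPLESECRET
rpk cluster config set iceberg_rest_catalog_aws_region us-east-1

# These properties take effect only after a broker restart.
----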
- -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== iceberg_rest_catalog_credentials_source - -ifndef::env-cloud[] -Source of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode. -endif::[] - -ifdef::env-cloud[] -Source of AWS credentials for Iceberg REST catalog SigV4 authentication. If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`. -endif::[] - -*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Aliases:* `iceberg_rest_catalog_aws_credentials_source` - -*Default:* `null` - ---- - -=== iceberg_rest_catalog_aws_region - -AWS region for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] when using aws_sigv4 authentication mode. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== iceberg_rest_catalog_aws_secret_key - -AWS secret key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`] when using aws_sigv4 authentication mode. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== iceberg_rest_catalog_aws_service_name - -AWS service name for SigV4 signing when using aws_sigv4 authentication mode. Defaults to 'glue' for AWS Glue Data Catalog. Can be changed to support other AWS services that provide Iceberg REST catalog APIs. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `glue` - ---- - -// end::iceberg_rest_catalog_authentication_mode[] - -// tag::iceberg_rest_catalog_base_location[] - -=== iceberg_rest_catalog_base_location - -NOTE: This property is available in Redpanda version 25.1.7 and later. - -Base URI for the Iceberg REST catalog. If unset, the REST catalog server determines the location. Some REST catalogs, like AWS Glue, require the client to set this. After Iceberg is enabled, do not change this value. -NOTE: Specify `iceberg_rest_catalog_base_location` only when your catalog explicitly requires it (for example, for AWS Glue); otherwise, leave it unset and the REST catalog will choose the location. If you provide a value, it must use the same S3 bucket as `cloud_storage_bucket` because Tiered Storage does not support separate Iceberg buckets. Once Iceberg is enabled, do not change this value. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -// end::iceberg_rest_catalog_base_location[] - -// tag::iceberg_rest_catalog_client_id[] -=== iceberg_rest_catalog_client_id - -The client ID used to query the REST catalog API for the OAuth token. Required if catalog type is set to `rest`. 
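For example, a minimal sketch of pointing Redpanda at a REST catalog with OAuth2 credentials; the endpoint, client ID, and secret are placeholders, and all of these properties require a broker restart:

[,bash]
----
# Switch the catalog integration to the REST catalog and set its endpoint (placeholder URL).
rpk cluster config set iceberg_catalog_type rest
rpk cluster config set iceberg_rest_catalog_endpoint https://catalog.example.com

# OAuth2 client credentials used to obtain bearer tokens (placeholder values).
rpk cluster config set iceberg_rest_catalog_authentication_mode oauth2
rpk cluster config set iceberg_rest_catalog_client_id my-client-id
rpk cluster config set iceberg_rest_catalog_client_secret my-client-secret
----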
- -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_client_id[] - -// tag::iceberg_rest_catalog_client_secret[] -=== iceberg_rest_catalog_client_secret - -Secret used with the client ID to query the OAuth token endpoint for Iceberg REST catalog authentication. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_client_secret[] - -// tag::iceberg_rest_catalog_crl[] -=== iceberg_rest_catalog_crl - -The contents of a certificate revocation list for `iceberg_rest_catalog_trust`. Takes precedence over `iceberg_rest_catalog_crl_file`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - ---- - -// end::iceberg_rest_catalog_crl[] - -=== iceberg_rest_catalog_crl_file - -Path to certificate revocation list for `iceberg_rest_catalog_trust_file`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// tag::iceberg_rest_catalog_endpoint[] -=== iceberg_rest_catalog_endpoint - -URL of Iceberg REST catalog endpoint. - -NOTE: If you set <> to `rest`, you must also set this property at the same time. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_endpoint[] - -// tag::iceberg_rest_catalog_oauth2_server_uri[] -=== iceberg_rest_catalog_oauth2_server_uri - -The OAuth URI used to retrieve access tokens for Iceberg REST catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_oauth2_server_uri[] - -// tag::iceberg_rest_catalog_oauth2_scope[] -=== iceberg_rest_catalog_oauth2_scope - -The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `principal_role:all` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_oauth2_scope[] - -// tag::iceberg_rest_catalog_request_timeout_ms[] -=== iceberg_rest_catalog_request_timeout_ms - -Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request. 
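For example, a slow or heavily loaded catalog can be given more time before requests are aborted; the `30000` below is purely illustrative:

[,bash]
----
# Allow REST catalog requests up to 30 seconds instead of the 10-second default.
rpk cluster config set iceberg_rest_catalog_request_timeout_ms 30000
----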
- -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -ifndef::env-cloud[] -*Default:* `10000` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_request_timeout_ms[] - -// tag::iceberg_rest_catalog_token[] -=== iceberg_rest_catalog_token - -Token used to access the REST Iceberg catalog. If the token is present, Redpanda ignores credentials stored in the properties <> and <>. - -Required if <> is set to `bearer`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_token[] - -// tag::iceberg_rest_catalog_trust[] -=== iceberg_rest_catalog_trust - -The contents of a certificate chain to trust for the REST Iceberg catalog. - -ifndef::env-cloud[] -Takes precedence over <>. -endif::[] - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - ---- - -// end::iceberg_rest_catalog_trust[] - -=== iceberg_rest_catalog_trust_file - -Path to a file containing a certificate chain to trust for the REST Iceberg catalog. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// tag::iceberg_rest_catalog_warehouse[] -=== iceberg_rest_catalog_warehouse - -Warehouse to use for the Iceberg REST catalog. Redpanda queries the catalog to retrieve warehouse-specific configurations and automatically configures settings like the appropriate prefix. The prefix is appended to the catalog path (for example, `/v1/\{prefix}/namespaces`). - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Aliases:* `iceberg_rest_catalog_prefix` - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/use-iceberg-catalogs.adoc[] - ---- - -// end::iceberg_rest_catalog_warehouse[] - -=== iceberg_target_backlog_size - -Average size per partition of the datalake translation backlog that the backlog controller tries to maintain. When the backlog size is larger than the set point, the backlog controller will increase the translation scheduling group priority. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `5242880` - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- - -// tag::iceberg_target_lag_ms[] -=== iceberg_target_lag_ms - -ifndef::env-cloud[] -Default value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-target-lag-ms[`redpanda.iceberg.target.lag.ms`] topic property, which controls how often the data in an Iceberg table is refreshed with new data from the corresponding Redpanda topic. Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability. - -endif::[] - -ifdef::env-cloud[] -Default value for the `redpanda.iceberg.target.lag.ms` topic property, which controls how often the data in an Iceberg table is refreshed with new data from the corresponding Redpanda topic. Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability. 
- -endif::[] - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -ifndef::env-cloud[] -*Default:* `60000` -endif::[] - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- - -// end::iceberg_target_lag_ms[] - ---- - -=== iceberg_throttle_backlog_size_ratio - -Ration of the total backlog size to the disk space at which the throttle to iceberg producers is applied. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.3` - ---- - -=== id_allocator_batch_size - -The ID allocator allocates messages in batches (each batch is a one log record) and then serves requests from memory without touching the log until the batch is exhausted. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `1000` - ---- - -=== id_allocator_log_capacity - -Capacity of the `id_allocator` log in number of batches. After it reaches `id_allocator_stm`, it truncates the log's prefix. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `100` - ---- - -=== initial_retention_local_target_bytes_default - -Initial local retention size target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data will be delivered to learner when joining the partition replica set. - -*Unit*: bytes - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Default:* `null` - ---- - -=== initial_retention_local_target_ms_default - -Initial local retention time target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data will be delivered to learner when joining the partition replica is set. - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `null` - ---- - -=== internal_topic_replication_factor - -Target replication factor for internal topics. - -*Unit*: number of replicas per topic. -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `3` - ---- - -=== join_retry_timeout_ms - -Time between cluster join retries in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` - ---- - -=== kafka_batch_max_bytes - -Maximum size of a batch processed by the server. If the batch is compressed, the limit applies to the compressed batch size. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `1048576` - ---- - -=== kafka_connection_rate_limit - -Maximum connections per second for one core. If `null` (the default), then the number of connections per second is unlimited. 
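For example, to cap the rate at which each core accepts new Kafka connections (the `1000` below is an arbitrary illustration):

[,bash]
----
# Limit each core to roughly 1000 new Kafka connections per second.
rpk cluster config set kafka_connection_rate_limit 1000
----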
- -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-9223372036854775808`, `9223372036854775807`] - -*Default:* `null` - ---- - -=== kafka_connection_rate_limit_overrides - -Overrides the maximum connections per second for one core for the specified IP addresses (for example, `['127.0.0.1:90', '50.20.1.1:40']`) - -*Related topics*: - -* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -*Default:* `null` - ---- - -=== kafka_connections_max - -Maximum number of Kafka client connections per broker. If `null`, the property is disabled. - -*Unit*: number of Kafka client connections per broker - -*Default*: null - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Related topics*: - -* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] - ---- - -// tag::kafka_connections_max_overrides[] -=== kafka_connections_max_overrides - -A list of IP addresses for which Kafka client connection limits are overridden and don't apply. For example, `(['127.0.0.1:90', '50.20.1.1:40']).`. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -ifndef::env-cloud[] -*Default*: `{}` (empty list) -endif::[] - -*Related topics*: - -* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] - ---- - - -// end::kafka_connections_max_overrides[] - -// tag::kafka_connections_max_per_ip[] -=== kafka_connections_max_per_ip - -Maximum number of Kafka client connections per IP address, per broker. If `null`, the property is disabled. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -ifndef::env-cloud[] -*Default:* `null` -endif::[] - -*Related topics*: - -* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] - ---- - -// end::kafka_connections_max_per_ip[] - -=== kafka_enable_authorization - -Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `null` - -*Accepted Values:* - -* `null`: Ignored. Authorization is enabled with <>: `true` -* `true`: authorization is required. -* `false`: authorization is disabled. - -*Related properties*: - -* <> -* `kafka_api[].authentication_method` - ---- - -=== kafka_enable_describe_log_dirs_remote_storage - -Whether to include Tiered Storage as a special remote:// directory in `DescribeLogDirs Kafka` API requests. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== kafka_enable_partition_reassignment - -Enable the Kafka partition reassignment API. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== kafka_group_recovery_timeout_ms - -Kafka group recovery timeout. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` (30 sec) - ---- - -=== kafka_max_bytes_per_fetch - -Limit fetch responses to this many bytes, even if the total of partition bytes limits is higher. 
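For example, on memory-constrained brokers the cap can be lowered from the 64 MiB default; the 32 MiB value below is only an illustration:

[,bash]
----
# Cap any single fetch response at 32 MiB, regardless of per-partition byte limits.
rpk cluster config set kafka_max_bytes_per_fetch 33554432
----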
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `67108864` - ---- - - -=== kafka_memory_batch_size_estimate_for_fetch - -The size of the batch used to estimate memory consumption for fetch requests, in bytes. Smaller sizes allow more concurrent fetch requests per shard. Larger sizes prevent running out of memory because of too many concurrent fetch requests. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Default:* `1048576` - ---- - -=== kafka_memory_share_for_fetch - -The share of Kafka subsystem memory that can be used for fetch read buffers, as a fraction of the Kafka subsystem memory amount. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* number - -*Default:* `0.5` - ---- - -=== kafka_mtls_principal_mapping_rules - -Principal mapping rules for mTLS authentication on the Kafka API. If `null`, the property is disabled. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -*Default:* `null` - ---- - -=== kafka_nodelete_topics - -A list of topics that are protected from deletion and configuration changes by Kafka clients. Set by default to a list of Redpanda internal topics. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string array - -*Default*: `['_redpanda.audit_log', '__consumer_offsets', '_schemas']` - -*Related topics*: - -* xref:develop:consume-data/consumer-offsets.adoc[Consumer Offsets] -* xref:manage:schema-registry.adoc[Schema Registry] - ---- - -=== kafka_noproduce_topics - -A list of topics that are protected from being produced to by Kafka clients. Set by default to a list of Redpanda internal topics. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* array - -*Default*: `['_redpanda.audit_log']` - ---- - -=== kafka_qdc_depth_alpha - -Smoothing factor for Kafka queue depth control depth tracking. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.8` - ---- - -=== kafka_qdc_depth_update_ms - -Update frequency for Kafka queue depth control. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `7000` - ---- - -=== kafka_qdc_enable - -Enable Kafka queue depth control. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - - -=== kafka_qdc_idle_depth - -Queue depth when idleness is detected in Kafka queue depth control. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `10` - ---- - -=== kafka_qdc_latency_alpha - -Smoothing parameter for Kafka queue depth control latency tracking. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.002` - ---- - -=== kafka_qdc_max_depth - -Maximum queue depth used in Kafka queue depth control. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `100` - ---- - -=== kafka_qdc_max_latency_ms - -Maximum latency threshold for Kafka queue depth control depth tracking. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `80` - ---- - -=== kafka_qdc_min_depth - -Minimum queue depth used in Kafka queue depth control. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1` - ---- - -=== kafka_qdc_window_count - -Number of windows used in Kafka queue depth control latency tracking. 
- -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `12` - ---- - -=== kafka_qdc_window_size_ms - -Window size for Kafka queue depth control latency tracking. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1500` - ---- - - -=== kafka_request_max_bytes - -Maximum size of a single request processed using the Kafka API. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `104857600` - ---- - - -=== kafka_rpc_server_stream_recv_buf - -Maximum size of the user-space receive buffer. If `null`, this limit is not applied. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== kafka_rpc_server_tcp_recv_buf - -Size of the Kafka server TCP receive buffer. If `null`, the property is disabled. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `null` - ---- - -=== kafka_rpc_server_tcp_send_buf - -Size of the Kafka server TCP transmit buffer. If `null`, the property is disabled. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] aligned to 4096 bytes - -*Default:* `null` - ---- - -=== kafka_sasl_max_reauth_ms - -The maximum time between Kafka client reauthentications. If a client has not reauthenticated a connection within this time frame, that connection is torn down. - -IMPORTANT: If this property is not set (or set to `null`), session expiry is disabled, and a connection could live long after the client's credentials are expired or revoked. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `null` - ---- - -=== kafka_schema_id_validation_cache_capacity - -Per-shard capacity of the cache for validating schema IDs. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `128` - ---- - -=== kafka_tcp_keepalive_timeout - -TCP keepalive idle timeout in seconds for Kafka connections. This describes the timeout between TCP keepalive probes that the remote site successfully acknowledged. Refers to the TCP_KEEPIDLE socket option. When changed, applies to new connections only. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `120` - ---- - - -=== kafka_tcp_keepalive_probe_interval_seconds - -TCP keepalive probe interval in seconds for Kafka connections. This describes the timeout between unacknowledged TCP keepalives. Refers to the TCP_KEEPINTVL socket option. When changed, applies to new connections only. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `60` - ---- - -=== kafka_tcp_keepalive_probes - -TCP keepalive unacknowledged probes until the connection is considered dead for Kafka connections. Refers to the TCP_KEEPCNT socket option. When changed, applies to new connections only. 
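Taken together with `kafka_tcp_keepalive_timeout` and `kafka_tcp_keepalive_probe_interval_seconds`, this controls how quickly dead client connections are reaped. A sketch of a more aggressive configuration (values are illustrative, and apply only to connections opened after the change):

[,bash]
----
# Start probing after 60 seconds of idle time, probe every 20 seconds,
# and give up after 5 unacknowledged probes (roughly 160 seconds in total).
rpk cluster config set kafka_tcp_keepalive_timeout 60
rpk cluster config set kafka_tcp_keepalive_probe_interval_seconds 20
rpk cluster config set kafka_tcp_keepalive_probes 5
----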
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `3` - ---- - -=== kafka_throughput_control - -List of throughput control groups that define exclusions from node-wide throughput limits. Clients excluded from node-wide throughput limits are still potentially subject to client-specific throughput limits. - -Each throughput control group consists of: - -* `name` (optional) - any unique group name -* `client_id` - regex to match client_id - -Example values: - -* `[{'name': 'first_group','client_id': 'client1'}, {'client_id': 'consumer-\d+'}]` -* `[{'name': 'catch all'}]` -* `[{'name': 'missing_id', 'client_id': '+empty'}]` - -A connection is assigned the first matching group and is then excluded from throughput control. A `name` is not required, but can help you categorize the exclusions. Specifying `+empty` for the `client_id` will match on clients that opt not to send a `client_id`. You can also optionally omit the `client_id` and specify only a `name`, as shown. In this situation, all clients will match the rule and Redpanda will exclude them from all from node-wide throughput control. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string array - -*Accepted Values:* list of control groups of the format `{'name' : 'group name', 'client_id' : 'regex pattern'}` - -*Default*: `[]` (empty list) - -*Related topics*: - -* xref:manage:cluster-maintenance/manage-throughput.adoc[Manage throughput] - ---- - -=== kafka_throughput_controlled_api_keys - -List of Kafka API keys that are subject to cluster-wide and node-wide throughput limit control. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* list - -*Default:* `["produce", "fetch"]` - ---- - -=== kafka_throughput_limit_node_in_bps - -The maximum rate of all ingress Kafka API traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). If `null`, the property is disabled, and traffic is not limited. - -*Unit:* bytes per second - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-9223372036854775808`, `9223372036854775807`] - -*Default:* `null` - -*Related topics*: - -* xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits] - ---- - -=== kafka_throughput_limit_node_out_bps - -The maximum rate of all egress Kafka traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). If `null`, the property is disabled, and traffic is not limited. - -*Unit:* bytes per second - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-9223372036854775808`, `9223372036854775807`] - -*Default:* `null` - -*Related topics*: - -* xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits] - ---- - -=== kafka_throughput_replenish_threshold - -Threshold for refilling the token bucket as part of enforcing throughput limits. - -This threshold is evaluated with each request for data. When the number of tokens to replenish exceeds this threshold, then tokens are added to the token bucket. This ensures that the atomic is not being updated for the token count with each request. The range for this threshold is automatically clamped to the corresponding throughput limit for ingress and egress. 
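Because the threshold is clamped to the node-wide limits, it is usually tuned alongside them. A sketch with illustrative numbers (100 MiB/s in each direction, replenishing tokens in 64 KiB steps):

[,bash]
----
# Node-wide Kafka throughput limits (bytes per second).
rpk cluster config set kafka_throughput_limit_node_in_bps 104857600
rpk cluster config set kafka_throughput_limit_node_out_bps 104857600

# Only update the shared token bucket once at least 64 KiB worth of tokens is owed.
rpk cluster config set kafka_throughput_replenish_threshold 65536
----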
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* For ingress, [1, xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_in_bps[`kafka_throughput_limit_node_in_bps`]]. For egress, [1, xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_out_bps[`kafka_throughput_limit_node_out_bps`]] - -*Default:* `1` - -*Related topics*: - -* xref:manage:cluster-maintenance/manage-throughput.adoc[Manage Throughput] - ---- - -=== kafka_topics_max - -Maximum number of Kafka user topics that can be created. If `null`, then no limit is enforced. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== kvstore_flush_interval - -Key-value store flush interval (in milliseconds). - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10` - ---- - -=== kvstore_max_segment_size - -Key-value maximum segment size (in bytes). - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `16777216` - ---- - -=== leader_balancer_idle_timeout - -Leadership rebalancing idle timeout. - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `120000` (2 min) - ---- - -=== leader_balancer_mute_timeout - -The length of time that a glossterm:Raft[] group is muted after a leadership rebalance operation. Any group that has been moved, regardless of whether the move succeeded or failed, undergoes a cooling-off period. This prevents Raft groups from repeatedly experiencing leadership rebalance operations in a short time frame, which can lead to instability in the cluster. - -The leader balancer maintains a list of muted groups and reevaluates muted status at the start of each balancing iteration. Muted groups still contribute to overall cluster balance calculations although they can't themselves be moved until the mute period is over. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `300000` (5 min) - -*Related topics*: - -* xref:manage:cluster-maintenance/cluster-balancing.adoc[] - ---- - -=== leader_balancer_node_mute_timeout - -The duration after which a broker that hasn't sent a heartbeat is considered muted. This timeout sets a threshold for identifying brokers that shouldn't be targeted for leadership transfers when the cluster rebalances, for example, because of unreliable network connectivity. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `20000` (20 s) - -*Related topics*: - -* xref:manage:cluster-maintenance/cluster-balancing.adoc[] - ---- - -=== leader_balancer_transfer_limit_per_shard - -Per shard limit for in-progress leadership transfers. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `512` - ---- - -=== legacy_group_offset_retention_enabled - -Group offset retention is enabled by default starting in Redpanda version 23.1. To enable offset retention after upgrading from an older version, set this option to true. 
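For example, after upgrading a cluster that was originally created on a version older than 23.1, offset retention can be switched on explicitly:

[,bash]
----
# Opt a legacy (pre-23.1) cluster into consumer group offset retention.
rpk cluster config set legacy_group_offset_retention_enabled true
----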
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== legacy_permit_unsafe_log_operation - -Flag to enable a Redpanda cluster operator to use unsafe control characters within strings, such as consumer group names or user names. This flag applies only for Redpanda clusters that were originally on version 23.1 or earlier and have been upgraded to version 23.2 or later. Starting in version 23.2, newly-created Redpanda clusters ignore this property. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - -*Related properties*: <> - ---- - -=== legacy_unsafe_log_warning_interval_sec - -Period at which to log a warning about using unsafe strings containing control characters. If unsafe strings are permitted by `legacy_permit_unsafe_log_operation`, a warning will be logged at an interval specified by this property. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `300` - -*Related properties*: <> - ---- - -=== log_cleanup_policy - -Default cleanup policy for topic logs. - -The topic property xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`] overrides the value of `log_cleanup_policy` at the topic level. - -*Requires restart:* No - -*Visibility:* `user` - -*Accepted Values*: `compact`, `delete`, `compact,delete` - -*Default:* `delete` - ---- - -=== log_compaction_interval_ms - -How often to trigger background compaction. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== log_compaction_merge_max_ranges - -The maximum range of segments that can be processed in a single round of adjacent segment compaction. If `null` (the default value), no maximum is imposed on the number of ranges that can be processed at once. A value below 1 effectively disables adjacent merge compaction. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== log_compaction_merge_max_segments_per_range - -The maximum number of segments that can be combined into a single segment during an adjacent merge operation. If `null` (the default value), no maximum is imposed on the number of segments that can be combined at once. A value below 2 effectively disables adjacent merge compaction. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== log_compaction_pause_use_sliding_window - -Pause use of sliding window compaction. Toggle to `true` _only_ when you want to force adjacent segment compaction. The memory reserved by `storage_compaction_key_map_memory` is not freed when this is set to `true`. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== log_compaction_use_sliding_window - -Use sliding window compaction. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== log_compression_type - -IMPORTANT: This property is ignored regardless of the value specified. The behavior is always the same as the `producer` value. Redpanda brokers do not compress or recompress data based on this property. 
If producers send compressed data, Redpanda stores it as-is; if producers send uncompressed data, Redpanda stores it uncompressed. Other listed values are accepted for Apache Kafka compatibility but are ignored by the broker. This property may appear in Admin API and `rpk topic describe` outputs for compatibility. - -Default for the Kafka-compatible compression.type property. Redpanda does not recompress data. - -The topic property xref:./topic-properties.adoc#compressiontype[`compression.type`] overrides the value of `log_compression_type` at the topic level. - -*Requires restart:* No - -*Visibility:* `user` - -*Accepted Values:* `producer`. The following values are accepted for Kafka compatibility but ignored by the broker: `gzip`, `snappy`, `lz4`, `zstd`, `none`. - -*Default:* `producer` - ---- - -=== log_disable_housekeeping_for_tests - -Disables the housekeeping loop for local storage. This property is used to simplify testing, and should not be set in production. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== log_message_timestamp_alert_after_ms - -Threshold in milliseconds for alerting on messages with a timestamp after the broker's time, meaning the messages are in the future relative to the broker's clock. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `7200000` (2 h) - ---- - -=== log_message_timestamp_alert_before_ms - -Threshold in milliseconds for alerting on messages with a timestamp before the broker's time, meaning the messages are in the past relative to the broker's clock. To disable this check, set to `null`. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `null` - ---- - -=== log_message_timestamp_type - -Default timestamp type for topic messages (CreateTime or LogAppendTime). - -The topic property xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`] overrides the value of `log_message_timestamp_type` at the topic level. - -*Requires restart:* No - -*Visibility:* `user` - -*Accepted Values:* `CreateTime`, `LogAppendTime`. - -*Default:* `CreateTime` - ---- - -=== log_retention_ms - -The amount of time to keep a log file before deleting it (in milliseconds). If set to `-1`, no time limit is applied. This is a cluster-wide default when a topic does not set or disable xref:./topic-properties.adoc#retentionms[`retention.ms`]. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Aliases:* `delete_retention_ms` - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `604800000` (one week) - ---- - - -// tag::log_segment_ms[] -=== log_segment_ms - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -Default lifetime of log segments. If `null`, the property is disabled, and no default lifetime is set. Any value under 60 seconds (60000 ms) is rejected. This property can also be set in the Kafka API using the Kafka-compatible alias, `log.roll.ms`. - -ifndef::env-cloud[] -The topic property xref:./topic-properties.adoc#segmentms[`segment.ms`] overrides the value of `log_segment_ms` at the topic level. 
-endif::[] - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -ifndef::env-cloud[] -*Default:* `1209600000` (2 weeks) - -*Related properties*: - -* <> -* <> -endif::[] - ---- - -// end::log_segment_ms[] - -=== log_segment_ms_max - -Upper bound on topic `segment.ms`: higher values will be clamped to this value. - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `31536000000` (one year) - ---- - -=== log_segment_ms_min - -Lower bound on topic `segment.ms`: lower values will be clamped to this value. - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `600000` (10 min) - ---- - - -=== log_segment_size - -Default log segment size in bytes for topics which do not set `segment.bytes`. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `134217728` - ---- - -// end::log_segment_size[] - -=== log_segment_size_jitter_percent - -Random variation to the segment size limit used for each partition. - -*Unit:* percent - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `65535`] - -*Default:* `5` - ---- - -=== log_segment_size_max - -Upper bound on topic `segment.bytes`: higher values will be clamped to this limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `null` - ---- - -=== log_segment_size_min - -Lower bound on topic `segment.bytes`: lower values will be clamped to this limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `1048576` - ---- - -=== lz4_decompress_reusable_buffers_disabled - -Disable reusable preallocated buffers for LZ4 decompression. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== max_compacted_log_segment_size - -Maximum compacted segment size after consolidation. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `5368709120` - ---- - -=== max_compaction_lag_ms - -For a compacted topic, the maximum time a message remains ineligible for compaction. The topic property `max.compaction.lag.ms` overrides this property. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`1`, `9223372036854`] - -*Default:* `9223372036854` - -*Related topics*: xref:reference:properties/topic-properties.adoc#max.compaction.lag.ms[`max.compaction.lag.ms`] - ---- - -=== max_concurrent_producer_ids - -Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `18446744073709551615` - ---- - -=== max_in_flight_pandaproxy_requests_per_shard - -Maximum number of in-flight HTTP requests to HTTP Proxy permitted per shard. 
Any additional requests above this limit will be rejected with a 429 error. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `500` - ---- - -=== max_in_flight_schema_registry_requests_per_shard - -Maximum number of in-flight HTTP requests to Schema Registry permitted per shard. Any additional requests above this limit will be rejected with a 429 error. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `500` - ---- - -=== max_kafka_throttle_delay_ms - -Fail-safe maximum throttle delay on Kafka requests. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== max_transactions_per_coordinator - -Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase). - -For details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips]. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `18446744073709551615` - ---- - -=== members_backend_retry_ms - -Time between members backend reconciliation loop retries. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` (5 s) - ---- - -=== memory_abort_on_alloc_failure - -If `true`, the Redpanda process will terminate immediately when an allocation cannot be satisfied due to memory exhaustion. If false, an exception is thrown. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== metadata_dissemination_interval_ms - -Interval for metadata dissemination batching. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3000` - ---- - -=== metadata_dissemination_retries - -Number of attempts to look up a topic's metadata-like shard before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for topics like tx, consumer offsets, etc) is missing. These topics are usually created on demand when users try to use the cluster for the first time and it may take some time for the creation to happen and the metadata to propagate to all the brokers (particularly the broker handling the request). In the meantime Redpanda waits and retries. This configuration controls the number retries. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `30` - ---- - -=== metadata_dissemination_retry_delay_ms - -Delay before retrying a topic lookup in a shard or other meta tables. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `500` - ---- - -=== metadata_status_wait_timeout_ms - -Maximum time to wait in metadata request for cluster health to be refreshed. 
- -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `2000` - ---- - -=== metrics_reporter_report_interval - -Cluster metrics reporter report interval. - -*Unit:* milliseconds -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `86400000` (one day) - ---- - -=== metrics_reporter_tick_interval - -Cluster metrics reporter tick interval. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `60000` (1 min) - ---- - -=== metrics_reporter_url - -URL of the cluster metrics reporter. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* string - -*Default:* `https://m.rp.vectorized.io/v2` - ---- - -=== min_cleanable_dirty_ratio - -The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. The topic property `min.cleanable.dirty.ratio` overrides this value at the topic level. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* number - -*Default:* `0.5` - ---- - -=== min_compaction_lag_ms - -The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `9223372036854`] - -*Default:* `0` - -*Related topics*: - -- xref:reference:properties/topic-properties.adoc#min.compaction.lag.ms[`min.compaction.lag.ms`] - ---- - -// tag::minimum_topic_replications[] - -[[minimum_topic_replications]] -=== minimum_topic_replications - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -Minimum allowable replication factor for topics in this cluster. The set value must be positive, odd, and equal to or less than the number of available brokers. Changing this parameter only restricts newly-created topics. Redpanda returns an `INVALID_REPLICATION_FACTOR` error on any attempt to create a topic with a replication factor less than this property. - -If you change the `minimum_topic_replications` setting, the replication factor of existing topics remains unchanged. However, Redpanda will log a warning on start-up with a list of any topics that have fewer replicas than this minimum. For example, you might see a message such as `Topic X has a replication factor less than specified minimum: 1 < 3`. - -*Unit*: minimum number of replicas per topic - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`1`, `32767`] - -ifndef::env-cloud[] -*Default:* `1` -endif::[] - ---- - -// end::minimum_topic_replications[] - -=== node_isolation_heartbeat_timeout - -How long after the last heartbeat request a node will wait before considering itself to be isolated. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-9223372036854775808`, `9223372036854775807`] - -*Default:* `3000` - ---- - -=== node_management_operation_timeout_ms - -Timeout for executing node management operations. 
- -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` (5 s) - ---- - -=== node_status_interval - -Time interval between two node status messages. Node status messages establish liveness status outside of the Raft protocol. - -*Unit:* milliseconds -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== node_status_reconnect_max_backoff_ms - -Maximum backoff (in milliseconds) to reconnect to an unresponsive peer during node status liveness checks. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `15000` - ---- - -=== oidc_clock_skew_tolerance - -The amount of time (in seconds) to allow for when validating the expiry claim in the token. - -*Unit*: seconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `30` - ---- - -// tag::oidc_discovery_url[] -=== oidc_discovery_url - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -The URL pointing to the well-known discovery endpoint for the OIDC provider. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -ifdef::env-cloud[] -*Default:* `https://auth.prd.cloud.redpanda.com/.well-known/openid-configuration` -endif::[] - ---- - -// end::oidc_discovery_url[] - -=== oidc_keys_refresh_interval - -The frequency of refreshing the JSON Web Keys (JWKS) used to validate access tokens. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `3600000` - ---- - -// tag::oidc_principal_mapping[] -=== oidc_principal_mapping - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -Rule for mapping JWT payload claim to a Redpanda user principal. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `$.sub` - -*Related topics*: - -* xref:manage:security/authentication.adoc#oidc[OpenID Connect authentication] -* xref:manage:kubernetes/security/authentication/k-authentication.adoc[OpenID Connect authentication in Kubernetes] - -endif::[] - ---- - -// end::oidc_principal_mapping[] - -// tag::oidc_token_audience[] -=== oidc_token_audience - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -A string representing the intended recipient of the token. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `redpanda` -endif::[] - ---- - -// end::oidc_token_audience[] - -=== partition_autobalancing_concurrent_moves - -Number of partitions that can be reassigned at once. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `50` - ---- - -=== partition_autobalancing_max_disk_usage_percent - -NOTE: This property applies only when <> is set to `continuous`. - -When the disk usage of a node exceeds this threshold, it triggers Redpanda to move partitions off of the node. 
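As an illustrative sketch (the threshold value is a placeholder), this property only takes effect after continuous balancing is enabled:

[,bash]
----
# Enable continuous partition balancing (requires an enterprise license),
# then trigger partition moves when a node's disk usage exceeds 70%.
rpk cluster config set partition_autobalancing_mode continuous
rpk cluster config set partition_autobalancing_max_disk_usage_percent 70
----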
- -*Unit*: percent of disk used - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `80` - -*Related topics*: - -* xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing] - ---- - -=== partition_autobalancing_min_size_threshold - -Minimum size of partition that is going to be prioritized when rebalancing a cluster due to the disk size threshold being breached. This value is calculated automatically by default. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== partition_autobalancing_mode - -include::reference:partial$enterprise-licensed-property.adoc[] - -Mode of xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing] for a cluster. - -*Requires restart:* No - -*Visibility:* `user` - -*Accepted values:* - -* `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option requires an xref:get-started:licensing/index.adoc[enterprise license], and it is customized by <> and <> properties. -* `node_add`: partition balancing happens when a node is added. -* `off`: partition balancing is disabled. This option is not recommended for production clusters. - -*Enterprise license required*: `continuous` - -*Default:* `node_add` - -*Related topics*: - -* xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing] - ---- - -=== partition_autobalancing_node_availability_timeout_sec - -NOTE: This property applies only when <> is set to `continuous`. - -When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `900` (15 min) - -*Related topics*: - -* xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing] - ---- - -=== partition_autobalancing_tick_interval_ms - -Partition autobalancer tick interval. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` (30 s) - ---- - -=== partition_autobalancing_tick_moves_drop_threshold - -If the number of scheduled tick moves drops by this ratio, a new tick is scheduled immediately. Valid values are (0, 1]. For example, with a value of 0.2 and 100 scheduled moves in a tick, a new tick is scheduled when the in-progress moves are fewer than 80. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.2` - ---- - -=== partition_autobalancing_topic_aware - -If `true`, Redpanda prioritizes balancing a topic’s partition replica count evenly across all brokers while it’s balancing the cluster’s overall partition count. Because different topics in a cluster can have vastly different load profiles, this better distributes the workload of the most heavily-used topics evenly across brokers. 
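A minimal sketch of setting this property with `rpk` (the value shown matches the default):

[,bash]
----
# Spread each topic's partition replicas evenly across brokers during balancing
rpk cluster config set partition_autobalancing_topic_aware true
----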
- -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default*: true - -*Related topics*: - -* xref:manage:cluster-maintenance/cluster-balancing.adoc[] - ---- - -=== partition_manager_shutdown_watchdog_timeout - -A threshold value to detect partitions which might have been stuck while shutting down. After this threshold, a watchdog in partition manager will log information about partition shutdown not making progress. - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== pp_sr_smp_max_non_local_requests - -Maximum number of Cross-core(Inter-shard communication) requests pending in HTTP Proxy and Schema Registry seastar::smp group. (For more details, see the `seastar::smp_service_group` documentation). - -See https://docs.seastar.io/master/[Seastar documentation^] - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== quota_manager_gc_sec - -Quota manager GC frequency in milliseconds. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` (30 s) - ---- - -=== election_timeout_ms - -Raft election timeout expressed in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1500` - ---- - -=== raft_enable_longest_log_detection - -Enables an additional step in leader election where a candidate is allowed to wait for all the replies from the broker it requested votes from. This may introduce a small delay when recovering from failure, but it prevents truncation if any of the replicas have more data than the majority. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== raft_enable_lw_heartbeat - -Enables Raft optimization of heartbeats. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== raft_heartbeat_disconnect_failures - -The number of failed heartbeats after which an unresponsive TCP connection is forcibly closed. To disable forced disconnection, set to 0. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `3` - ---- - -=== raft_heartbeat_interval_ms - -Number of milliseconds for Raft leader heartbeats. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`1`, `17592186044415`] - -*Default:* `150` - ---- - -=== raft_heartbeat_timeout_ms - -Raft heartbeat RPC (remote procedure call) timeout. Raft uses a heartbeat mechanism to maintain leadership authority and to trigger leader elections. The `raft_heartbeat_interval_ms` is a periodic heartbeat sent by the partition leader to all followers to declare its leadership. If a follower does not receive a heartbeat within the `raft_heartbeat_timeout_ms`, then it triggers an election to choose a new partition leader. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3000` - ---- - -=== raft_io_timeout_ms - -Raft I/O timeout. 
- -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== raft_learner_recovery_rate - -Raft learner recovery rate limit. Throttles the rate of data communicated to nodes (learners) that need to catch up to leaders. This rate limit is placed on a node sending data to a recovering node. Each sending node is limited to this rate. The recovering node accepts data as fast as possible according to the combined limits of all healthy nodes in the cluster. For example, if two nodes are sending data to the recovering node, and `raft_learner_recovery_rate` is 100 MB/sec, then the recovering node will recover at a rate of 200 MB/sec. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `104857600` - ---- - - -=== raft_max_buffered_follower_append_entries_bytes_per_shard - -The total size of append entry requests that may be cached per shard, using the Raft-buffered protocol. When an entry is cached, the leader can continue serving requests because the ordering of the cached requests cannot change. When the total size of cached requests reaches the set limit, back pressure is applied to throttle producers. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `0` - ---- - -=== raft_max_inflight_follower_append_entries_requests_per_shard - -The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. This property replaces `raft_max_concurrent_append_requests_per_follower`. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1024` - ---- - -=== raft_max_recovery_memory - -Maximum memory that can be used for reads in Raft recovery process by default 15% of total memory. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== raft_recovery_concurrency_per_shard - -Number of partitions that may simultaneously recover data to a particular shard. This number is limited to avoid overwhelming nodes when they come back online after an outage. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `64` - ---- - -=== raft_recovery_default_read_size - -Specifies the default size of a read issued during Raft follower recovery. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `524288` - ---- - -=== raft_recovery_throttle_disable_dynamic_mode - -include::reference:partial$internal-use-property.adoc[] - -Disables cross shard sharing used to throttle recovery traffic. Should only be used to debug unexpected problems. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== raft_replica_max_flush_delay_ms - -Maximum delay between two subsequent flushes. After this delay, the log is automatically force flushed. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== raft_replica_max_pending_flush_bytes - -Maximum number of bytes that are not flushed per partition. If the configured threshold is reached, the log is automatically flushed even if it has not been explicitly requested. 
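For example (illustrative values), the two flush bounds are typically tuned together, and a flush is triggered by whichever limit is reached first:

[,bash]
----
# Force a flush after at most 200 ms or 512 KiB of unflushed data per partition
rpk cluster config set raft_replica_max_flush_delay_ms 200
rpk cluster config set raft_replica_max_pending_flush_bytes 524288
----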
- -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `262144` - ---- - -=== raft_replicate_batch_window_size - -Maximum size of requests cached for replication. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1048576` - ---- - -=== raft_smp_max_non_local_requests - -Maximum number of cross-core (inter-shard communication) requests pending in the Raft seastar::smp group. For details, refer to the `seastar::smp_service_group` documentation. - -See https://docs.seastar.io/master/[Seastar documentation^] - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== raft_timeout_now_timeout_ms - -Timeout for Raft's timeout_now RPC. This RPC is used to force a follower to dispatch a round of votes immediately. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1000` - ---- - -=== raft_transfer_leader_recovery_timeout_ms - -Follower recovery timeout waiting period when transferring leadership. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== readers_cache_eviction_timeout_ms - -Duration after which inactive readers are evicted from cache. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== readers_cache_target_max_size - -Maximum desired number of readers cached per NTP. This is a soft limit, meaning that the number of readers in the cache may temporarily increase as cleanup is performed in the background. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `200` - ---- - -=== reclaim_batch_cache_min_free - -Minimum amount of free memory maintained by the batch cache background reclaimer. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `67108864` - ---- - -=== reclaim_growth_window - -Starting from the last point in time when memory was reclaimed from the batch cache, this is the duration during which the amount of memory to reclaim grows at a significant rate, based on heuristics about the amount of available memory. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3000` - ---- - -=== reclaim_max_size - -Maximum batch cache reclaim size. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `4194304` - ---- - -=== reclaim_min_size - -Minimum batch cache reclaim size. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `131072` - ---- - -=== reclaim_stable_window - -If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed.
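A sketch of how the batch cache reclaim bounds fit together (the values shown are the documented defaults; these tunables require a restart):

[,bash]
----
# Reclaim between 128 KiB and 4 MiB per pass; after 10 s without a reclaim,
# usage is considered stable and only the minimum is reclaimed.
rpk cluster config set reclaim_min_size 131072
rpk cluster config set reclaim_max_size 4194304
rpk cluster config set reclaim_stable_window 10000
----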
- -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== recovery_append_timeout_ms - -Timeout for append entry requests issued while updating a stale follower. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` - ---- - -=== release_cache_on_segment_roll - -Flag for specifying whether or not to release cache when a full segment is rolled. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== replicate_append_timeout_ms - -Timeout for append entry requests issued while replicating entries. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3000` - ---- - -=== retention_bytes - -Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. If `null` (the default value), no limit is applied. - -The topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level. - -*Unit*: bytes per partition. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Default:* `null` - ---- - - -=== retention_local_strict - -Flag to allow Tiered Storage topics to expand to consumable retention policy limits. When this flag is enabled, non-local retention settings are used, and local retention settings are used to inform data removal policies in low-disk space scenarios. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== retention_local_strict_override - -Trim log data when a cloud topic reaches its local retention limit. When this option is disabled Redpanda will allow partitions to grow past the local retention limit, and will be trimmed automatically as storage reaches the configured target size. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== retention_local_target_bytes_default - -Local retention size target for partitions of topics with object storage write enabled. If `null`, the property is disabled. - -This property can be overridden on a per-topic basis by setting `retention.local.target.bytes` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]. - -NOTE: Both `retention_local_target_bytes_default` and `retention_local_target_ms_default` can be set. The limit that is reached earlier is applied. - -*Related properties*: - -* <> - -*Unit*: bytes - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Default:* `null` - ---- - - -=== retention_local_target_capacity_bytes - -The target capacity (in bytes) that log storage will try to use before additional retention rules take over to trim data to meet the target. When no target is specified, storage usage is unbounded. - -NOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity. 
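For example (an illustrative value), set only one of the two capacity targets, such as the byte-based one:

[,bash]
----
# Target roughly 500 GiB of local log storage before space management trims data;
# leave retention_local_target_capacity_percent unset.
rpk cluster config set retention_local_target_capacity_bytes 536870912000
----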
- -*Unit*: bytes - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `null` - ---- - -=== retention_local_target_capacity_percent - -The target capacity, as a percentage of unreserved space (<>), that log storage will try to use before additional retention rules take over to trim data to meet the target. When no target is specified, storage usage is unbounded. - -NOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity. - -*Unit*: percentage of total disk size - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* number - -*Default:* `80.0` - ---- - -=== retention_local_target_ms_default - -Local retention time target for partitions of topics with object storage write enabled. - -This property can be overridden on a per-topic basis by setting `retention.local.target.ms` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]. - -NOTE: Both <> and <> can be set. The limit that is reached first is applied. - -*Related properties*: - -* <> - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `86400000` (one day) - ---- - - -=== retention_local_trim_interval - -The interval at which disk usage is checked for disk pressure, and data is optionally trimmed to meet the target. - -*Unit*: milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `17592186044415`] - -*Default:* `30000` (30 s) - ---- - -=== retention_local_trim_overage_coeff - -The space management control loop reclaims the overage multiplied by this coefficient to compensate for data that is written during the idle period between control loop invocations. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `2.0` - ---- - -=== rm_sync_timeout_ms - -Resource manager's synchronization timeout. Specifies the maximum time for this node to wait for the internal state machine to catch up with all events written by previous leaders before rejecting a request. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== rpc_client_connections_per_peer - -The maximum number of connections a broker will open to each of its peers. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `128` - ---- - -=== rpc_server_compress_replies - -Enable compression for internal RPC (remote procedure call) server replies. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== rpc_server_listen_backlog - -Maximum TCP connection queue length for Kafka server and internal RPC server. If `null` (the default value), no queue length is set. - -*Unit*: number of queue entries - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `null` - ---- - -=== rpc_server_tcp_recv_buf - -Internal RPC TCP receive buffer size. If `null` (the default value), no buffer size is set by Redpanda.
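As a sketch (placeholder sizes), the receive buffer is usually sized together with the matching send buffer described next; both require a restart:

[,bash]
----
# Pin internal RPC TCP buffers to 1 MiB instead of relying on kernel autotuning
rpk cluster config set rpc_server_tcp_recv_buf 1048576
rpk cluster config set rpc_server_tcp_send_buf 1048576
----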
- -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `null` - ---- - -=== rpc_server_tcp_send_buf - -Internal RPC TCP send buffer size. If `null` (the default value), then no buffer size is set by Redpanda. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `null` - ---- - -=== rpk_path - -Path to RPK binary. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* string - -*Default:* `/usr/bin/rpk` - ---- - -=== rps_limit_acls_and_users_operations - -Rate limit for controller ACLs and user's operations. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1000` - ---- - -=== rps_limit_configuration_operations - -Rate limit for controller configuration operations. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1000` - ---- - -=== rps_limit_move_operations - -Rate limit for controller move operations. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1000` - ---- - -=== rps_limit_node_management_operations - -Rate limit for controller node management operations. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1000` - ---- - -=== rps_limit_topic_operations - -Rate limit for controller topic operations. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1000` - ---- - -=== memory_enable_memory_sampling - -When `true`, memory allocations are sampled and tracked. A sampled live set of allocations can then be retrieved from the Admin API. Additionally, Redpanda will periodically log the top-n allocation sites. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== sasl_kerberos_config - -The location of the Kerberos `krb5.conf` file for Redpanda. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `/etc/krb5.conf` - ---- - -=== sasl_kerberos_keytab - -The location of the Kerberos keytab file for Redpanda. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `/var/lib/redpanda/redpanda.keytab` - ---- - -=== sasl_kerberos_principal - -The primary of the Kerberos Service Principal Name (SPN) for Redpanda. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `redpanda` - ---- - -=== sasl_kerberos_principal_mapping - -Rules for mapping Kerberos principal names to Redpanda user principals. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string array - -*Default:* `[default]` - ---- - -// tag::sasl_mechanisms[] -=== sasl_mechanisms - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -ifndef::env-cloud[] -include::reference:partial$enterprise-licensed-property.adoc[] -endif::[] - -A list of supported SASL mechanisms. Accepted values: `SCRAM`, `GSSAPI`, `OAUTHBEARER`, `PLAIN`. Note that in order to enable PLAIN, you must also enable SCRAM. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string array - -*Accepted values*: `SCRAM`, `GSSAPI`, `OAUTHBEARER`, `PLAIN` - -ifndef::env-cloud[] -*Enterprise license required*: `GSSAPI`, `OAUTHBEARER` - -*Default:* `[SCRAM]` -endif::[] - ---- - -// end::sasl_mechanisms[] - -=== schema_registry_always_normalize - -Always normalize schemas. 
If set, this overrides the `normalize` parameter in requests to the Schema Registry API. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Aliases:* `schema_registry_normalize_on_startup` - -*Default:* `false` - ---- - -// tag::schema_registry_enable_authorization[] -=== schema_registry_enable_authorization - -Enables ACL-based authorization for Schema Registry requests. When `true`, Schema Registry -uses ACL-based authorization instead of the default `public/user/superuser` authorization model. - -ifdef::env-cloud[] -Requires authentication to be enabled using the `authentication_method` property in the `schema_registry_api` broker configuration. -endif::[] - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -ifndef::env-cloud[] -*Enterprise license required:* `true` -endif::[] - -*Default:* `false` - ---- - -// end::schema_registry_enable_authorization[] - -=== segment_appender_flush_timeout_ms - -Maximum delay until buffered data is written. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1000` (1 s) - ---- - -=== segment_fallocation_step - -Size for segments fallocation. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `33554432` - ---- - -=== space_management_enable - -Option to explicitly disable automatic disk space management. If this property was explicitly disabled while using v23.2, it will remain disabled following an upgrade. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `true` - ---- - -=== space_management_enable_override - -Enable automatic space management. This option is ignored and deprecated in versions >= v23.3. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== space_management_max_log_concurrency - -Maximum parallel logs inspected during space management process. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `65535`] - -*Default:* `20` - ---- - -=== space_management_max_segment_concurrency - -Maximum parallel segments inspected during space management process. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `65535`] - -*Default:* `10` - ---- - -=== storage_compaction_index_memory - -Maximum number of bytes that may be used on each shard by compaction index writers. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `134217728` - ---- - -=== storage_compaction_key_map_memory - -Maximum number of bytes that may be used on each shard by compaction key-offset maps. Only applies when <> is set to `true`. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `134217728` - ---- - -=== storage_compaction_key_map_memory_limit_percent - -Limit on <>, expressed as a percentage of memory per shard, that bounds the amount of memory used by compaction key-offset maps. - -NOTE: Memory per shard is computed after <>, and only applies when <> is set to `true`. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `12.0` - ---- - -=== storage_ignore_cstore_hints - -When set, cstore hints are ignored and not used for data access (but are otherwise generated). 
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== storage_ignore_timestamps_in_future_sec - -The maximum number of seconds that a record's timestamp can be ahead of a Redpanda broker's clock and still be used when deciding whether to clean up the record for data retention. This property makes possible the timely cleanup of records from clients with clocks that are drastically unsynchronized relative to Redpanda. - -When determining whether to clean up a record with timestamp more than `storage_ignore_timestamps_in_future_sec` seconds ahead of the broker, Redpanda ignores the record's timestamp and instead uses a valid timestamp of another record in the same segment, or (if another record's valid timestamp is unavailable) the timestamp of when the segment file was last modified (mtime). - -By default, `storage_ignore_timestamps_in_future_sec` is disabled (null). - -[TIP] -==== -To figure out whether to set `storage_ignore_timestamps_in_future_sec` for your system: - -. Look for logs with segments that are unexpectedly large and not being cleaned up. -. In the logs, search for records with unsynchronized timestamps that are further into the future than tolerable by your data retention and storage settings. For example, timestamps 60 seconds or more into the future can be considered to be too unsynchronized. -. If you find unsynchronized timestamps throughout your logs, determine the number of seconds that the timestamps are ahead of their actual time, and set `storage_ignore_timestamps_in_future_sec` to that value so data retention can proceed. -. If you only find unsynchronized timestamps that are the result of transient behavior, you can disable `storage_ignore_timestamps_in_future_sec`. -==== - -*Unit*: seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `null` - ---- - -=== storage_max_concurrent_replay - -Maximum number of partitions' logs that will be replayed concurrently at startup, or flushed concurrently on shutdown. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `1024` - ---- - -=== storage_min_free_bytes - -Threshold of minimum bytes free space before rejecting producers. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `5368709120` - ---- - -=== storage_read_buffer_size - -Size of each read buffer (one per in-flight read, per log segment). - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `131072` - ---- - -=== storage_read_readahead_count - -How many additional reads to issue ahead of current read location. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `1` - ---- - -=== storage_reserve_min_segments - -The number of segments per partition that the system will attempt to reserve disk capacity for. For example, if the maximum segment size is configured to be 100 MB, and the value of this option is 2, then in a system with 10 partitions Redpanda will attempt to reserve at least 2 GB of disk space. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `2` - ---- - -=== storage_space_alert_free_threshold_bytes - -Threshold of minimum bytes free space before setting storage space alert. 
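For example (illustrative values), the alert can be driven by an absolute byte threshold, the percentage threshold described below, or both:

[,bash]
----
# Raise the storage space alert when less than 10 GiB or less than 5% of disk space is free
rpk cluster config set storage_space_alert_free_threshold_bytes 10737418240
rpk cluster config set storage_space_alert_free_threshold_percent 5
----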
- -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `0` - ---- - -=== storage_space_alert_free_threshold_percent - -Threshold of minimum percent free space before setting storage space alert. - -*Unit:* percent - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `5` - ---- - -=== storage_strict_data_init - -Requires that an empty file named `.redpanda_data_dir` be present in the xref:reference:properties/broker-properties.adoc#data_directory[`data_directory`]. If set to `true`, Redpanda will refuse to start if the file is not found in the data directory. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== storage_target_replay_bytes - -Target bytes to replay from disk on startup after clean shutdown: controls frequency of snapshots and checkpoints. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `10737418240` - ---- - -=== superusers - -List of superuser usernames. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== tls_certificate_name_format - -The format of the certificate's distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'. - -*Requires restart:* No - -*Visibility:* `user` - -*Default:* `legacy` - ---- - -=== tls_enable_renegotiation - -TLS client-initiated renegotiation is considered unsafe and is disabled by default. Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections because client-initiated renegotiation was removed in that version. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -// tag::tls_min_version[] -=== tls_min_version - -ifdef::env-cloud[] -NOTE: This property is read-only in Redpanda Cloud. -endif::[] - -The minimum TLS version that Redpanda clusters support. This property prevents client applications from negotiating a downgrade to the TLS version when they make a connection to a Redpanda cluster. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Accepted values:* `v1.0`, `v1.1`, `v1.2`, `v1.3` - -*Type:* string - -ifndef::env-cloud[] -*Default:* `v1.2` -endif::[] - ---- - -// end::tls_min_version[] - -=== tm_sync_timeout_ms - -Transaction manager's synchronization timeout. Maximum time to wait for the internal state machine to catch up before rejecting a request. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - - -=== tombstone_retention_ms - -The retention time for tombstone records in a compacted topic. Cannot be enabled at the same time as any of `cloud_storage_enabled`, `cloud_storage_enable_remote_read`, or `cloud_storage_enable_remote_write`. A typical default setting is `86400000`, or 24 hours.
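A minimal sketch using the 24-hour value mentioned above; this only applies when Tiered Storage reads and writes are not enabled:

[,bash]
----
# Remove tombstone records from compacted topics after 24 hours
rpk cluster config set tombstone_retention_ms 86400000
----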
- -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`1`, `17592186044415`] - -*Default:* `null` - -ifndef::env-cloud[] -*Related topics:* xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal] -endif::[] - ---- - - -=== topic_fds_per_partition - -File descriptors required per partition replica. If topic creation results in the ratio of the file descriptor limit to partition replicas being lower than this value, creation of new topics fails. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `5` - ---- - -=== topic_label_aggregation_limit - -When the number of topics exceeds this limit, the topic label in generated metrics will be aggregated. If `null`, then there is no limit. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== topic_memory_per_partition - -Required memory in bytes per partition replica when creating or altering topics. The total size of the memory pool for partitions is the total memory available to Redpanda times `topic_partitions_memory_allocation_percent`. Each partition created requires `topic_memory_per_partition` bytes from that pool. If insufficient memory is available, creating or altering topics fails. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `204800` - ---- - -=== topic_partitions_memory_allocation_percent - -Percentage of total memory to reserve for topic partitions. See <> for details. - -*Unit:* percent - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `10` - ---- - -=== topic_partitions_per_shard - -Maximum number of partition replicas per shard. If topic creation results in the ratio of partition replicas to shards being higher than this value, creation of new topics fails. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `5000` - ---- - -=== topic_partitions_reserve_shard0 - -Reserved partition slots on shard (CPU core) 0 on each node. If this is greater than or equal to <>, no data partitions will be scheduled on shard 0. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `0` - ---- - - -=== transaction_coordinator_cleanup_policy - -Cleanup policy for the transaction coordinator topic. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string array - -*Accepted Values*: `compact`, `delete`, `["compact","delete"]`, `none` - -*Default:* `delete` - ---- - - -=== transaction_coordinator_delete_retention_ms - -Delete segments older than this age. To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to <>. - -For example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `604800000` (10080 min) - ---- - - -=== transaction_coordinator_log_segment_size - -The size (in bytes) of each log segment for the transaction coordinator topic.
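Following the guidance above for one-hour transactions, an illustrative example keeps coordinator retention and producer ID expiration aligned:

[,bash]
----
# Retain transaction coordinator state at least as long as producer IDs live (one hour)
rpk cluster config set transaction_coordinator_delete_retention_ms 3600000
rpk cluster config set transactional_id_expiration_ms 3600000
----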
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `1073741824` (1 GB) - ---- - - -=== transaction_coordinator_partitions - -Number of partitions for transactions coordinator. - -*Unit:* number of partitions per topic - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `50` - ---- - - -=== transaction_max_timeout_ms - -The maximum allowed timeout for transactions. If a client-requested transaction timeout exceeds this configuration, the broker returns an error during transactional producer initialization. This guardrail prevents hanging transactions from blocking consumer progress. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `900000` - ---- - - -=== transactional_id_expiration_ms - -Expiration time of producer IDs. Measured starting from the time of the last write until now for a given ID. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `604800000` (10080 min) - ---- - - -=== tx_timeout_delay_ms - -Delay before scheduling the next check for timed out transactions. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `1000` - ---- - -=== unsafe_enable_consumer_offsets_delete_retention - -Enables delete retention of consumer offsets topic. This is an internal-only configuration and should be enabled only after consulting with Redpanda support. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== usage_disk_persistance_interval_sec - -The interval in which all usage stats are written to disk. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `300` (5 min) - ---- - -=== usage_num_windows - -The number of windows to persist in memory and disk. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `24` - ---- - -=== usage_window_width_interval_sec - -The width of a usage window, tracking cloud and kafka ingress/egress traffic each interval. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `3600` - ---- - -=== use_fetch_scheduler_group - -Use a separate scheduler group for fetch processing. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== use_kafka_handler_scheduler_group - -Use a separate scheduler group to handle parsing Kafka protocol requests. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== use_produce_scheduler_group - -Use a separate scheduler group to process Kafka produce requests. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== virtual_cluster_min_producer_ids - -Minimum number of active producers per virtual cluster. 
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `18446744073709551615` - ---- - -=== wait_for_leader_timeout_ms - -Timeout to wait for leadership in metadata cache. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` - ---- - - -=== write_caching_default - -The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. - -Fsyncs follow <> and <>, whichever is reached first. - -The `write_caching_default` cluster property can be overridden with the xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] topic property. - -*Requires restart:* no - -*Type*: string - -*Accepted values:* - -* `true` -* `false` -* `disabled`: This takes precedence over topic overrides and disables write caching for the entire cluster. - -*Default*: For clusters in production mode, the default is `false`. For clusters in development mode, the default is `true`. - -*Related topics*: - -* xref:develop:config-topics.adoc#configure-write-caching[Write caching] - ---- - -=== zstd_decompress_workspace_bytes - -Size of the zstd decompression workspace. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `8388608` - ---- - - +include::reference:partial$properties/cluster-properties.adoc[tags=!deprecated,!exclude-from-docs] \ No newline at end of file diff --git a/modules/reference/pages/properties/object-storage-properties.adoc b/modules/reference/pages/properties/object-storage-properties.adoc index 35fe634970..e0460f42ba 100644 --- a/modules/reference/pages/properties/object-storage-properties.adoc +++ b/modules/reference/pages/properties/object-storage-properties.adoc @@ -12,1751 +12,4 @@ NOTE: Some object storage properties require that you restart the cluster for an Object storage properties should only be set if you enable xref:manage:tiered-storage.adoc[Tiered Storage]. -=== cloud_storage_access_key - -AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication. - -To authenticate using IAM roles, see <>. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_api_endpoint - -Optional API endpoint. The only instance in which you must set this value is when using a custom domain with your object storage service. - -- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned. -- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>. -- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>. 
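As an illustrative sketch (bucket, region, keys, and endpoint are placeholders), a minimal S3-compatible Tiered Storage setup provides credentials and a bucket, and only sets the endpoint when a custom domain is used:

[,bash]
----
# Minimal AWS/S3-compatible Tiered Storage configuration; most of these settings require a restart
rpk cluster config set cloud_storage_enabled true
rpk cluster config set cloud_storage_access_key <access-key>
rpk cluster config set cloud_storage_secret_key <secret-key>
rpk cluster config set cloud_storage_region us-east-1
rpk cluster config set cloud_storage_bucket redpanda-tiered-storage
# Only needed with a custom domain; otherwise the endpoint is derived automatically
rpk cluster config set cloud_storage_api_endpoint s3.example.internal
----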
- -*Requires restart:* No - -*Gets restored during cluster restore:* No - -*Optional:* Yes (if not using a custom domain) - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_api_endpoint_port - -TLS port override. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `443` - ---- - -=== cloud_storage_attempt_cluster_restore_on_bootstrap - -When set to `true`, Redpanda automatically retrieves cluster metadata from a specified object storage bucket at the cluster's first startup. This option is ideal for orchestrated deployments, such as Kubernetes. Ensure any previous cluster linked to the bucket is fully decommissioned to prevent conflicts between Tiered Storage subsystems. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_azure_adls_endpoint - -Azure Data Lake Storage v2 endpoint override. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint. - -If not set, this is automatically generated using `dfs.core.windows.net` and <>. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_azure_adls_port - -Azure Data Lake Storage v2 port override. See also: <>. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `65535`] - -*Default:* `null` - ---- - -// tag::cloud_storage_azure_container[] -=== cloud_storage_azure_container - -The name of the Azure container to use with Tiered Storage. If `null`, the property is disabled. - -NOTE: The container must belong to <>. - -*Requires restart*: Yes - -*Gets restored during cluster restore:* No - -*Type*: string - -ifndef::env-cloud[] -*Default*: null -endif::[] - -*Supported versions*: Redpanda v23.1 or later - ---- - -// end::cloud_storage_azure_container[] - -=== cloud_storage_azure_hierarchical_namespace_enabled - -Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <>. - -When this property is not set, <> must be set, and each broker checks at startup if a hierarchical namespace is enabled. - -When set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. - -When set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. - -This setting should be used only in emergencies where Redpanda fails to detect the correct a hierarchical namespace status. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `null` - ---- - -=== cloud_storage_azure_managed_identity_id - -The managed identity ID to use for access to the Azure storage account. To use Azure managed identities, you must set <> to `azure_vm_instance_metadata`. See xref:manage:security/iam-roles.adoc[IAM Roles] for more information on managed identities. - -*Type*: string - -*Default*: null - -*Requires restart*: No - -*Supported versions*: Redpanda v24.1 or later - ---- - -=== cloud_storage_azure_shared_key - -The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <>. If `null`, the property is disabled. 
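A hedged Azure counterpart (account, container, and key are placeholders; note the Base64 requirement described next):

[,bash]
----
# Tiered Storage against Azure Blob Storage using Shared Key authentication
rpk cluster config set cloud_storage_enabled true
rpk cluster config set cloud_storage_azure_storage_account <storage-account>
rpk cluster config set cloud_storage_azure_container <container>
rpk cluster config set cloud_storage_azure_shared_key <base64-encoded-key>
----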
- -NOTE: Redpanda expects this key string to be Base64 encoded. - -*Requires restart*: Yes - -*Gets restored during cluster restore:* No - -*Type*: string - -ifndef::env-cloud[] -*Default*: null -endif::[] - -*Supported versions*: Redpanda v23.1 or later - ---- - -// tag::cloud_storage_azure_storage_account[] -=== cloud_storage_azure_storage_account - -The name of the Azure storage account to use with Tiered Storage. If `null`, the property is disabled. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -// end::cloud_storage_azure_storage_account[] - -=== cloud_storage_backend - -Optional object storage backend variant used to select API capabilities. If not supplied, this will be inferred from other configuration properties. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Accepted values:* [`unknown`, `aws`, `google_s3_compat`, `azure`, `minio`] - -*Default:* `unknown` - ---- - -=== cloud_storage_background_jobs_quota - -The total number of requests the object storage background jobs can make during one background housekeeping run. This is a per-shard limit. Adjusting this limit can optimize object storage traffic and impact shard performance. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-2147483648`, `2147483647`] - -*Default:* `5000` - ---- - -=== cloud_storage_bucket - -AWS or GCP bucket that should be used to store data. - -WARNING: Modifying this property after writing data to a bucket could cause data loss. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_cache_check_interval - -Minimum interval between Tiered Storage cache trims, measured in milliseconds. This setting dictates the cooldown period after a cache trim operation before another trim can occur. If a cache fetch operation requests a trim but the interval since the last trim has not yet passed, the trim will be postponed until this cooldown expires. Adjusting this interval helps manage the balance between cache size and retrieval performance. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` - ---- - -=== cloud_storage_cache_chunk_size - -Size of chunks of segments downloaded into object storage cache. Reduces space usage by only downloading the necessary chunk from a segment. - -*Unit:* bytes - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `16777216` - ---- - -=== cloud_storage_cache_directory - -The directory where the cache archive is stored. This property is mandatory when <> is set to `true`. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_cache_max_objects - -Maximum number of objects that may be held in the Tiered Storage cache. This applies simultaneously with <>, and whichever limit is hit first will trigger trimming of the cache. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `100000` - ---- - -=== cloud_storage_cache_num_buckets - -Divide the object storage cache across the specified number of buckets. 
This only works for objects with randomized prefixes. The names are not changed when the value is set to zero. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `0` - ---- - -=== cloud_storage_cache_size - -Maximum size of the object storage cache, in bytes. - -This property works together with <> to define cache behavior: - -- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes. - -- If one of these properties is set to `0`, Redpanda uses the non-zero value. - -- These properties cannot both be `0`. - -- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`. - -*Requires restart:* No - -*Gets restored during cluster restore:* No - -*Unit*: bytes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `0` - ---- - -=== cloud_storage_cache_size_percent - -Maximum size of the cache as a percentage, minus the space that Redpanda avoids using defined by the xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`] cluster property. This is calculated at startup and dynamically updated if either this property, `disk_reservation_percent`, or <> changes. - -This property works together with <> to define cache behavior: - -- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes. - -- If one of these properties is set to `0`, Redpanda uses the non-zero value. - -- These properties cannot both be `0`. - -- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`. - -*Unit:* percent - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* number - -*Default:* `20.0` - ---- - -=== cloud_storage_cache_trim_threshold_percent_objects - -Introduced in 24.1.10. - -Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `null` - ---- - -=== cloud_storage_cache_trim_threshold_percent_size - -Introduced in 24.1.10. - -Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. If unset, the default behavior is to start trimming when the cache is full. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `null` - ---- - -=== cloud_storage_cache_trim_walk_concurrency - -The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming. A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `65535`] - -*Default:* `1` - ---- - -=== cloud_storage_chunk_eviction_strategy - -Selects a strategy for evicting unused cache chunks. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Accepted values:* [`eager`, `capped`, `predictive`] - -*Default:* `eager` - ---- - -=== cloud_storage_chunk_prefetch - -Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. 
Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `65535`] - -*Default:* `0` - ---- - -=== cloud_storage_cluster_metadata_num_consumer_groups_per_upload - -Number of groups to upload in a single snapshot object during consumer offsets upload. Setting a lower value will mean a larger number of smaller snapshots are uploaded. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1000` - ---- - -=== cloud_storage_cluster_metadata_retries - -Number of attempts metadata operations may be retried. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `5` - ---- - -=== cloud_storage_cluster_metadata_upload_interval_ms - -Time interval to wait between cluster metadata uploads. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3600000` - ---- - -=== cloud_storage_cluster_metadata_upload_timeout_ms - -Timeout for cluster metadata uploads. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `60000` - ---- - -=== cloud_storage_credentials_host - -The hostname to connect to for retrieving role based credentials. Derived from <> if not set. Only required when using IAM role based access. To authenticate using access keys, see <>. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `tunable` - -*Type:* string - -*Accepted values:* [`config_file`, `aws_instance_metadata`, `sts`, `gcp_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`] - -*Default:* `config_file` - ---- - -=== cloud_storage_credentials_source - -The source of credentials used to authenticate to object storage services. -Required for AWS or GCP authentication with IAM roles. - -To authenticate using access keys, see <>. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Accepted values*: [`config_file`, `aws_instance_metadata`, `sts`, `gcp_instance_metadata`, `azure_vm_instance_metadata`, `azure_aks_oidc_federation`] - -*Visibility:* `user` - -*Default:* `config_file` - ---- - -=== cloud_storage_crl_file - -Path to certificate revocation list for <>. - -*Requires restart:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_disable_archival_stm_rw_fence - -Disables the concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_disable_chunk_reads - -Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal. 
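If chunk-based fetching is not working well for a particular workload, you can fall back to full-segment downloads; this is shown only as an illustration, and the default `false` is generally preferable:

[,bash]
----
# Revert to legacy full-segment downloads for remote reads.
rpk cluster config set cloud_storage_disable_chunk_reads true
----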
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_disable_read_replica_loop_for_tests - -Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_disable_remote_labels_for_tests - -If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics. - -CAUTION: This property exists to simplify testing and shouldn't be set in production. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_disable_tls - -Disable TLS for all object storage connections. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_disable_upload_consistency_checks - -Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_disable_upload_loop_for_tests - -Begins the upload loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_enable_compacted_topic_reupload - -Enable re-uploading data for compacted topics. -When set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== cloud_storage_enable_remote_allow_gaps - -Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. If the retained data fills the local volume, Redpanda throttles producers. Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_enable_remote_read - -Default remote read config value for new topics. -When set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. With the default set to `false`, remote reads must be explicitly enabled at the topic level. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_enable_remote_write - -Default remote write value for new topics. -When set to `true`, new topics are by default configured to upload data to object storage. With the default set to `false`, remote write must be explicitly enabled at the topic level. 
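For example, you can change the cluster-wide default, or enable uploads for a single topic instead (the topic name is a placeholder):

[,bash]
----
# Make new topics upload to object storage by default.
rpk cluster config set cloud_storage_enable_remote_write true

# Or enable uploads for one existing topic only.
rpk topic alter-config my-topic --set redpanda.remote.write=true
----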
- -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_enable_scrubbing - -Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_enable_segment_merging - -Enables adjacent segment merging. The segments are reuploaded if there is an opportunity for that and if it will improve the performance of Tiered Storage. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - -*Related topics*: - -* xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping] - ---- - -=== cloud_storage_enable_segment_uploads - -Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `true` - ---- - -=== cloud_storage_enabled - -include::reference:partial$enterprise-licensed-property.adoc[] - -Enable object storage. Must be set to `true` to use Tiered Storage or Remote Read Replicas. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* boolean - -*Enterprise license required*: `true` - -*Default:* `false` - ---- - -=== cloud_storage_full_scrub_interval_ms - -Interval, in milliseconds, between a final scrub and the next scrub. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `43200000` (12h) - ---- - -=== cloud_storage_garbage_collect_timeout_ms - -Timeout for running the cloud storage garbage collection, in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== cloud_storage_graceful_transfer_timeout_ms - -Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Aliases:* `cloud_storage_graceful_transfer_timeout` - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` - ---- - -=== cloud_storage_housekeeping_interval_ms - -Interval, in milliseconds, between object storage housekeeping tasks. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `300000` - ---- - -=== cloud_storage_hydrated_chunks_per_segment_ratio - -The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed. - -A segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. 
This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy which simply removes all unused chunks. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Accepted values:* [`0`, `1`] - -*Default:* `0.7` - ---- - -=== cloud_storage_hydration_timeout_ms - -Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error. - -Negative doesn't make sense, but it may not be checked-for/enforced. Large is subjective, but a huge timeout also doesn't make sense. This particular config doesn't have a min/max bounds control, but it probably should to avoid mistakes. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `17592186044415`] - -*Default:* `600000` - ---- - -=== cloud_storage_idle_threshold_rps - -The object storage request rate threshold for idle state detection. If the average request rate for the configured period is lower than this threshold, the object storage is considered idle. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `10.0` - ---- - -=== cloud_storage_idle_timeout_ms - -The timeout, in milliseconds, used to detect the idle state of the object storage API. If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== cloud_storage_initial_backoff_ms - -Initial backoff time for exponential backoff algorithm (ms). - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== cloud_storage_inventory_based_scrub_enabled - -Scrubber uses the latest cloud storage inventory report, if available, to check if the required objects exist in the bucket or container. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_inventory_hash_path_directory - -Directory to store inventory report hashes for use by cloud storage scrubber. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_inventory_id - -The name of the scheduled inventory job created by Redpanda to generate bucket or container inventory reports. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* string - -*Default:* `redpanda_scrubber_inventory` - ---- - -=== cloud_storage_inventory_max_hash_size_during_parse - -Maximum bytes of hashes held in memory before writing data to disk during inventory report parsing. This affects the number of files written to disk during inventory report parsing. When this limit is reached, new files are written to disk. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `67108864` - ---- - -=== cloud_storage_inventory_report_check_interval_ms - -Time interval between checks for a new inventory report in the cloud storage bucket or container. 
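For example, to poll for new inventory reports hourly instead of every six hours (the value is illustrative only):

[,bash]
----
# Check for a new inventory report every 3600000 ms (1 hour). Requires a restart.
rpk cluster config set cloud_storage_inventory_report_check_interval_ms 3600000
----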
- -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `21600000` (6h) - ---- - -=== cloud_storage_inventory_reports_prefix - -The prefix to the path in the cloud storage bucket or container where inventory reports will be placed. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* string - -*Default:* `redpanda_scrubber_inventory` - ---- - -=== cloud_storage_inventory_self_managed_report_config - -If enabled, Redpanda will not attempt to create the scheduled report configuration using cloud storage APIs. The scrubbing process will look for reports in the expected paths in the bucket or container, and use the latest report found. Primarily intended for use in testing and on backends where scheduled inventory reports are not supported. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* boolean - -*Default:* `false` - ---- - -=== cloud_storage_manifest_cache_size - -Amount of memory that can be used to handle Tiered Storage metadata. - -*Unit:* bytes - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1048576` - ---- - -=== cloud_storage_materialized_manifest_ttl_ms - -The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== cloud_storage_manifest_max_upload_interval_sec - -Minimum interval, in seconds, between partition manifest uploads. Actual time between uploads may be greater than this interval. If this is `null`, metadata is updated after each segment upload. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `60` - ---- - -=== cloud_storage_manifest_upload_timeout_ms - -Manifest upload timeout, in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== cloud_storage_max_concurrent_hydrations_per_shard - -Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory. 
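For example, if hydration downloads are competing with other workloads, you can cap them explicitly (the value is illustrative):

[,bash]
----
# Limit each shard to 8 concurrent segment hydrations.
rpk cluster config set cloud_storage_max_concurrent_hydrations_per_shard 8
----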
- - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== cloud_storage_max_connection_idle_time_ms - -Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated. -This setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections. - -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `5000` - ---- - -=== cloud_storage_max_connections - -Maximum simultaneous object storage connections per shard, applicable to upload and download activities. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `20` - ---- - -=== cloud_storage_max_segment_readers_per_shard - -Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. These readers are cached across Kafka consume requests and store a readahead buffer. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Aliases:* `cloud_storage_max_readers_per_shard` - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `null` - ---- - -=== cloud_storage_max_segments_pending_deletion_per_partition - -The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `5000` - ---- - -=== cloud_storage_max_throughput_per_shard - -Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second. -This setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1073741824` - ---- - -=== cloud_storage_metadata_sync_timeout_ms - -Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== cloud_storage_min_chunks_per_segment_threshold - -The minimum number of chunks per segment for trimming to be enabled. If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `18446744073709551615`] - -*Default:* `5` - ---- - -=== cloud_storage_partial_scrub_interval_ms - -Time interval between two partial scrubs of the same partition. 
- -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `3600000` (1h) - ---- - -=== cloud_storage_readreplica_manifest_sync_timeout_ms - -Timeout to check if new data is available for partitions in object storage for read replicas. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== cloud_storage_recovery_temporary_retention_bytes_default - -Retention in bytes for topics created during automated recovery. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `1073741824` - ---- - -=== cloud_storage_recovery_topic_validation_depth - -Number of metadata segments to validate, from newest to oldest, when <> is set to `check_manifest_and_segment_metadata`. - -*Requires restart:* No - -*Required:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`0`, `4294967295`] - -*Default:* `10` - ---- - -=== cloud_storage_recovery_topic_validation_mode - -Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. For each topic, this reports errors for all partitions, but for each partition, only the first error is reported. - -This property accepts the following parameters: - -- `no_check`: Skips the checks for topic recovery. -- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. Fails if there are connection issues to the object storage. -- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects. - -Example: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0: - -```bash -ERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok -ERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error -``` - -Each failing partition error message has the following format: - -```bash -ERROR .... [... recovery validation of {}...] - , validation not ok -``` - -At the end of the process, Redpanda outputs a final ERROR message: - -```bash -ERROR ... ... - Stopping recovery of {} due to validation error -``` - -*Requires restart:* No - -*Required:* No - -*Visibility:* `tunable` - -*Type:* string - -*Default:* `check_manifest_existence` - -*Accepted values:* [`no_check`, `check_manifest_existence`, `check_manifest_and_segment_metadata`] - -*Related topics:* xref:manage:whole-cluster-restore.adoc[] - ---- - -=== cloud_storage_region - -Cloud provider region that houses the bucket or container used for storage. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_roles_operation_timeout_ms - -Timeout for IAM role related operations (ms). 
- -*Unit:* milliseconds - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== cloud_storage_scrubbing_interval_jitter_ms - -Jitter applied to the object storage scrubbing interval. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `600000` - ---- - -=== cloud_storage_secret_key - -Cloud provider secret key. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_segment_max_upload_interval_sec - -Time that a segment can be kept locally without uploading it to the object storage, in seconds. - -*Unit:* seconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17179869184`, `17179869183`] - -*Default:* `3600` (one hour) - ---- - -=== cloud_storage_segment_size_min - -Smallest acceptable segment size in the object storage. Default: `cloud_storage_segment_size_target`/2. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `cloud_storage_segment_size_target/2` - -*Related property:* <> - ---- - -=== cloud_storage_segment_size_target - -Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== cloud_storage_segment_upload_timeout_ms - -Log segment upload timeout, in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== cloud_storage_spillover_manifest_max_segments - -Maximum number of segments in the spillover manifest that can be offloaded to the object storage. This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `null` - ---- - -=== cloud_storage_spillover_manifest_size - -The size of the manifest which can be offloaded to the cloud. If the size of the local manifest stored in Redpanda exceeds `cloud_storage_spillover_manifest_size` by two times the spillover mechanism will split the manifest into two parts and one will be uploaded to object storage. - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `65536` - ---- - -=== cloud_storage_throughput_limit_percent - -Maximum throughput used by Tiered Storage per broker expressed as a percentage of the disk bandwidth. If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. The actual usage depends on your workload and the state of the Tiered Storage cache. This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob. 
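For example, to reserve more disk bandwidth for local workloads, lower the Tiered Storage share (the value is illustrative):

[,bash]
----
# Allow Tiered Storage to use at most 25% of the cache disk's bandwidth.
rpk cluster config set cloud_storage_throughput_limit_percent 25
----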
- -*Unit:* percent - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Default:* `50` - ---- - -=== cloud_storage_topic_purge_grace_period_ms - -Grace period during which the purger refuses to purge the topic. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `30000` - ---- - -=== cloud_storage_trust_file - -Path to certificate that should be used to validate server certificate during TLS handshake. - -*Requires restart:* Yes - -*Gets restored during cluster restore:* No - -*Visibility:* `user` - -*Type:* string - -*Default:* `null` - ---- - -=== cloud_storage_upload_ctrl_d_coeff - -Derivative coefficient for upload PID controller. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `0.0` - ---- - -=== cloud_storage_upload_ctrl_max_shares - -Maximum number of I/O and CPU shares that archival upload can use. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `1000` - ---- - -=== cloud_storage_upload_ctrl_min_shares - -Minimum number of I/O and CPU shares that archival upload can use. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-32768`, `32767`] - -*Default:* `100` - ---- - -=== cloud_storage_upload_ctrl_p_coeff - -Proportional coefficient for upload PID controller. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `-2.0` - ---- - -=== cloud_storage_upload_ctrl_update_interval_ms - -The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage. - -This is an internal-only configuration and should be enabled only after consulting with Redpanda support. - -*Requires restart:* Yes - -*Visibility:* `tunable` - -*Type:* number - -*Default:* `60000` (60s) - ---- - -=== cloud_storage_upload_loop_initial_backoff_ms - -Initial backoff interval when there is nothing to upload for a partition, in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `100` - ---- - -=== cloud_storage_upload_loop_max_backoff_ms - -Maximum backoff interval when there is nothing to upload for a partition, in milliseconds. - -*Unit:* milliseconds - -*Requires restart:* No - -*Visibility:* `tunable` - -*Type:* integer - -*Accepted values:* [`-17592186044416`, `17592186044415`] - -*Default:* `10000` - ---- - -=== cloud_storage_url_style - -Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage. - -Leave this property unset (`null`) to use automatic configuration: - -* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed -* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration - -Set this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format. - -CAUTION: AWS requires virtual-hosted addressing for buckets created after September 30, 2020. If you use AWS S3 with buckets created after this date, use `virtual_host` addressing. 
- -NOTE: For MinIO deployments, Redpanda defaults to `path` style when this property is unset. To use `virtual_host` addressing with a configured `MINIO_DOMAIN`, set this property explicitly to `virtual_host`. For other S3-compatible storage backends, consult your provider's documentation to determine the required URL style. - -*Requires restart:* Yes - -*Visibility:* `user` - -*Type:* string - -*Accepted values:* [`virtual_host`, `path`, `null`] - -*Example formats:* - -* `virtual_host` - Example: `.s3.amazonaws.com` -* `path` - Example: `s3.amazonaws.com/` -* `null` - Enable automatic configuration - -*Default:* `null` - ---- +include::reference:partial$properties/object-storage-properties.adoc[tags=!deprecated,!exclude-from-docs] diff --git a/modules/reference/pages/properties/topic-properties.adoc b/modules/reference/pages/properties/topic-properties.adoc index 630fcf7740..dd453859c8 100644 --- a/modules/reference/pages/properties/topic-properties.adoc +++ b/modules/reference/pages/properties/topic-properties.adoc @@ -12,731 +12,41 @@ include::develop:partial$topic-properties-warning.adoc[] == Topic property mappings -|=== -| Topic property | Corresponding cluster property - -| <> -| xref:./cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] - -| <> -| xref:./cluster-properties.adoc#compaction_strategy[`compaction_strategy`] - -| <> -| xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] - -| <> -| xref:./cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`] -| <> -| xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`] - -| <> -| xref:./cluster-properties.adoc#flush_ms[`flush_ms`] - -| <> -| xref:./cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`] - -| <> -| xref:./cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`] - -| <> -| xref:./cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] - -| <> -| xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] - -| <> -| xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] - -| <> -| xref:./cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`] - -| <> -| xref:./cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] - -| <> -| xref:./cluster-properties.adoc#default_topic_replications[`default_topic_replications`] - -| <> -| xref:./cluster-properties.adoc#retention_bytes[`retention_bytes`] - -| <> -| xref:./cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`] - -| <> -| xref:./cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`] - -| <> -| xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] - -| <> -| xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] - -| <> -| xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`] - -| <> -| xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`] - -|=== +include::reference:partial$topic-property-mappings.adoc[] --- == Retention and Compaction Properties These properties control how data is stored, for how long, and when it is deleted or compacted. -[[cleanuppolicy]] -=== cleanup.policy - -The cleanup policy to apply for log segments of a topic. - -When `cleanup.policy` is set, it overrides the cluster property xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] for the topic. 
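For example, you can set the policy at creation time or change it on an existing topic (the topic name is a placeholder):

[,bash]
----
# Create a compacted topic.
rpk topic create my-topic -c cleanup.policy=compact

# Switch an existing topic to compaction plus deletion.
rpk topic alter-config my-topic --set cleanup.policy=compact,delete
----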
- -*Type:* string - -*Accepted values:* [`delete`, `compact`, `compact,delete`] - -*Default:* `delete` - -**Values**: - -- `delete` - Deletes data according to size-based or time-based retention limits, or both. -- `compact` - Deletes data according to a key-based retention policy, discarding all but the latest value for each key. -- `compact,delete` - The latest values are kept for each key, while the remaining data is deleted according to retention limits. - -*Related cluster property:* xref:./cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] - -**Related topics**: - -- xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size] -- xref:manage:tiered-storage.adoc#compacted-topics-in-tiered-storage[Compacted topics in Tiered Storage] - ---- -[[compactionstrategy]] -=== compaction.strategy - -Specifies the strategy used to determine which records to remove during log compaction. The compaction strategy controls how Redpanda identifies and removes duplicate records while preserving the latest value for each key. - -*Type:* string - -*Default:* `offset` - -*Accepted values:* - -* `offset` - Uses record offsets to determine which records to compact (default and currently the only supported strategy) - -*Related cluster property:* xref:./cluster-properties.adoc#compaction_strategy[`compaction_strategy`] - ---- -[[deleteretentionms]] -=== delete.retention.ms - -The retention time for tombstone records in a compacted topic. Redpanda removes tombstone records after the retention limit is exceeded. - -If you have enabled Tiered Storage and set <> or <> for the topic, you cannot enable tombstone removal. - -If both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`1`, `9223372036854775`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`] - -**Related topics**: - -- xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal] - ---- -[[maxcompactionlagms]] -=== max.compaction.lag.ms - -The maximum amount of time (in ms) that a log segment can remain unaltered before it is eligible for compaction in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] for the topic. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`1`, `9223372036854775`] - -*Default:* `9223372036854775` - -*Related cluster property:* xref:./cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] - -**Related topics**: - -- xref:manage:cluster-maintenance/compaction-settings.adoc#configuration-options[Configure maximum compaction lag] - ---- -[[mincleanabledirtyratio]] -=== min.cleanable.dirty.ratio - -The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. 
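For example, to compact a partition only after half of its closed log consists of dirty segments (the topic name and value are placeholders):

[,bash]
----
# Require at least a 50% dirty ratio before compaction becomes eligible.
rpk topic alter-config my-topic --set min.cleanable.dirty.ratio=0.5
----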
- -*Type:* number - -*Accepted values:* [`0`, `1.0`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`] - -**Related topics**: - -- xref:manage:cluster-maintenance/compaction-settings.adoc[] - ---- -[[mincompactionlagms]] -=== min.compaction.lag.ms - -The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] for the topic. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`0`, `9223372036854775`] - -*Default:* `0` - -*Related cluster property:* xref:./cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] - -**Related topics**: - -- xref:manage:cluster-maintenance/compaction-settings.adoc#configure-min-compaction-lag[Configure minimum compaction lag] - ---- -[[retentionbytes]] -=== retention.bytes +include::reference:partial$properties/topic-properties.adoc[tags=category-retention-compaction,!deprecated,!exclude-from-docs] -A size-based retention limit that configures the maximum size that a topic partition can grow before becoming eligible for cleanup. - -If `retention.bytes` is set to a positive value, it overrides the cluster property xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] for the topic, and the total retained size for the topic is `retention.bytes` multiplied by the number of partitions for the topic. - -When both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, cleanup occurs when either limit is reached. - -*Type:* integer - -*Unit:* bytes - -*Accepted values:* [`1`, `9223372036854775807`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#retention_bytes[`retention_bytes`] - -**Related topics**: - -- xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] - ---- -[[retentionms]] -=== retention.ms - -A time-based retention limit that configures the maximum duration that a log's segment file for a topic is retained before it becomes eligible to be cleaned up. To consume all data, a consumer of the topic must read from a segment before its `retention.ms` elapses, otherwise the segment may be compacted and/or deleted. If a non-positive value, no per-topic limit is applied. - -If `retention.ms` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] for the topic. - -When both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, the earliest occurring limit applies. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`-9223372036854775808`, `9223372036854775807`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] - -**Related topics**: - -- xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] - ---- == Segment and Message Properties These properties control the size and lifecycle of log segment files and settings for individual messages. -[[compressiontype]] -=== compression.type - -Redpanda ignores this property and always uses producer compression semantics. If producers send compressed data, Redpanda stores and serves it as-is. If producers send uncompressed data, Redpanda stores it uncompressed. 
- -This property exists for Apache Kafka compatibility. Configure compression in your producers instead of using this topic property. - -Compression reduces message size and improves throughput, but increases CPU utilization. Enable producer batching to increase compression efficiency. - -When set, this property overrides the cluster property xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] for the topic. - -*Type:* string - -*Accepted values:* `producer`. The following values are accepted for Kafka compatibility but ignored: `gzip`, `snappy`, `lz4`, `zstd`, `none`. - -*Default:* `producer` - -*Related cluster property:* xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] - -*Related topics:* - -- xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching] -- xref:develop:produce-data/configure-producers.adoc#commonly-used-producer-configuration-options[Common producer configuration options] - ---- -[[maxmessagebytes]] -=== max.message.bytes - -The maximum size of a message or batch of a topic. If a compression type is enabled, `max.message.bytes` sets the maximum size of the compressed message or batch. - -If `max.message.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] for the topic. - -*Type:* integer - -*Unit:* bytes - -*Accepted values:* [`1`, `2147483647`] - -*Default:* null +include::reference:partial$properties/topic-properties.adoc[tags=category-segment-message,!deprecated,!exclude-from-docs] -*Related cluster property:* xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] - -**Related topics**: - -- xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching] - ---- -[[messagetimestamptype]] -=== message.timestamp.type - -The source of a message's timestamp: either the message's creation time or its log append time. - -When `message.timestamp.type` is set, it overrides the cluster property xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] for the topic. - -*Type:* string - -*Accepted values:* [`CreateTime`, `LogAppendTime`] - -*Default:* `CreateTime` - -**Values**: - -- `CreateTime` -- `LogAppendTime` - -*Related cluster property:* xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] - ---- -[[segmentbytes]] -=== segment.bytes - -The maximum size of an active log segment for a topic. When the size of an active segment exceeds `segment.bytes`, the segment is closed and a new active segment is created. The closed, inactive segment is then eligible to be cleaned up according to retention properties. - -When `segment.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] for the topic. 
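For example, to roll segments for one topic at 128 MiB instead of the cluster default (the topic name and size are placeholders):

[,bash]
----
# Close the active segment once it reaches 128 MiB (134217728 bytes).
rpk topic alter-config my-topic --set segment.bytes=134217728
----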
- -*Type:* integer - -*Unit:* bytes - -*Accepted values:* [`1`, `9223372036854775807`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] - -**Related topics**: - -- xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size] -- xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] -- xref:manage:remote-read-replicas.adoc[Remote Read Replicas] - ---- -[[segmentms]] -=== segment.ms - -The maximum duration that a log segment of a topic is active (open for writes and not deletable). A periodic event, with `segment.ms` as its period, forcibly closes the active segment and transitions, or rolls, to a new active segment. The closed (inactive) segment is then eligible to be cleaned up according to cleanup and retention properties. - -If set to a positive duration, `segment.ms` overrides the cluster property xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]. Values are automatically clamped between the cluster bounds set by xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] (default: 10 minutes) and xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] (default: 1 year). If your configured value exceeds these bounds, Redpanda uses the bound value and logs a warning. Check current cluster bounds with `rpk cluster config get log_segment_ms_min log_segment_ms_max`. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`600000`, `31536000000`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`] - -**Related topics**: - -- xref:manage:cluster-maintenance/disk-utilization.adoc#log-rolling[Log rolling] - ---- == Performance and Cluster Properties These properties control disk flushing, replication, and how topics interact with the cluster. -[[flushbytes]] -=== flush.bytes - -The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested. - -*Type:* integer - -*Unit:* bytes - -*Accepted values:* [`1`, `9223372036854775807`] +include::reference:partial$properties/topic-properties.adoc[tags=category-performance-cluster,!deprecated,!exclude-from-docs] -*Default:* `262144` - -*Related cluster property:* xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`] - -**Related topics**: - -- xref:develop:produce-data/configure-producers.adoc[] - ---- -[[flushms]] -=== flush.ms - -The maximum delay (in ms) between two subsequent fsyncs. After this delay, the log is automatically fsynced. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`1`, `9223372036854775`] - -*Default:* `100` - -*Related cluster property:* xref:./cluster-properties.adoc#flush_ms[`flush_ms`] - -**Related topics**: - -- xref:develop:produce-data/configure-producers.adoc[] - ---- -[[redpandaleaderspreference]] -=== redpanda.leaders.preference - -The preferred location (rack) for partition leaders of a topic. - -This property inherits the value from the config_ref:default_leaders_preference,true,properties/cluster-properties[] cluster configuration property. You may override the cluster-wide setting by specifying the value for individual topics. - -If the cluster configuration property config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, Leader Pinning is disabled across the cluster. 
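For example, assuming brokers are configured with a rack ID of `rack-a`, you can pin a topic's partition leaders to that rack using the `racks:` form described under Values (the topic and rack names are placeholders):

[,bash]
----
# Prefer partition leaders on brokers in rack-a for this topic.
rpk topic alter-config my-topic --set redpanda.leaders.preference=racks:rack-a
----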
- -*Type:* string - -*Default:* `none` - -**Values**: - -- `none`: Opt out the topic from Leader Pinning. -- `racks:[,,...]`: Specify the preferred location (rack) of all topic partition leaders. The list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks. - -**Related topics**: - -- xref:develop:produce-data/leader-pinning.adoc[Leader pinning] - ---- -[[replicationfactor]] -=== replication.factor - -The number of replicas of a topic to save in different nodes (brokers) of a cluster. - -If `replication.factor` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] for the topic. - -NOTE: Although `replication.factor` isn't returned or displayed by xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] as a valid Kafka property, you can set it using xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]. When the `replication.factor` of a topic is altered, it isn't simply a property value that's updated, but rather the actual replica sets of topic partitions that are changed. - -*Type:* integer - -*Accepted values:* [`1`, `512`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`] - -**Related topics**: - -- xref:develop:config-topics.adoc#choose-the-replication-factor[Choose the replication factor] -- xref:develop:config-topics.adoc#change-the-replication-factor[Change the replication factor] - ---- -[[writecaching]] -=== write.caching - -The write caching mode to apply to a topic. - -When `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <> and <>, whichever is reached first. - -*Type:* boolean - -*Default:* `false` - -**Values**: - -- `true` - Enables write caching for a topic, according to <> and <>. -- `false` - Disables write caching for a topic, according to <> and <>. - -*Related cluster property:* xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`] - -**Related topics**: - -- xref:develop:config-topics.adoc#configure-write-caching[Write caching] - ---- == Tiered Storage properties Configure properties to manage topics for xref:manage:tiered-storage.adoc[Tiered Storage]. -[[initialretentionlocaltargetbytes]] -=== initial.retention.local.target.bytes - -A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred. 
- -*Type:* integer - -*Unit:* bytes - -*Accepted values:* [`1`, `9223372036854775807`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`] - -**Related topics**: - -- xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage] - ---- -[[initialretentionlocaltargetms]] -=== initial.retention.local.target.ms - -A time-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`1`, `9223372036854775`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`] - -**Related topics**: - -- xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage] - ---- -[[redpandaremotedelete]] -=== redpanda.remote.delete - -A flag that enables deletion of data from object storage for Tiered Storage when it's deleted from local storage for a topic. - -NOTE: `redpanda.remote.delete` doesn't apply to Remote Read Replica topics: a Remote Read Replica topic isn't deleted from object storage when this flag is `true`. - -*Type:* boolean - -*Default:* - -- `false` for topics created using Redpanda 22.2 or earlier. -- `true` for topics created in Redpanda 22.3 and later, including new topics on upgraded clusters. - -**Related topics**: - -- xref:manage:tiered-storage.adoc[Tiered Storage] - ---- -[[redpandaremoteread]] -=== redpanda.remote.read - -A flag for enabling Redpanda to fetch data for a topic from object storage to local storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature. - -*Type:* boolean - -*Default:* false - -**Related topics**: +include::reference:partial$properties/topic-properties.adoc[tags=category-tiered-storage,!deprecated,!exclude-from-docs] -- xref:manage:tiered-storage.adoc[Tiered Storage] - ---- -[[redpandaremoterecovery]] -=== redpanda.remote.recovery - -A flag that enables the recovery or reproduction of a topic from object storage for Tiered Storage. The recovered data is saved in local storage, and the maximum amount of recovered data is determined by the local storage retention limits of the topic. - -TIP: You can only configure `redpanda.remote.recovery` when you create a topic. You cannot apply this setting to existing topics. - -*Type:* boolean - -*Default:* false - -**Related topics**: - -- xref:manage:tiered-storage.adoc[Tiered Storage] - ---- -[[redpandaremotewrite]] -=== redpanda.remote.write - -A flag for enabling Redpanda to upload data for a topic from local storage to object storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature. - -*Type:* boolean - -*Default:* false - -**Related topics**: - -- xref:manage:tiered-storage.adoc[Tiered Storage] - ---- -[[retentionlocaltargetbytes]] -=== retention.local.target.bytes - -A size-based retention limit for Tiered Storage that configures the maximum size that a topic partition in local storage can grow before becoming eligible for cleanup. It applies per partition and is equivalent to <> without Tiered Storage. 
- -*Type:* integer - -*Unit:* bytes - -*Accepted values:* [`1`, `9223372036854775807`] - -*Default:* null - -*Related cluster property:* xref:./cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`] - -**Related topics**: - -- xref:manage:tiered-storage.adoc[Tiered Storage] - ---- -[[retentionlocaltargetms]] -=== retention.local.target.ms - -A time-based retention limit for Tiered Storage that sets the maximum duration that a log's segment file for a topic is retained in local storage before it's eligible for cleanup. This property is equivalent to <> without Tiered Storage. - -*Type:* integer - -*Unit:* milliseconds - -*Accepted values:* [`1`, `9223372036854775`] - -*Default:* 86400000 - -*Related cluster property:* xref:./cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`] - -**Related topics**: - -- xref:manage:tiered-storage.adoc[Tiered Storage] - ---- == Remote Read Replica properties Configure properties to manage topics for xref:manage:remote-read-replicas.adoc[Remote Read Replicas]. -[[redpandaremotereadreplica]] -=== redpanda.remote.readreplica - -The name of the object storage bucket for a Remote Read Replica topic. - -CAUTION: Setting `redpanda.remote.readreplica` together with either `redpanda.remote.read` or `redpanda.remote.write` results in an error. - -*Type:* string - -*Default:* null +include::reference:partial$properties/topic-properties.adoc[tags=category-remote-read-replica,!deprecated,!exclude-from-docs] -**Related topics**: - -- xref:manage:remote-read-replicas.adoc[Remote Read Replicas] - ---- == Apache Iceberg integration properties Integrate Redpanda topics as Iceberg tables. -[[redpandaicebergdelete]] -=== redpanda.iceberg.delete - -Whether the corresponding Iceberg table is deleted upon deleting the topic. - -*Type:* boolean - -*Default:* `true` - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- -[[redpandaiceberginvalidrecordaction]] -=== redpanda.iceberg.invalid.record.action - -Whether to write invalid records to a dead-letter queue (DLQ). - -*Type:* string - -*Default:* `dlq_table` - -**Values**: - -- `drop`: Disable the DLQ and drop invalid records. -- `dlq_table`: Write invalid records to a separate DLQ Iceberg table. - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors] - ---- -[[redpandaicebergmode]] -=== redpanda.iceberg.mode - -Enable the Iceberg integration for the topic. You can choose one of four modes. - -*Type:* string - -*Default:* `disabled` - -**Values**: - -- `key_value`: Creates an Iceberg table with a `Key` column and a `Value` column. Redpanda stores the raw topic content in the `Value` column. -- `value_schema_id_prefix`: Creates an Iceberg table whose structure matches the Redpanda schema for the topic, with columns corresponding to each field. Redpanda uses the Schema Registry wire format, consisting of the "magic byte" and schema ID encoded in the payload header, to parse the topic values per field and store them in the corresponding table columns. -- `value_schema_latest`: Creates an Iceberg table whose structure matches the latest schema version in Schema Registry that matches the subject name. This mode is compatible with Avro and Protobuf schemas and is used when you don't produce to the topic using the wire format. See xref:manage:iceberg/choose-iceberg-mode.adoc#override-value-schema-latest-default[Choose an Iceberg Mode] for details on using this mode. 
-- `disabled`: Disables writing to an Iceberg table for the topic. - -**Related topics**: - -- xref:manage:iceberg/choose-iceberg-mode.adoc[] -- xref:manage:iceberg/about-iceberg-topics.adoc[] - ---- -[[redpandaicebergpartitionspec]] -=== redpanda.iceberg.partition.spec - -The link:https://iceberg.apache.org/docs/nightly/partitioning/[partitioning^] specification for the Iceberg table. - -*Type:* string - -*Default:* `(hour(redpanda.timestamp))` - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc#use-custom-partitioning[Use custom partitioning] - ---- -[[redpandaicebergtargetlagms]] -=== redpanda.iceberg.target.lag.ms - -Controls how often the data in the Iceberg table is refreshed with new data from the topic. Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability. - -*Type:* integer - -*Default:* `60000` - -**Related topics**: - -- xref:manage:iceberg/about-iceberg-topics.adoc[] \ No newline at end of file +include::reference:partial$properties/topic-properties.adoc[tags=category-iceberg-integration,!deprecated,!exclude-from-docs] \ No newline at end of file diff --git a/modules/reference/pages/property-report.json b/modules/reference/pages/property-report.json new file mode 100644 index 0000000000..8066bed5a9 --- /dev/null +++ b/modules/reference/pages/property-report.json @@ -0,0 +1,100 @@ +{ + "empty_descriptions": [ + "cloud_storage_disable_metadata_consistency_checks", + "cloud_storage_reconciliation_interval_ms", + "coproc_max_batch_size", + "coproc_max_inflight_bytes", + "coproc_max_ingest_bytes", + "coproc_offset_flush_interval_ms", + "coproc_supervisor_server", + "dashboard_dir", + "datalake_disk_space_monitor_interval", + "enable_admin_api", + "enable_central_config", + "enable_coproc", + "find_coordinator_timeout_ms", + "full_raft_configuration_recovery_pattern", + "id_allocator_replication", + "kafka_admin_topic_api_rate", + "kafka_client_group_byte_rate_quota", + "kafka_client_group_fetch_byte_rate_quota", + "kafka_quota_balancer_min_shard_throughput_bps", + "kafka_quota_balancer_min_shard_throughput_ratio", + "kafka_quota_balancer_node_period_ms", + "kafka_quota_balancer_window_ms", + "kafka_throughput_throttling_v2", + "leader_balancer_mode", + "log_compaction_adjacent_merge_self_compaction_count", + "max_version", + "min_version", + "raft_max_concurrent_append_requests_per_follower", + "redpanda.cloud_topic.enabled", + "redpanda.key.schema.id.validation", + "redpanda.key.subject.name.strategy", + "redpanda.remote.allowgaps", + "redpanda.value.schema.id.validation", + "redpanda.value.subject.name.strategy", + "redpanda.virtual.cluster.id", + "rm_violation_recovery_policy", + "schema_registry_protobuf_renderer_v2", + "seed_server_meta_topic_partitions", + "seq_table_min_size", + "target_fetch_quota_byte_rate", + "target_quota_byte_rate", + "tm_violation_recovery_policy", + "transaction_coordinator_replication", + "tx_registry_log_capacity", + "tx_registry_sync_timeout_ms", + "use_scheduling_groups" + ], + "deprecated_properties": [ + "cloud_storage_cache_trim_carryover_bytes", + "cloud_storage_disable_metadata_consistency_checks", + "cloud_storage_max_materialized_segments_per_shard", + "cloud_storage_max_partition_readers_per_shard", + "cloud_storage_reconciliation_interval_ms", + "coproc_max_batch_size", + "coproc_max_inflight_bytes", + "coproc_max_ingest_bytes", + "coproc_offset_flush_interval_ms", + "coproc_supervisor_server", + "dashboard_dir", + "datalake_disk_space_monitor_interval", + 
"enable_admin_api", + "enable_auto_rebalance_on_node_add", + "enable_central_config", + "enable_coproc", + "find_coordinator_timeout_ms", + "full_raft_configuration_recovery_pattern", + "health_monitor_tick_interval", + "id_allocator_replication", + "kafka_admin_topic_api_rate", + "kafka_client_group_byte_rate_quota", + "kafka_client_group_fetch_byte_rate_quota", + "kafka_quota_balancer_min_shard_throughput_bps", + "kafka_quota_balancer_min_shard_throughput_ratio", + "kafka_quota_balancer_node_period_ms", + "kafka_quota_balancer_window_ms", + "kafka_throughput_throttling_v2", + "leader_balancer_mode", + "log_compaction_adjacent_merge_self_compaction_count", + "max_version", + "min_version", + "partition_autobalancing_movement_batch_size_bytes", + "raft_flush_timer_interval_ms", + "raft_max_concurrent_append_requests_per_follower", + "rm_violation_recovery_policy", + "schema_registry_protobuf_renderer_v2", + "seed_server_meta_topic_partitions", + "seq_table_min_size", + "target_fetch_quota_byte_rate", + "target_quota_byte_rate", + "tm_violation_recovery_policy", + "transaction_coordinator_replication", + "tx_log_stats_interval_s", + "tx_registry_log_capacity", + "tx_registry_sync_timeout_ms", + "use_scheduling_groups" + ], + "undocumented_properties": [] +} \ No newline at end of file diff --git a/modules/reference/partials/deprecated/deprecated-properties.adoc b/modules/reference/partials/deprecated/deprecated-properties.adoc new file mode 100644 index 0000000000..6809c5e81f --- /dev/null +++ b/modules/reference/partials/deprecated/deprecated-properties.adoc @@ -0,0 +1,106 @@ +// This content is autogenerated. Do not edit manually. To override descriptions, use the doc-tools CLI with the --overrides option: https://redpandadata.atlassian.net/wiki/spaces/DOC/pages/1396244485/Review+Redpanda+configuration+properties += Deprecated Configuration Properties +:description: This is an exhaustive list of all the deprecated properties. + +This is an exhaustive list of all the deprecated properties. 
+ +== Broker properties + +- `coproc_supervisor_server` + +- `dashboard_dir` + +- `enable_central_config` + + +== Cluster properties + +- `cloud_storage_cache_trim_carryover_bytes` + +- `cloud_storage_disable_metadata_consistency_checks` + +- `cloud_storage_max_materialized_segments_per_shard` + +- `cloud_storage_max_partition_readers_per_shard` + +- `cloud_storage_reconciliation_interval_ms` + +- `coproc_max_batch_size` + +- `coproc_max_inflight_bytes` + +- `coproc_max_ingest_bytes` + +- `coproc_offset_flush_interval_ms` + +- `datalake_disk_space_monitor_interval` + +- `enable_admin_api` + +- `enable_auto_rebalance_on_node_add` + +- `enable_coproc` + +- `find_coordinator_timeout_ms` + +- `full_raft_configuration_recovery_pattern` + +- `health_monitor_tick_interval` + +- `id_allocator_replication` + +- `kafka_admin_topic_api_rate` + +- `kafka_client_group_byte_rate_quota` + +- `kafka_client_group_fetch_byte_rate_quota` + +- `kafka_quota_balancer_min_shard_throughput_bps` + +- `kafka_quota_balancer_min_shard_throughput_ratio` + +- `kafka_quota_balancer_node_period_ms` + +- `kafka_quota_balancer_window_ms` + +- `kafka_throughput_throttling_v2` + +- `leader_balancer_mode` + +- `log_compaction_adjacent_merge_self_compaction_count` + +- `max_version` + +- `min_version` + +- `partition_autobalancing_movement_batch_size_bytes` + +- `raft_flush_timer_interval_ms` + +- `raft_max_concurrent_append_requests_per_follower` + +- `rm_violation_recovery_policy` + +- `schema_registry_protobuf_renderer_v2` + +- `seed_server_meta_topic_partitions` + +- `seq_table_min_size` + +- `target_fetch_quota_byte_rate` + +- `target_quota_byte_rate` + +- `tm_violation_recovery_policy` + +- `transaction_coordinator_replication` + +- `tx_log_stats_interval_s` + +- `tx_registry_log_capacity` + +- `tx_registry_sync_timeout_ms` + +- `use_scheduling_groups` + + diff --git a/modules/reference/partials/properties/broker-properties.adoc b/modules/reference/partials/properties/broker-properties.adoc new file mode 100644 index 0000000000..947ecd7211 --- /dev/null +++ b/modules/reference/partials/properties/broker-properties.adoc @@ -0,0 +1,1914 @@ +// This content is autogenerated. Do not edit manually. To override descriptions, use the doc-tools CLI with the --overrides option: https://redpandadata.atlassian.net/wiki/spaces/DOC/pages/1396244485/Review+Redpanda+configuration+properties +// tag::category-redpanda[] +=== admin + +Network address for the glossterm:Admin API[] server. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[{address: "127.0.0.1", port: 9644}]` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + admin: + - name: + address: + port: +---- + + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== admin_api_doc_dir + +Path to the API specifications for the Admin API. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `/usr/share/redpanda/admin-api-doc` +endif::[] + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== admin_api_tls + +Specifies the TLS configuration for the HTTP Admin API. 
+ +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + admin_api_tls: + - name: + enabled: true + cert_file: + key_file: + truststore_file: + require_client_auth: true +---- + + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== advertised_kafka_api + +Address of the Kafka API published to the clients. If not set, the <> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + advertised_kafka_api: + - name: + address: + port: +---- + + +--- +// end::category-redpanda[] + +// tag::category-pandaproxy[] +=== advertised_pandaproxy_api + +Network address for the HTTP Proxy API server to publish to clients. + +*Requires restart:* Yes + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy[] + +// tag::category-redpanda[] +=== advertised_rpc_api + +Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + advertised_rpc_api: + address: + port: +---- + + +--- +// end::category-redpanda[] + +// tag::category-pandaproxy[] +=== api_doc_dir + +Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API. + +*Requires restart:* Yes + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `/usr/share/redpanda/proxy-api-doc` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy[] + +// tag::category-pandaproxy-client[] +=== broker_tls + +TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect. + +*Requires restart:* Yes + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `{cert_file: null, enabled: null, key_file: null, require_client_auth: null, truststore_file: null}` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy-client[] +=== brokers + +Network addresses of the Kafka API servers to which the HTTP Proxy client should connect. 
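+For illustration, the snippet below points the HTTP Proxy client at two brokers. It assumes the client settings live under the `pandaproxy_client` section of `redpanda.yaml` and that each entry takes an `address` and `port` pair; the addresses shown are placeholders, not defaults.
+
+.Example
+[,yaml]
+----
+pandaproxy_client:
+  brokers:
+    # Placeholder addresses; replace with your Kafka API listeners.
+    - address: 192.168.1.10
+      port: 9092
+    - address: 192.168.1.11
+      port: 9092
+----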
+ +*Requires restart:* Yes + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `vector` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy[] +=== client_cache_max_size + +The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage. + +*Requires restart:* Yes + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy[] + +// tag::category-pandaproxy-client[] +=== client_identifier + +Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities. + +*Requires restart:* Yes + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `test_client` +endif::[] + +*Nullable:* Yes + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy[] +=== client_keep_alive + +Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API. + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5min` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy[] + +// tag::category-redpanda[] +=== cloud_storage_cache_directory + +Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. If not specified, Redpanda uses a default path within the data directory. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + + +.Example +[,yaml] +---- +redpanda: + cloud_storage_cache_directory: +---- + + +Replace `` with the full path to your desired cache directory. + + +*Related topics:* + +* xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== cloud_storage_inventory_hash_path_directory + +Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + + +.Example +[,yaml] +---- +redpanda: + cloud_storage_inventory_hash_store: +---- + + +--- +// end::category-redpanda[] + +=== consumer_heartbeat_interval_ms + +Interval (in milliseconds) for consumer heartbeats. 
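+As a rough example, a deployment that tolerates slower failure detection for proxy-managed consumers might lengthen the heartbeat interval. The snippet assumes the setting belongs under `pandaproxy_client` in `redpanda.yaml`; the 1-second value is illustrative.
+
+.Example
+[,yaml]
+----
+pandaproxy_client:
+  consumer_heartbeat_interval_ms: 1000  # illustrative value
+----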
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `500ms` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy-client[] +=== consumer_heartbeat_interval_ms + +Interval (in milliseconds) for consumer heartbeats. + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +=== consumer_instance_timeout_ms + +How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats. + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `minutes` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy[] +=== consumer_instance_timeout_ms + +How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats. + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy[] + +=== consumer_rebalance_timeout_ms + +Timeout (in milliseconds) for consumer rebalance. + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2s` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy-client[] +=== consumer_rebalance_timeout_ms + +Timeout (in milliseconds) for consumer rebalance. + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy-client[] +=== consumer_request_max_bytes + +Maximum bytes to fetch per request. + +*Unit:* bytes + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_MiB` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy-client[] +=== consumer_request_min_bytes + +Minimum bytes to fetch per request. + +*Unit:* bytes + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +=== consumer_request_timeout_ms + +Interval (in milliseconds) for consumer request timeout. 
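+As an illustration, the request timeout can be tuned together with the fetch byte limits described above so that small fetches return promptly. The snippet assumes the `pandaproxy_client` section of `redpanda.yaml`; all values are placeholders.
+
+.Example
+[,yaml]
+----
+pandaproxy_client:
+  consumer_request_min_bytes: 1          # return as soon as any data is available
+  consumer_request_max_bytes: 1048576    # cap a single fetch at 1 MiB
+  consumer_request_timeout_ms: 200       # illustrative timeout
+----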
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy-client[] +=== consumer_request_timeout_ms + +Interval (in milliseconds) for consumer request timeout. + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +=== consumer_session_timeout_ms + +Timeout (in milliseconds) for consumer session. + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy-client[] +=== consumer_session_timeout_ms + +Timeout (in milliseconds) for consumer session. + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::deprecated[] +=== coproc_supervisor_server + +No description available. + + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::category-redpanda[] +=== crash_loop_limit + +A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. This limit prevents a broker from getting stuck in an infinite cycle of crashes. + +If `null`, the property is disabled and no limit is applied. + +The crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions: + +* The broker shuts down cleanly. +* One hour passes since the last crash. +* The `redpanda.yaml` broker configuration file is updated. +* The `startup_log` file in the broker's <> broker property is manually deleted. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5` +endif::[] + +*Nullable:* Yes + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== crash_loop_sleep_sec + +ifndef::env-cloud[] +*Introduced in v24.3.4* +endif::[] + +The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments. + +If `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit. + +For information about how to reset the crash loop limit, see the <> broker property. 
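+For example, the two crash-loop properties can be combined so that a broker that keeps crashing pauses before terminating, leaving a window to collect logs or attach a debugger. The values below are illustrative and belong in the `redpanda` section of `redpanda.yaml`.
+
+.Example
+[,yaml]
+----
+redpanda:
+  crash_loop_limit: 5        # illustrative threshold
+  crash_loop_sleep_sec: 300  # sleep five minutes before terminating (illustrative)
+----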
+ +*Unit:* seconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::category-redpanda[] + +// tag::deprecated[] +=== dashboard_dir + +No description available. + + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::category-redpanda[] +=== data_directory + +Path to the directory for storing Redpanda's streaming data files. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== developer_mode + +CAUTION: Enabling `developer_mode` isn't recommended for production use. + +Enable developer mode, which skips most of the checks performed at startup. + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== emergency_disable_data_transforms + +Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. This is an emergency shutoff button. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== empty_seed_starts_cluster + +Controls how a new cluster is formed. All brokers in a cluster must have the same value. + +<> to form a cluster. + +TIP: For backward compatibility, `true` is the default. Redpanda recommends using `false` in production environments to prevent accidental cluster formation. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::deprecated[] +=== enable_central_config + +No description available. + + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::category-redpanda[] +=== fips_mode + +Controls whether Redpanda starts in FIPS mode. This property allows for three values: + +* Disabled - Redpanda does not start in FIPS mode. + +* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods. + +* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. If the file does not exist or does not return `1`, Redpanda immediately exits. 
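+A hypothetical broker that should run the FIPS checks without refusing to start could use the permissive setting. This sketch is illustrative; confirm the exact accepted value strings for your Redpanda version before relying on them.
+
+.Example
+[,yaml]
+----
+redpanda:
+  fips_mode: permissive  # assumed lowercase value; verify against your version
+----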
+ +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* fips_mode_flag + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `disabled` +endif::[] + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== kafka_api + +IP address and port of the Kafka API endpoint that handles requests. Supports multiple listeners with different configurations. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[{address: "127.0.0.1", port: 9092}]` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] +* xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== kafka_api_tls + +Transport Layer Security (TLS) configuration for the Kafka API endpoint. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + kafka_api_tls: + - name: + enabled: true + cert_file: + key_file: + truststore_file: + require_client_auth: false +---- + + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== memory_allocation_warning_threshold + +Threshold for log messages that contain a larger memory allocation than specified. + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128_KiB + 1` +endif::[] + +*Nullable:* Yes + +--- +// end::category-redpanda[] + +// tag::category-schema-registry[] +=== mode_mutability + +Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects. + +*Requires restart:* Yes + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-redpanda[] +=== node_id + +A number that uniquely identifies the broker within the cluster. If `null` (the default value), Redpanda automatically assigns an ID. If set, it must be non-negative value. + +.Do not set `node_id` manually. +[WARNING] +==== +Redpanda assigns unique IDs automatically to prevent issues such as: + +- Brokers with empty disks rejoining the cluster. +- Conflicts during recovery or scaling. + +Manually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures. +==== + +Broker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed. 
+ +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== node_id_overrides + +List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID. + +Remove this property after the cluster restarts successfully and operates normally. This prevents reapplication and maintains consistent configuration across brokers. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + node_id_overrides: + - current_uuid: "" + new_id: + new_uuid: "" + ignore_existing_node_id: + - current_uuid: "" + new_id: + new_uuid: "" + ignore_existing_node_id: +---- + + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== openssl_config_file + +Path to the configuration file used by OpenSSL to properly load the FIPS-compliant module. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== openssl_module_directory + +Path to the directory that contains the OpenSSL FIPS-compliant module. The filename that Redpanda looks for is `fips.so`. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::category-redpanda[] + +// tag::category-pandaproxy[] +=== pandaproxy_api + +Rest API listener address and port. + +*Requires restart:* Yes + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[{address: "0.0.0.0", port: 8082}]` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +pandaproxy: + pandaproxy_api: + address: 0.0.0.0 + port: 8082 + authentication_method: http_basic +---- + + +--- +// end::category-pandaproxy[] + +// tag::category-pandaproxy[] +=== pandaproxy_api_tls + +TLS configuration for Pandaproxy API. + +*Requires restart:* Yes + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy[] + +// tag::category-pandaproxy-client[] +=== produce_ack_level + +Number of acknowledgments the producer requires the leader to have received before considering a request complete. + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `-1` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +=== produce_batch_delay_ms + +Delay (in milliseconds) to wait before sending batch. 
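+For illustration, the batching delay can be set next to the acknowledgment level described above. The snippet assumes the `pandaproxy_client` section of `redpanda.yaml`; the values are examples, not recommendations.
+
+.Example
+[,yaml]
+----
+pandaproxy_client:
+  produce_ack_level: -1        # wait for acknowledgment from all in-sync replicas
+  produce_batch_delay_ms: 100  # illustrative batching delay
+----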
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy-client[] +=== produce_batch_delay_ms + +Configuration property: produce_batch_delay_ms + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy-client[] +=== produce_batch_record_count + +Number of records to batch before sending to broker. + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy-client[] +=== produce_batch_size_bytes + +Number of bytes to batch before sending to broker. + +*Unit:* bytes + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1048576` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy-client[] +=== produce_compression_type + +Enable or disable compression by the Kafka client. Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd]. + +*Requires restart:* Yes + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `none` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +=== produce_shutdown_delay_ms + +Delay (in milliseconds) to allow for final flush of buffers before shutting down. + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0ms` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy-client[] +=== produce_shutdown_delay_ms + +Delay (in milliseconds) to allow for final flush of buffers before shutting down. + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-redpanda[] +=== rack + +A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones. 
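+For example, all brokers in one availability zone could share the label `rack-a` while brokers in another zone use `rack-b`, with rack awareness enabled at the cluster level. The label value itself is arbitrary; only consistency within a failure zone matters.
+
+.Example
+[,yaml]
+----
+redpanda:
+  rack: "rack-a"  # same label for every broker in this failure zone
+----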
+ +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== recovery_mode_enabled + +If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:recovery-mode.adoc[recovery mode] + +--- +// end::category-redpanda[] + +// tag::category-pandaproxy-client[] +=== retries + +Number of times to retry a request to a broker. + +*Requires restart:* Yes + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +=== retry_base_backoff_ms + +Delay (in milliseconds) for initial retry backoff. + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +// tag::category-pandaproxy-client[] +=== retry_base_backoff_ms + +Configuration property: retry_base_backoff_ms + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-redpanda[] +=== rpc_server + +IP address and port for the Remote Procedure Call (RPC) server. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `{address: "127.0.0.1", port: 33145}` +endif::[] + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== rpc_server_tls + +TLS configuration for the RPC server. + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `{cert_file: null, enabled: null, key_file: null, require_client_auth: null, truststore_file: null}` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + rpc_server_tls: + enabled: true + cert_file: "" + key_file: "" + truststore_file: "" + require_client_auth: true +---- + + +--- +// end::category-redpanda[] + +// tag::category-pandaproxy-client[] +=== sasl_mechanism + +The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API. 
+ +This property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property. + +include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] + +*Requires restart:* Yes + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] + +--- +// end::category-pandaproxy-client[] + +// tag::category-schema-registry[] +=== schema_registry_api + +Schema Registry API listener address and port + +*Requires restart:* Yes + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[{address: "0.0.0.0", port: 8081}]` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +schema_registry: + schema_registry_api: + address: 0.0.0.0 + port: 8081 + authentication_method: http_basic +---- + + +--- +// end::category-schema-registry[] + +// tag::category-schema-registry[] +=== schema_registry_api_tls + +TLS configuration for Schema Registry API. + +*Requires restart:* Yes + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-schema-registry[] +=== schema_registry_replication_factor + +Replication factor for internal `_schemas` topic. If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property. + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] + +--- +// end::category-schema-registry[] + +// tag::category-pandaproxy-client[] +=== scram_password + +Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API. + +include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] + +*Requires restart:* Yes + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-pandaproxy-client[] +=== scram_username + +Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API. 
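+A hypothetical proxy client that authenticates against a SASL/SCRAM-protected cluster combines this property with `scram_password` and `sasl_mechanism`. The snippet assumes the `pandaproxy_client` section of `redpanda.yaml`; the username, password, and mechanism shown are placeholders.
+
+.Example
+[,yaml]
+----
+pandaproxy_client:
+  sasl_mechanism: SCRAM-SHA-256
+  scram_username: proxy-client   # placeholder
+  scram_password: changeme       # placeholder; store securely
+----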
+ +include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] + +*Requires restart:* Yes + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- +// end::category-pandaproxy-client[] + +// tag::category-redpanda[] +=== seed_servers + +List of the seed servers used to join current cluster. If the `seed_servers` list is empty the broker will be a cluster root and it will form a new cluster. + +* When `empty_seed_starts_cluster` is `true`, Redpanda enables one broker with an empty `seed_servers` list to initiate a new cluster. The broker with an empty `seed_servers` becomes the cluster root, to which other brokers must connect to join the cluster. Brokers looking to join the cluster should have their `seed_servers` populated with the cluster root's address, facilitating their connection to the cluster. ++ +[IMPORTANT] +==== +Only one broker, the designated cluster root, should have an empty `seed_servers` list during the initial cluster bootstrapping. This ensures a single initiation point for cluster formation. +==== + +* When `empty_seed_starts_cluster` is `false`, Redpanda requires all brokers to start with a known set of brokers listed in `seed_servers`. The `seed_servers` list must not be empty and should be identical across these initial seed brokers, containing the addresses of all seed brokers. Brokers not included in the `seed_servers` list use it to discover and join the cluster, allowing for expansion beyond the foundational members. ++ +[NOTE] +==== +The `seed_servers` list must be consistent across all seed brokers to prevent cluster fragmentation and ensure stable cluster formation. +==== + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + + +.Example with `empty_seed_starts_cluster: true` +[,yaml] +---- +# Cluster root broker (seed starter) +redpanda: + empty_seed_starts_cluster: true + seed_servers: [] +---- + +[,yaml] +---- +# Additional brokers joining the cluster +redpanda: + empty_seed_starts_cluster: true + seed_servers: + - host: + address: + port: +---- + +.Example with `empty_seed_starts_cluster: false` +[,yaml] +---- +# All initial seed brokers use the same configuration +redpanda: + empty_seed_starts_cluster: false + seed_servers: + - host: + address: + port: + - host: + address: + port: + - host: + address: + port: +---- + +Replace the following placeholders with your values: + +* ``: IP address of the cluster root broker +* ``: IP addresses of each seed broker in the cluster +* ``: RPC port for brokers (default: `33145`) + + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== storage_failure_injection_config_path + +Path to the configuration file used for low level storage failure injection. + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== storage_failure_injection_enabled + +If `true`, inject low level storage failures on the write path. Do _not_ use for production instances. 
+ +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== upgrade_override_checks + +Whether to violate safety checks when starting a Redpanda version newer than the cluster's consensus version. + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::category-redpanda[] + +// tag::category-redpanda[] +=== verbose_logging_timeout_sec_max + +Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis. + +*Unit:* seconds + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + + +.Example +[,yaml] +---- +schema_registry: + schema_registry_api: + address: 0.0.0.0 + port: 8081 + authentication_method: http_basic + schema_registry_replication_factor: 3 + mode_mutability: true +---- + + +*Related topics:* + +* xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`] +* xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`] + +--- +// end::category-redpanda[] diff --git a/modules/reference/partials/properties/cluster-properties.adoc b/modules/reference/partials/properties/cluster-properties.adoc new file mode 100644 index 0000000000..16e3b066c1 --- /dev/null +++ b/modules/reference/partials/properties/cluster-properties.adoc @@ -0,0 +1,10418 @@ +// This content is autogenerated. Do not edit manually. To override descriptions, use the doc-tools CLI with the --overrides option: https://redpandadata.atlassian.net/wiki/spaces/DOC/pages/1396244485/Review+Redpanda+configuration+properties +=== abort_index_segment_size + +Capacity (in number of txns) of an abort index segment. + +Each partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `50000` +endif::[] + +*Nullable:* No + +--- + +=== abort_timed_out_transactions_interval_ms + +Interval, in milliseconds, at which Redpanda looks for inactive transactions and aborts them. 
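+Cluster properties such as this one are normally changed at runtime with `rpk cluster config set`, or seeded before first startup. As a rough sketch, assuming a `.bootstrap.yaml` workflow, the interval could be lowered so idle transactions are aborted sooner; the value shown is illustrative.
+
+.Example
+[,yaml]
+----
+# .bootstrap.yaml, read once when the cluster first starts (illustrative value)
+abort_timed_out_transactions_interval_ms: 5000
+----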
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== admin_api_require_auth + +Whether Admin API clients must provide HTTP basic authentication headers. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== aggregate_metrics + +Enable aggregation of metrics returned by the xref:reference:internal-metrics-reference.adoc[`/metrics`] endpoint. Aggregation can simplify monitoring by providing summarized data instead of raw, per-instance metrics. Metric aggregation is performed by summing the values of samples by labels and is done when it makes sense by the shard and/or partition labels. + +*Requires restart:* No + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:internal-metrics-reference.adoc[`/metrics`] + +--- + +=== alive_timeout_ms + +The amount of time since the last broker status heartbeat. After this time, a broker is considered offline and not alive. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +=== alter_topic_cfg_timeout_ms + +The duration, in milliseconds, that Redpanda waits for the replication of entries in the controller log when executing a request to alter topic configurations. This timeout ensures that configuration changes are replicated across the cluster before the alteration request is considered complete. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +=== append_chunk_size + +Size of direct write operations to disk in bytes. A larger chunk size can improve performance for write-heavy workloads, but increase latency for these writes as more data is collected before each write operation. A smaller chunk size can decrease write latency, but potentially increase the number of disk I/O operations. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `16_KiB` +endif::[] + +*Nullable:* No + +--- + +=== audit_client_max_buffer_size + +Defines the number of bytes allocated by the internal audit client for audit messages. 
When changing this, you must disable audit logging and then re-enable it for the change to take effect. Consider increasing this if your system generates a very large number of audit records in a short amount of time. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `16_MiB` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== audit_enabled + +Enables or disables audit logging. When you set this to true, Redpanda checks for an existing topic named `_redpanda.audit_log`. If none is found, Redpanda automatically creates one for you. + +ifndef::env-cloud[] +*Enterprise license required*: `true` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== audit_enabled_event_types + +List of strings in JSON style identifying the event types to include in the audit log. This may include any of the following: `management, produce, consume, describe, heartbeat, authenticate, schema_registry, admin`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[management, authenticate, admin]` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== audit_excluded_principals + +List of user principals to exclude from auditing. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== audit_excluded_topics + +List of topics to exclude from auditing. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== audit_failure_policy + +Defines the policy for rejecting audit log messages when the audit log queue is full. If set to 'permit', then new audit messages are dropped and the operation is permitted. If set to 'reject', then the operation is rejected. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* audit_failure_policy + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `reject` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== audit_log_num_partitions + +Defines the number of partitions used by a newly-created audit topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics. 
+ +*Unit:* number of partitions per topic + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `12` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== audit_log_replication_factor + +Defines the replication factor for a newly-created audit log topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics. Setting this value is optional. If a value is not provided, Redpanda will use the value specified for `internal_topic_replication_factor`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== audit_queue_drain_interval_ms + +Interval, in milliseconds, at which Redpanda flushes the queued audit log messages to the audit log topic. Longer intervals may help prevent duplicate messages, especially in high throughput scenarios, but they also increase the risk of data loss during shutdowns where the queue is lost. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `500ms` +endif::[] + +*Nullable:* No + +--- + +=== audit_queue_max_buffer_size_per_shard + +Defines the maximum amount of memory in bytes used by the audit buffer in each shard. Once this size is reached, requests to log additional audit messages will return a non-retryable error. Limiting the buffer size per shard helps prevent any single shard from consuming excessive memory due to audit log messages. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_MiB` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== auto_create_topics_enabled + +Allow automatic topic creation. + +If you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== cluster_id + +NOTE: This property is read-only in Redpanda Cloud. + +Cluster identifier. + +*Requires restart:* No + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== compacted_log_segment_size + +Size (in bytes) for each compacted log segment. 
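A minimal sketch of tuning this with `rpk` (512 MiB here is illustrative; size it for your compaction workload):

.Example
[,bash]
----
# Use 512 MiB compacted segments instead of the 256 MiB default.
rpk cluster config set compacted_log_segment_size 536870912
----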
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `256_MiB` +endif::[] + +*Nullable:* No + +--- + +=== compaction_ctrl_backlog_size + +Target backlog size for compaction controller. If not set the max backlog size is configured to 80% of total disk space available. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== compaction_ctrl_d_coeff + +Derivative coefficient for compaction PID controller. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.2` +endif::[] + +*Nullable:* No + +--- + +=== compaction_ctrl_i_coeff + +Integral coefficient for compaction PID controller. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.0` +endif::[] + +*Nullable:* No + +--- + +=== compaction_ctrl_max_shares + +Maximum number of I/O and CPU shares that compaction process can use. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== compaction_ctrl_min_shares + +Minimum number of I/O and CPU shares that compaction process can use. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10` +endif::[] + +*Nullable:* No + +--- + +=== compaction_ctrl_p_coeff + +Proportional coefficient for compaction PID controller. This must be negative, because the compaction backlog should decrease when the number of compaction shares increases. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `-12.5` +endif::[] + +*Nullable:* No + +--- + +=== compaction_ctrl_update_interval_ms + +The interval (in milliseconds) for updating the controller responsible for compaction tasks. The controller uses this interval to decide how to prioritize background compaction work, which is essential for maintaining efficient storage use. + +This is an internal-only configuration and should be enabled only after consulting with Redpanda support. 
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== consumer_group_lag_collection_interval_sec + +How often to run the collection loop when enable_consumer_group_metrics contains consumer_lag + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60s` +endif::[] + +*Nullable:* No + +--- + +=== consumer_group_lag_collection_interval_sec + +How often to run the collection loop when <> contains `consumer_lag`. + +Reducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings. + +*Unit:* seconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- + +=== consumer_offsets_topic_batch_cache_enabled + +This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. The change is applied only after the restart. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== controller_backend_housekeeping_interval_ms + +Interval between iterations of controller backend housekeeping loop. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1s` +endif::[] + +*Nullable:* No + +--- + +=== controller_log_accummulation_rps_capacity_acls_and_users_operations + +Maximum capacity of rate limit accumulation in controller ACLs and users operations limit. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== controller_log_accummulation_rps_capacity_configuration_operations + +Maximum capacity of rate limit accumulation in controller configuration operations limit. 
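These accumulation capacities apply together with controller log rate limiting. A hedged sketch with `rpk` (the capacity value is illustrative):

.Example
[,bash]
----
# Turn on controller log rate limiting, then cap accumulated configuration operations.
rpk cluster config set enable_controller_log_rate_limiting true
rpk cluster config set controller_log_accummulation_rps_capacity_configuration_operations 100
----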
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== controller_log_accummulation_rps_capacity_move_operations + +Maximum capacity of rate limit accumulation in controller move operations limit. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== controller_log_accummulation_rps_capacity_node_management_operations + +Maximum capacity of rate limit accumulation in controller node management operations limit. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== controller_log_accummulation_rps_capacity_topic_operations + +Maximum capacity of rate limit accumulation in controller topic operations limit. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== controller_snapshot_max_age_sec + +Maximum amount of time before Redpanda attempts to create a controller snapshot after a new controller command appears. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60s` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== coproc_max_batch_size + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== coproc_max_inflight_bytes + +No description available. + + +*Unit:* bytes + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== coproc_max_ingest_bytes + +No description available. + + +*Unit:* bytes + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== coproc_offset_flush_interval_ms + +No description available. + + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== core_balancing_continuous + +If set to `true`, move partitions between cores in runtime to maintain balanced partition distribution. 
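A minimal sketch of turning this on with `rpk` (subject to the license requirement noted below):

.Example
[,bash]
----
# Continuously rebalance partitions across cores at runtime.
rpk cluster config set core_balancing_continuous true
----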
+ +ifndef::env-cloud[] +*Enterprise license required*: `true` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== core_balancing_debounce_timeout + +Interval, in milliseconds, between trigger and invocation of core balancing. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== core_balancing_on_core_count_change + +If set to `true`, and if after a restart the number of cores changes, Redpanda will move partitions between cores to maintain balanced partition distribution. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== cpu_profiler_enabled + +Enables CPU profiling for Redpanda. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cpu_profiler_sample_period_ms + +The sample period for the CPU profiler. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +=== create_topic_timeout_ms + +Timeout, in milliseconds, to wait for new topic creation. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2'000ms` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== data_transforms_binary_max_size + +ifdef::env-cloud[] +NOTE: This property is read-only in Redpanda Cloud. +endif::[] + +The maximum size for a deployable WebAssembly binary that the broker can store. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10_MiB` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== data_transforms_commit_interval_ms + +The commit interval at which data transforms progress. 
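A minimal sketch of tightening the commit interval with `rpk` (1 second here is illustrative):

.Example
[,bash]
----
# Commit transform progress every second instead of the 3s default.
rpk cluster config set data_transforms_commit_interval_ms 1000
----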
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3s` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== data_transforms_enabled + +Enables WebAssembly-powered data transforms directly in the broker. When `data_transforms_enabled` is set to `true`, Redpanda reserves memory for data transforms, even if no transform functions are currently deployed. This memory reservation ensures that adequate resources are available for transform functions when they are needed, but it also means that some memory is allocated regardless of usage. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== data_transforms_logging_buffer_capacity_bytes + +Buffer capacity for transform logs, per shard. Buffer occupancy is calculated as the total size of buffered log messages; that is, logs emitted but not yet produced. + +*Unit:* bytes + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `500_KiB` +endif::[] + +*Nullable:* No + +--- + +=== data_transforms_logging_flush_interval_ms + +Flush interval for transform logs. When a timer expires, pending logs are collected and published to the `transform_logs` topic. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `500ms` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== data_transforms_logging_line_max_bytes + +Transform log lines truncate to this length. Truncation occurs after any character escaping. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_KiB` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== data_transforms_per_core_memory_reservation + +ifdef::env-cloud[] +NOTE: This property is read-only in Redpanda Cloud. +endif::[] + +The amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`. 
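As a sketch of the sizing math and the two related properties (values are illustrative; both properties require a restart):

.Example
[,bash]
----
# 40 MiB reserved per core / 2 MiB per function = about 20 deployable transform functions.
rpk cluster config set data_transforms_per_core_memory_reservation 41943040
rpk cluster config set data_transforms_per_function_memory_limit 2097152
----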
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `20_MiB` +endif::[] + +*Nullable:* No + +// tag::self-managed-only[] +*Aliases:* wasm_per_core_memory_reservation +// end::self-managed-only[] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== data_transforms_per_function_memory_limit + +ifdef::env-cloud[] +NOTE: This property is read-only in Redpanda Cloud. +endif::[] + +The amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2_MiB` +endif::[] + +*Nullable:* No + +// tag::self-managed-only[] +*Aliases:* wasm_per_function_memory_limit +// end::self-managed-only[] + +--- +// end::redpanda-cloud[] + +=== data_transforms_read_buffer_memory_percentage + +include::reference:partial$internal-use-property.adoc[] + +The percentage of available memory in the transform subsystem to use for read buffers. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `45` +endif::[] + +*Nullable:* No + +--- + +=== data_transforms_runtime_limit_ms + +The maximum amount of runtime to start up a data transform, and the time it takes for a single record to be transformed. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3s` +endif::[] + +*Nullable:* No + +--- + +=== data_transforms_write_buffer_memory_percentage + +include::reference:partial$internal-use-property.adoc[] + +The percentage of available memory in the transform subsystem to use for write buffers. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `45` +endif::[] + +*Nullable:* No + +--- + +=== datalake_coordinator_snapshot_max_delay_secs + +Maximum amount of time the coordinator waits to snapshot after a command appears in the log. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `15min seconds` +endif::[] + +*Nullable:* No + +--- + +=== datalake_disk_space_monitor_enable + +Option to explicitly disable enforcement of datalake disk space usage. 
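A hedged sketch of switching enforcement off with `rpk` (generally leave this enabled unless you are investigating disk-space accounting):

.Example
[,bash]
----
# Disable datalake disk space enforcement (not recommended for normal operation).
rpk cluster config set datalake_disk_space_monitor_enable false
----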
+
+
*Requires restart:* No
+
// tag::self-managed-only[]
*Visibility:* `user`
// end::self-managed-only[]
+
*Type:* boolean
+
ifdef::env-cloud[]
*Default:* Available in the Redpanda Cloud Console
endif::[]
ifndef::env-cloud[]
*Default:* `true`
endif::[]
+
*Nullable:* No
+
---
+
// tag::deprecated[]
=== datalake_disk_space_monitor_interval
+
No description available.
+
+
*Requires restart:* Yes
+
*Nullable:* No
+
---
// end::deprecated[]
+
=== datalake_disk_usage_overage_coeff
+
The datalake disk usage monitor reclaims the overage multiplied by this coefficient to compensate for data that is written during the idle period between control loop invocations.
+
*Requires restart:* No
+
// tag::self-managed-only[]
*Visibility:* `tunable`
// end::self-managed-only[]
+
*Type:* number
+
ifdef::env-cloud[]
*Default:* Available in the Redpanda Cloud Console
endif::[]
ifndef::env-cloud[]
*Default:* `2.0`
endif::[]
+
*Nullable:* No
+
---
+
=== datalake_scheduler_block_size_bytes
+
Size, in bytes, of each memory block reserved for record translation, as tracked by the datalake scheduler.
+
*Unit:* bytes
+
*Requires restart:* Yes
+
// tag::self-managed-only[]
*Visibility:* `tunable`
// end::self-managed-only[]
+
*Type:* integer
+
ifdef::env-cloud[]
*Default:* Available in the Redpanda Cloud Console
endif::[]
ifndef::env-cloud[]
*Default:* `4_MiB`
endif::[]
+
*Nullable:* No
+
---
+
=== datalake_scheduler_disk_reservation_block_size
+
The size, in bytes, of the block of disk reservation that the datalake manager will assign to each datalake scheduler when it runs out of local reservation.
+
*Requires restart:* No
+
// tag::self-managed-only[]
*Visibility:* `tunable`
// end::self-managed-only[]
+
*Type:* integer
+
ifdef::env-cloud[]
*Default:* Available in the Redpanda Cloud Console
endif::[]
ifndef::env-cloud[]
*Default:* `50_MiB`
endif::[]
+
*Nullable:* No
+
---
+
=== datalake_scheduler_max_concurrent_translations
+
The maximum number of translations that the datalake scheduler will allow to run at a given time. If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available.
+
*Requires restart:* No
+
// tag::self-managed-only[]
*Visibility:* `tunable`
// end::self-managed-only[]
+
*Type:* integer
+
ifdef::env-cloud[]
*Default:* Available in the Redpanda Cloud Console
endif::[]
ifndef::env-cloud[]
*Default:* `4`
endif::[]
+
*Nullable:* No
+
---
+
=== datalake_scheduler_time_slice_ms
+
Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated.
+
*Unit:* milliseconds
+
*Requires restart:* Yes
+
// tag::self-managed-only[]
*Visibility:* `tunable`
// end::self-managed-only[]
+
*Type:* integer
+
*Accepted values:* [`-17592186044416`, `17592186044415`]
+
ifdef::env-cloud[]
*Default:* Available in the Redpanda Cloud Console
endif::[]
ifndef::env-cloud[]
*Default:* `30s`
endif::[]
+
*Nullable:* No
+
---
+
=== datalake_scratch_space_size_bytes
+
Size, in bytes, of the amount of scratch space datalake should use.
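A minimal sketch of raising the scratch space budget with `rpk` (10 GiB here is illustrative):

.Example
[,bash]
----
# Allow up to 10 GiB of datalake scratch space (default: 5 GiB).
rpk cluster config set datalake_scratch_space_size_bytes 10737418240
----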
+ +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5_GiB` +endif::[] + +*Nullable:* No + +--- + +=== datalake_scratch_space_soft_limit_size_percent + +Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value. + +*Unit:* percent + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `80.0` +endif::[] + +*Nullable:* No + +--- + +=== datalake_translator_flush_bytes + +Size, in bytes, of the amount of per translator data that may be flushed to disk before the translator will upload and remove its current on disk data. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `32_MiB` +endif::[] + +*Nullable:* No + +--- + +=== debug_bundle_auto_removal_seconds + +If set, how long debug bundles are kept in the debug bundle storage directory after they are created. If not set, debug bundles are kept indefinitely. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== debug_bundle_storage_dir + +Path to the debug bundle storage directory. Note: Changing this path does not clean up existing debug bundles. If not set, the debug bundle is stored in the Redpanda data directory specified in the redpanda.yaml broker configuration file. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== debug_load_slice_warning_depth + +The recursion depth after which debug logging is enabled automatically for the log reader. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== default_leaders_preference + +Default settings for preferred location of topic partition leaders. It can be either "none" (no preference), or "racks:,,..." (prefer brokers with rack ID from the list). + +The list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks. + +If config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster. 
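A hedged sketch of pinning leaders to two racks with `rpk` (rack IDs are illustrative; rack awareness must be enabled for the preference to take effect):

.Example
[,bash]
----
# Leader pinning only applies when rack awareness is enabled.
rpk cluster config set enable_rack_awareness true
rpk cluster config set default_leaders_preference 'racks:rack-a,rack-b'
----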
+ +ifndef::env-cloud[] +*Enterprise license required*: `Any rack preference (not `none`)` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `leaders_preference` +endif::[] + +*Nullable:* No + +--- + +=== default_num_windows + +Default number of quota tracking windows. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10` +endif::[] + +*Nullable:* No + +--- + +=== default_topic_partitions + +Default number of partitions per topic. + +*Unit:* number of partitions per topic + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1` +endif::[] + +*Nullable:* No + +--- + +=== default_topic_replications + +Default replication factor for new topics. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1` +endif::[] + +*Nullable:* No + +--- + +=== default_window_sec + +Default quota tracking window size in milliseconds. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000 milliseconds` +endif::[] + +*Nullable:* No + +--- + +=== development_enable_cloud_topics + +Enable cloud topics. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== development_enable_cluster_link + +Enable cluster linking. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== development_feature_property_testing_only + +Development feature property for testing only. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== disable_batch_cache + +Disable batch cache in log manager. 
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== disable_cluster_recovery_loop_for_tests + +include::reference:partial$internal-use-property.adoc[] + +Disables the cluster recovery loop. This property is used to simplify testing and should not be set in production. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== disable_metrics + +Disable registering the metrics exposed on the internal `/metrics` endpoint. + +*Requires restart:* Yes + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== disable_public_metrics + +Disable registering the metrics exposed on the `/public_metrics` endpoint. + +*Requires restart:* Yes + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== disk_reservation_percent + +The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well +as when cloud cache uses a dedicated disk. + +It is recommended to not run disks near capacity to avoid blocking I/O due to low disk space, as well as avoiding performance issues associated with SSD garbage collection. + +*Unit:* percent + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `25.0` +endif::[] + +*Nullable:* No + +--- + +=== election_timeout_ms + +Raft election timeout expressed in milliseconds. + +*Unit:* milliseconds + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- + +=== election_timeout_ms + +Election timeout expressed in milliseconds. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1'500ms` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== enable_admin_api + +No description available. 
+ + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== enable_auto_rebalance_on_node_add + +Enable automatic partition rebalancing when new nodes are added + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::deprecated[] + +=== enable_cluster_metadata_upload_loop + +Enables cluster metadata uploads. Required for xref:manage:whole-cluster-restore.adoc[whole cluster restore]. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:whole-cluster-restore.adoc[whole cluster restore] + +--- + +// tag::redpanda-cloud[] +=== enable_consumer_group_metrics + +List of enabled consumer group metrics. + +*Accepted values:* + +- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics. +- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric. +- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics ++ +Enabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions. ++ +ifndef::env-cloud[] +Use the xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`] property to control the frequency of consumer lag metric collection. 
+endif::[] + +*Requires restart:* No + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[group, partition]` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] +* xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] +* xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] +* xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] +* xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] +* xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`] +* xref:manage:monitoring.adoc#consumers[Monitor consumer group lag] + +--- +// end::redpanda-cloud[] + +=== enable_controller_log_rate_limiting + +Limits the write rate for the controller log. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== enable_coproc + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== enable_developmental_unrecoverable_data_corrupting_features + +Development features should never be enabled in a production cluster, or any cluster where stability, data loss, or the ability to upgrade are a concern. To enable experimental features, set the value of this configuration option to the current unix epoch expressed in seconds. The value must be within one hour of the current time on the broker.Once experimental features are enabled they cannot be disabled + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- + +=== enable_host_metrics + +Enable exporting of some host metrics like `/proc/diskstats`, `/proc/snmp` and `/proc/net/netstat`. + +Host metrics are prefixed with xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] and are available on the `/metrics` endpoint. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] + +--- + +=== enable_idempotence + +Enable idempotent producers. 
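A minimal sketch of checking and setting this with `rpk` (the property only takes effect after a broker restart):

.Example
[,bash]
----
# enable_idempotence defaults to true; changing it requires a broker restart.
rpk cluster config get enable_idempotence
rpk cluster config set enable_idempotence true
----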
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== enable_leader_balancer + +Enable automatic leadership rebalancing. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== enable_metrics_reporter + +Enable the cluster metrics reporter. If `true`, the metrics reporter collects and exports to Redpanda Data a set of customer usage metrics at the interval set by <>. + +[NOTE] +==== +The cluster metrics of the metrics reporter are different from xref:manage:monitoring.adoc[monitoring metrics]. + +* The metrics reporter exports customer usage metrics for consumption by Redpanda Data. +* Monitoring metrics are exported for consumption by Redpanda users. +==== + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:monitoring.adoc[monitoring metrics] + +--- + +=== enable_mpx_extensions + +Enable Redpanda extensions for MPX. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== enable_pid_file + +Enable PID file. You should not need to change. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== enable_rack_awareness + +Enable rack-aware replica assignment. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== enable_sasl + +Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <>. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== enable_schema_id_validation + +Mode to enable server-side schema ID validation. + +*Accepted values:* + +* `none`: Schema validation is disabled (no schema ID checks are done). Associated topic properties cannot be modified. +* `redpanda`: Schema validation is enabled. Only Redpanda topic properties are accepted. +* `compat`: Schema validation is enabled. Both Redpanda and compatible topic properties are accepted. 
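A hedged sketch of switching the cluster to Redpanda-mode validation with `rpk` (subject to the license requirement noted below):

.Example
[,bash]
----
# Enable server-side schema ID validation using Redpanda topic properties.
rpk cluster config set enable_schema_id_validation redpanda
----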
+ +ifndef::env-cloud[] +*Enterprise license required*: `compat,redpanda` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `none` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation] + +--- + +=== enable_transactions + +Enable transactions (atomic writes). + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== enable_usage + +Enables the usage tracking mechanism, storing windowed history of kafka/cloud_storage metrics over time. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== features_auto_enable + +Whether new feature flags auto-activate after upgrades (true) or must wait for manual activation via the Admin API (false). + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== fetch_max_bytes + +Maximum number of bytes returned in a fetch request. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `55_MiB` +endif::[] + +*Nullable:* No + +--- + +=== fetch_pid_d_coeff + +Derivative coefficient for fetch PID controller. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.0` +endif::[] + +*Nullable:* No + +--- + +=== fetch_pid_i_coeff + +Integral coefficient for fetch PID controller. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.01` +endif::[] + +*Nullable:* No + +--- + +=== fetch_pid_max_debounce_ms + +The maximum debounce time the fetch PID controller will apply, in milliseconds. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +=== fetch_pid_p_coeff + +Proportional coefficient for fetch PID controller. 
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100.0` +endif::[] + +*Nullable:* No + +--- + +=== fetch_pid_target_utilization_fraction + +A fraction, between 0 and 1, for the target reactor utilization of the fetch scheduling group. + +*Unit:* fraction + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.2` +endif::[] + +*Nullable:* No + +--- + +=== fetch_read_strategy + +The strategy used to fulfill fetch requests. + +* `polling`: Repeatedly polls every partition in the request for new data. The polling interval is set by <> (deprecated). + +* `non_polling`: The backend is signaled when a partition has new data, so Redpanda doesn't need to repeatedly read from every partition in the fetch. Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization. + +* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by <> at the start of each fetch. By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `non_polling` +endif::[] + +*Nullable:* No + +--- + +=== fetch_reads_debounce_timeout + +Time to wait for the next read in fetch requests when the requested minimum bytes was not reached. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1ms` +endif::[] + +*Nullable:* No + +--- + +=== fetch_session_eviction_timeout_ms + +Time duration after which the inactive fetch session is removed from the fetch session cache. Fetch sessions are used to implement the incremental fetch requests where a consumer does not send all requested partitions to the server but the server tracks them for the consumer. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60s` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== find_coordinator_timeout_ms + +No description available. + + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== full_raft_configuration_recovery_pattern + +No description available. 
+ + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== group_initial_rebalance_delay + +Delay added to the rebalance phase to wait for new members. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3s` +endif::[] + +*Nullable:* No + +--- + +=== group_max_session_timeout_ms + +The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. + +*Unit:* milliseconds + +*Requires restart:* No + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `300s` +endif::[] + +*Nullable:* No + +--- + +=== group_min_session_timeout_ms + +The minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources. + +*Unit:* milliseconds + +*Requires restart:* No + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `6000ms` +endif::[] + +*Nullable:* No + +--- + +=== group_new_member_join_timeout + +Timeout for new member joins. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30'000ms` +endif::[] + +*Nullable:* No + +--- + +=== group_offset_retention_check_ms + +Frequency rate at which the system should check for expired group offsets. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10min` +endif::[] + +*Nullable:* No + +--- + +=== group_offset_retention_sec + +Consumer group offset retention seconds. To disable offset retention, set this to null. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `24h * 7` +endif::[] + +*Nullable:* Yes + +--- + +=== group_topic_partitions + +Number of partitions in the internal group membership topic. + +*Unit:* number of partitions per topic + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `16` +endif::[] + +*Nullable:* No + +--- + +=== health_manager_tick_interval + +How often the health manager runs. 
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3min` +endif::[] + +*Nullable:* No + +--- + +=== health_monitor_max_metadata_age + +Maximum age of the metadata cached in the health monitor of a non-controller broker. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== health_monitor_tick_interval + +How often health monitor refresh cluster state + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::redpanda-cloud[] +=== http_authentication + +A list of supported HTTP authentication mechanisms. + +*Accepted values:* + +* `BASIC`: Basic authentication +* `OIDC`: OpenID Connect + +ifndef::env-cloud[] +*Enterprise license required*: `OIDC` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[BASIC]` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== iceberg_backlog_controller_i_coeff + +Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.005` +endif::[] + +*Nullable:* No + +--- + +=== iceberg_backlog_controller_p_coeff + +Proportional coefficient for the Iceberg backlog controller. Number of shares assigned to the datalake scheduling group will be proportional to the backlog size error. A negative value means larger and faster changes in the number of shares in the datalake scheduling group. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.00001` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== iceberg_catalog_base_location + +Base path for the cloud-storage-object-backed Iceberg filesystem catalog. After Iceberg is enabled, do not change this value. 
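+
+For example, a minimal cluster configuration sketch (as you might apply with `rpk cluster config import` or `rpk cluster config edit`) that sets a custom base path before Iceberg is enabled. The path shown is illustrative only:
+
+[,yaml]
+----
+iceberg_catalog_base_location: analytics/iceberg-catalog
+----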
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `redpanda-iceberg-catalog` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== iceberg_catalog_commit_interval_ms + +The frequency at which the Iceberg coordinator commits topic files to the catalog. This is the interval between commit transactions across all topics monitored by the coordinator, not the interval between individual commits. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1min milliseconds` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== iceberg_catalog_type + +Iceberg catalog type that Redpanda will use to commit table metadata updates. Supported types: `rest`, `object_storage`. +NOTE: You must set <> at the same time that you set `iceberg_catalog_type` to `rest`. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* datalake_catalog_type + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `object_storage` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_default_partition_spec + +ifndef::env-cloud[] +Default value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`] topic property that determines the partition spec for the Iceberg table corresponding to the topic. +endif::[] + +ifdef::env-cloud[] +Default value for the `redpanda.iceberg.partition.spec` topic property that determines the partition spec for the Iceberg table corresponding to the topic. +endif::[] + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `(hour(redpanda.timestamp))` +endif::[] + +*Nullable:* No + +ifndef::env-cloud[] +*Related topics:* + +* xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`] +endif::[] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_delete + +Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. 
+endif::[] + + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== iceberg_disable_automatic_snapshot_expiry + +Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== iceberg_disable_snapshot_tagging + +Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_enabled + +ifndef::env-cloud[] +Enables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`] topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings. +endif::[] +ifdef::env-cloud[] +Enables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the `redpanda.iceberg.enabled` topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings. +endif::[] + +ifndef::env-cloud[] +*Enterprise license required*: `true` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. 
+endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +ifndef::env-cloud[] +*Related topics:* + +* xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`] +endif::[] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_invalid_record_action + +ifndef::env-cloud[] +Default value for the xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`] topic property. +endif::[] +ifdef::env-cloud[] +Default value for the `redpanda.iceberg.invalid.record.action` topic property. +endif::[] + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `dlq_table` +endif::[] + +*Nullable:* No + +ifndef::env-cloud[] +*Related topics:* + +* xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`] +* xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors] +endif::[] + +--- +// end::redpanda-cloud[] + +=== iceberg_latest_schema_cache_ttl_ms + +The TTL for caching the latest schema during translation when using the xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`] iceberg mode. This setting controls how long the latest schema remains cached during translation, which affects schema refresh behavior and performance. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5min milliseconds` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`] + +--- + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_authentication_mode + +The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditonally, and no attempts are made to refresh the token. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to AWS Glue catalog with SigV4. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. 
+endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* datalake_catalog_auth_mode + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `none` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_aws_access_key + +AWS access key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] when using aws_sigv4 authentication mode. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_aws_region + +AWS region for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] when using aws_sigv4 authentication mode. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_aws_secret_key + +AWS secret key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`] when using aws_sigv4 authentication mode. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`] + +--- +// end::redpanda-cloud[] + +=== iceberg_rest_catalog_aws_service_name + +AWS service name for SigV4 signing when using aws_sigv4 authentication mode. Defaults to 'glue' for AWS Glue Data Catalog. Can be changed to support other AWS services that provide Iceberg REST catalog APIs. 
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `glue` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_base_location + +Base URI for the Iceberg REST catalog. If unset, the REST catalog server determines the location. Some REST catalogs, like AWS Glue, require the client to set this. After Iceberg is enabled, do not change this value. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_client_id + +Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_client_secret + +Secret used with the client ID to query the OAuth token endpoint for Iceberg REST catalog authentication. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +=== iceberg_rest_catalog_credentials_source + +Source of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to cloud_storage_credentials_source when using aws_sigv4 authentication mode. Accepted values: config_file, aws_instance_metadata, sts, gcp_instance_metadata, azure_vm_instance_metadata, azure_aks_oidc_federation. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +// tag::self-managed-only[] +*Aliases:* iceberg_rest_catalog_aws_credentials_source +// end::self-managed-only[] + +--- + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_credentials_source + +ifndef::env-cloud[] +Source of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode. +endif::[] + +ifdef::env-cloud[] +Source of AWS credentials for Iceberg REST catalog SigV4 authentication. 
If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`. +endif::[] + +*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_crl + +The contents of a certificate revocation list for `iceberg_rest_catalog_trust`. Takes precedence over `iceberg_rest_catalog_crl_file`. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +=== iceberg_rest_catalog_crl_file + +Path to certificate revocation list for `iceberg_rest_catalog_trust_file`. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_endpoint + +URL of Iceberg REST catalog endpoint. +NOTE: If you set <> to `rest`, you must also set this property at the same time. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_oauth2_scope + +The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2` + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `PRINCIPAL_ROLE:ALL` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_oauth2_server_uri + +The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead. 
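+
+For example, a sketch of the properties that are typically set together when using `oauth2` authentication against a REST catalog. The endpoint, URI, and credentials below are placeholders, not defaults:
+
+[,yaml]
+----
+iceberg_catalog_type: rest
+iceberg_rest_catalog_endpoint: https://catalog.example.com
+iceberg_rest_catalog_authentication_mode: oauth2
+iceberg_rest_catalog_client_id: redpanda
+iceberg_rest_catalog_client_secret: <client-secret>
+iceberg_rest_catalog_oauth2_server_uri: https://auth.example.com/oauth2/token
+----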
+ +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_request_timeout_ms + +Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_token + +Token used to access the REST Iceberg catalog. If the token is present, Redpanda ignores credentials stored in the properties <> and <>. + +Required if <> is set to `bearer`. + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_trust + +The contents of a certificate chain to trust for the REST Iceberg catalog. +ifndef::env-cloud[] +Takes precedence over <>. +endif::[] + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. +endif::[] + + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +=== iceberg_rest_catalog_trust_file + +Path to a file containing a certificate chain to trust for the REST Iceberg catalog. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +// tag::redpanda-cloud[] +=== iceberg_rest_catalog_warehouse + +Warehouse to use for the Iceberg REST catalog. Redpanda queries the catalog to retrieve warehouse-specific configurations and automatically configures settings like the appropriate prefix. The prefix is appended to the catalog path (for example, `/v1/\{prefix}/namespaces`). + +ifdef::env-cloud[] +NOTE: This property is available only in Redpanda Cloud BYOC deployments. 
+
+endif::[]
+
+
+*Requires restart:* Yes
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* string
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `null`
+endif::[]
+
+*Nullable:* Yes
+
+// tag::self-managed-only[]
+*Aliases:* iceberg_rest_catalog_prefix
+// end::self-managed-only[]
+
+---
+// end::redpanda-cloud[]
+
+=== iceberg_target_backlog_size
+
+Average size per partition of the datalake translation backlog that the backlog controller tries to maintain. When the backlog size is larger than the setpoint, the backlog controller increases the priority of the translation scheduling group.
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `tunable`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Maximum value:* `4294967295`
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `100_MiB`
+endif::[]
+
+*Nullable:* No
+
+---
+
+// tag::redpanda-cloud[]
+=== iceberg_target_lag_ms
+
+Default value for the `redpanda.iceberg.target.lag.ms` topic property, which controls how often data in an Iceberg table is refreshed with new data from the corresponding Redpanda topic. Redpanda attempts to commit all data produced to the topic within the lag target on a best-effort basis, subject to resource availability.
+
+ifdef::env-cloud[]
+NOTE: This property is available only in Redpanda Cloud BYOC deployments.
+endif::[]
+
+
+*Unit:* milliseconds
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Accepted values:* [`-17592186044416`, `17592186044415`]
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `milliseconds`
+endif::[]
+
+*Nullable:* No
+
+ifndef::env-cloud[]
+*Related topics:*
+
+* xref:reference:properties/topic-properties.adoc#redpanda-iceberg-target-lag-ms[`redpanda.iceberg.target.lag.ms`]
+endif::[]
+
+---
+// end::redpanda-cloud[]
+
+=== iceberg_throttle_backlog_size_ratio
+
+Ratio of the total backlog size to the disk space at which throttling is applied to Iceberg producers.
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `tunable`
+// end::self-managed-only[]
+
+*Type:* number
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `0.3`
+endif::[]
+
+*Nullable:* No
+
+---
+
+// tag::redpanda-cloud[]
+=== iceberg_topic_name_dot_replacement
+
+Optional replacement string for dots in topic names when deriving Iceberg table names, useful when downstream systems do not permit dots in table names. The replacement string cannot contain dots. Be careful to avoid table name collisions caused by the replacement. If an Iceberg topic with dots in its name exists in the cluster, do not change the value of this property.
+
+ifdef::env-cloud[]
+NOTE: This property is available only in Redpanda Cloud BYOC deployments.
+
+endif::[]
+
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* string
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `null`
+endif::[]
+
+*Nullable:* Yes
+
+---
+// end::redpanda-cloud[]
+
+=== id_allocator_batch_size
+
+The ID allocator allocates messages in batches (each batch is one log record) and then serves requests from memory without touching the log until the batch is exhausted.
+
+*Requires restart:* Yes
+
+// tag::self-managed-only[]
+*Visibility:* `tunable`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Accepted values:* [`-32768`, `32767`]
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `1000`
+endif::[]
+
+*Nullable:* No
+
+---
+
+=== id_allocator_log_capacity
+
+Capacity of the `id_allocator` log in number of batches. After this capacity is reached, `id_allocator_stm` truncates the log's prefix.
+
+*Requires restart:* Yes
+
+// tag::self-managed-only[]
+*Visibility:* `tunable`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Accepted values:* [`-32768`, `32767`]
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `100`
+endif::[]
+
+*Nullable:* No
+
+---
+
+// tag::deprecated[]
+=== id_allocator_replication
+
+No description available.
+
+
+*Requires restart:* Yes
+
+*Nullable:* No
+
+---
+// end::deprecated[]
+
+=== initial_retention_local_target_bytes_default
+
+Initial local retention size target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data is delivered to learners when they join the partition replica set.
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* integer
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `null`
+endif::[]
+
+*Nullable:* Yes
+
+*Related topics:*
+
+* xref:manage:tiered-storage.adoc[Tiered Storage]
+
+---
+
+=== initial_retention_local_target_ms_default
+
+Initial local retention time target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data is delivered to learners when they join the partition replica set.
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Accepted values:* [`-17592186044416`, `17592186044415`]
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `null`
+endif::[]
+
+*Nullable:* Yes
+
+*Related topics:*
+
+* xref:manage:tiered-storage.adoc[Tiered Storage]
+
+---
+
+=== internal_topic_replication_factor
+
+Target replication factor for internal topics.
+
+*Unit:* number of replicas per topic
+
+*Requires restart:* Yes
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Accepted values:* [`-2147483648`, `2147483647`]
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `3`
+endif::[]
+
+*Nullable:* No
+
+---
+
+=== join_retry_timeout_ms
+
+Time between cluster join retries in milliseconds.
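+
+For example, a cluster configuration sketch that waits 10 seconds between join attempts (illustrative value, in milliseconds):
+
+[,yaml]
+----
+join_retry_timeout_ms: 10000
+----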
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== kafka_admin_topic_api_rate + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== kafka_batch_max_bytes + +Maximum size of a batch processed by the server. If the batch is compressed, the limit applies to the compressed batch size. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_MiB` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== kafka_client_group_byte_rate_quota + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== kafka_client_group_fetch_byte_rate_quota + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== kafka_connection_rate_limit + +Maximum connections per second for one core. If `null` (the default), then the number of connections per second is unlimited. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-9223372036854776000`, `9223372036854776000`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_connection_rate_limit_overrides + +Overrides the maximum connections per second for one core for the specified IP addresses (for example, `['127.0.0.1:90', '50.20.1.1:40']`) + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] + +--- + +=== kafka_connections_max + +Maximum number of Kafka client connections per broker. If `null`, the property is disabled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] + +--- + +// tag::redpanda-cloud[] +=== kafka_connections_max_overrides + +A list of IP addresses for which Kafka client connection limits are overridden and don't apply. For example, `(['127.0.0.1:90', '50.20.1.1:40']).`. 
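+
+For example, a sketch that overrides the connection limit for two specific client IP addresses, using the same format as the inline example above (addresses and values are illustrative only):
+
+[,yaml]
+----
+kafka_connections_max_overrides:
+  - "127.0.0.1:90"
+  - "50.20.1.1:40"
+----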
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== kafka_connections_max_per_ip + +Maximum number of Kafka client connections per IP address, per broker. If `null`, the property is disabled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections] + +--- +// end::redpanda-cloud[] + +=== kafka_enable_authorization + +Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>. + +* `null`: Ignored. Authorization is enabled with `enable_sasl`: `true` +* `true`: authorization is required. +* `false`: authorization is disabled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_enable_partition_reassignment + +Enable the Kafka partition reassignment API. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== kafka_group_recovery_timeout_ms + +Kafka group recovery timeout. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30'000ms` +endif::[] + +*Nullable:* No + +--- + +=== kafka_max_bytes_per_fetch + +Limit fetch responses to this many bytes, even if the total of partition bytes limits is higher. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `64_MiB` +endif::[] + +*Nullable:* No + +--- + +=== kafka_memory_batch_size_estimate_for_fetch + +The size of the batch used to estimate memory consumption for fetch requests, in bytes. Smaller sizes allow more concurrent fetch requests per shard. Larger sizes prevent running out of memory because of too many concurrent fetch requests. 
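+
+For example, to lower the estimate so that more fetch requests can run concurrently on each shard (illustrative value, in bytes):
+
+[,yaml]
+----
+kafka_memory_batch_size_estimate_for_fetch: 524288
+----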
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_MiB` +endif::[] + +*Nullable:* No + +--- + +=== kafka_memory_share_for_fetch + +The share of Kafka subsystem memory that can be used for fetch read buffers, as a fraction of the Kafka subsystem memory amount. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.5` +endif::[] + +*Nullable:* No + +--- + +=== kafka_mtls_principal_mapping_rules + +Principal mapping rules for mTLS authentication on the Kafka API. If `null`, the property is disabled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_nodelete_topics + +A list of topics that are protected from deletion and configuration changes by Kafka clients. Set by default to a list of Redpanda internal topics. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[_redpanda.audit_log, __consumer_offsets, _schemas]` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:develop:consume-data/consumer-offsets.adoc[Consumer Offsets] +* xref:manage:schema-registry.adoc[Schema Registry] + +--- + +=== kafka_noproduce_topics + +A list of topics that are protected from being produced to by Kafka clients. Set by default to a list of Redpanda internal topics. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- + +=== kafka_produce_batch_validation + +Controls the level of validation performed on batches produced to Redpanda. When set to `legacy`, there is minimal validation performed on the produce path. When set to `relaxed`, full validation is performed on uncompressed batches and on compressed batches with the `max_timestamp` value left unset. When set to `strict`, full validation of uncompressed and compressed batches is performed. This should be the default in environments where producing clients are not trusted. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `relaxed` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_depth_alpha + +Smoothing factor for Kafka queue depth control depth tracking. 
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.8` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_depth_update_ms + +Update frequency for Kafka queue depth control. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `7s` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_enable + +Enable kafka queue depth control. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_idle_depth + +Queue depth when idleness is detected in Kafka queue depth control. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_latency_alpha + +Smoothing parameter for Kafka queue depth control latency tracking. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.002` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_max_depth + +Maximum queue depth used in Kafka queue depth control. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_max_latency_ms + +Maximum latency threshold for Kafka queue depth control depth tracking. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `80ms` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_min_depth + +Minimum queue depth used in Kafka queue depth control. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_window_count + +Number of windows used in Kafka queue depth control latency tracking. 
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `12` +endif::[] + +*Nullable:* No + +--- + +=== kafka_qdc_window_size_ms + +Window size for Kafka queue depth control latency tracking. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1500ms` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== kafka_quota_balancer_min_shard_throughput_bps + +No description available. + + +*Unit:* bytes per second + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== kafka_quota_balancer_min_shard_throughput_ratio + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== kafka_quota_balancer_node_period_ms + +No description available. + + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== kafka_quota_balancer_window_ms + +No description available. + + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== kafka_request_max_bytes + +Maximum size of a single request processed using the Kafka API. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100_MiB` +endif::[] + +*Nullable:* No + +--- + +=== kafka_rpc_server_stream_recv_buf + +Maximum size of the user-space receive buffer. If `null`, this limit is not applied. + +*Unit:* bytes + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_rpc_server_tcp_recv_buf + +Size of the Kafka server TCP receive buffer. If `null`, the property is disabled. + +*Unit:* bytes + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_rpc_server_tcp_send_buf + +Size of the Kafka server TCP transmit buffer. If `null`, the property is disabled. + +*Unit:* bytes + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_sasl_max_reauth_ms + +The maximum time between Kafka client reauthentications. If a client has not reauthenticated a connection within this time frame, that connection is torn down. 
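+
+For example, a cluster configuration sketch that requires clients to reauthenticate at least once per hour (illustrative value, in milliseconds):
+
+[,yaml]
+----
+kafka_sasl_max_reauth_ms: 3600000
+----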
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_schema_id_validation_cache_capacity + +Per-shard capacity of the cache for validating schema IDs. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128` +endif::[] + +*Nullable:* No + +--- + +=== kafka_tcp_keepalive_probe_interval_seconds + +TCP keepalive probe interval in seconds for Kafka connections. This describes the timeout between unacknowledged TCP keepalives. Refers to the TCP_KEEPINTVL socket option. When changed, applies to new connections only. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60s` +endif::[] + +*Nullable:* No + +--- + +=== kafka_tcp_keepalive_probes + +TCP keepalive unacknowledged probes until the connection is considered dead for Kafka connections. Refers to the TCP_KEEPCNT socket option. When changed, applies to new connections only. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3` +endif::[] + +*Nullable:* No + +--- + +=== kafka_tcp_keepalive_timeout + +TCP keepalive idle timeout in seconds for Kafka connections. This describes the timeout between TCP keepalive probes that the remote site successfully acknowledged. Refers to the TCP_KEEPIDLE socket option. When changed, applies to new connections only. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `120s` +endif::[] + +*Nullable:* No + +--- + +=== kafka_throughput_control + +List of throughput control groups that define exclusions from broker-wide throughput limits. Clients excluded from broker-wide throughput limits are still potentially subject to client-specific throughput limits. + +Each throughput control group consists of: + +* `name` (optional) - any unique group name +* `client_id` - regex to match client_id + +Example values: + +* `[{'name': 'first_group','client_id': 'client1'}, {'client_id': 'consumer-\d+'}]` +* `[{'name': 'catch all'}]` +* `[{'name': 'missing_id', 'client_id': '+empty'}]` + +A connection is assigned the first matching group and is then excluded from throughput control. A `name` is not required, but can help you categorize the exclusions. Specifying `+empty` for the `client_id` will match on clients that opt not to send a `client_id`. You can also optionally omit the `client_id` and specify only a `name`, as shown. 
In this situation, all clients match the rule and Redpanda excludes them all from broker-wide throughput control.
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* array
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `[]`
+endif::[]
+
+*Nullable:* No
+
+*Related topics:*
+
+* xref:manage:cluster-maintenance/manage-throughput.adoc[Manage throughput]
+
+---
+
+=== kafka_throughput_controlled_api_keys
+
+List of Kafka API keys that are subject to cluster-wide and node-wide throughput limit control.
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* array
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `[produce, fetch]`
+endif::[]
+
+*Nullable:* No
+
+---
+
+=== kafka_throughput_limit_node_in_bps
+
+The maximum rate of all ingress Kafka API traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). If `null`, the property is disabled, and traffic is not limited.
+
+*Unit:* bytes per second
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Accepted values:* [`-9223372036854776000`, `9223372036854776000`]
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `null`
+endif::[]
+
+*Nullable:* Yes
+
+*Related topics:*
+
+* xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]
+
+---
+
+=== kafka_throughput_limit_node_out_bps
+
+The maximum rate of all egress Kafka traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). If `null`, the property is disabled, and traffic is not limited.
+
+*Unit:* bytes per second
+
+*Requires restart:* No
+
+// tag::self-managed-only[]
+*Visibility:* `user`
+// end::self-managed-only[]
+
+*Type:* integer
+
+*Accepted values:* [`-9223372036854776000`, `9223372036854776000`]
+
+ifdef::env-cloud[]
+*Default:* Available in the Redpanda Cloud Console
+endif::[]
+ifndef::env-cloud[]
+*Default:* `null`
+endif::[]
+
+*Nullable:* Yes
+
+*Related topics:*
+
+* xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]
+
+---
+
+=== kafka_throughput_replenish_threshold
+
+Threshold for refilling the token bucket as part of enforcing throughput limits.
+
+This threshold is evaluated with each request for data. When the number of tokens to replenish exceeds this threshold, tokens are added to the token bucket. This ensures that the token count atomic is not updated on every request. The range for this threshold is automatically clamped to the corresponding throughput limit for ingress and egress.
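+
+For example, a sketch that caps per-broker throughput and sets an explicit replenish threshold. All values are illustrative only:
+
+[,yaml]
+----
+kafka_throughput_limit_node_in_bps: 104857600   # 100 MiB/s ingress cap
+kafka_throughput_limit_node_out_bps: 209715200  # 200 MiB/s egress cap
+kafka_throughput_replenish_threshold: 65536     # replenish tokens in 64 KiB steps
+----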
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-9223372036854776000`, `9223372036854776000`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_in_bps[`kafka_throughput_limit_node_in_bps`] +* xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_out_bps[`kafka_throughput_limit_node_out_bps`] +* xref:manage:cluster-maintenance/manage-throughput.adoc[Manage Throughput] + +--- + +// tag::deprecated[] +=== kafka_throughput_throttling_v2 + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== kafka_topics_max + +Maximum number of Kafka user topics that can be created. If `null`, then no limit is enforced. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kvstore_flush_interval + +Key-value store flush interval (in milliseconds). + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10 milliseconds` +endif::[] + +*Nullable:* No + +--- + +=== kvstore_max_segment_size + +Key-value maximum segment size (in bytes). + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `16_MiB` +endif::[] + +*Nullable:* No + +--- + +=== leader_balancer_idle_timeout + +Leadership rebalancing idle timeout. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2min` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== leader_balancer_mode + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== leader_balancer_mute_timeout + +The length of time that a glossterm:Raft[] group is muted after a leadership rebalance operation. Any group that has been moved, regardless of whether the move succeeded or failed, undergoes a cooling-off period. This prevents Raft groups from repeatedly experiencing leadership rebalance operations in a short time frame, which can lead to instability in the cluster. + +The leader balancer maintains a list of muted groups and reevaluates muted status at the start of each balancing iteration. Muted groups still contribute to overall cluster balance calculations although they can't themselves be moved until the mute period is over. 
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5min` +endif::[] + +*Nullable:* No + +--- + +=== leader_balancer_node_mute_timeout + +The duration after which a broker that hasn't sent a heartbeat is considered muted. This timeout sets a threshold for identifying brokers that shouldn't be targeted for leadership transfers when the cluster rebalances, for example, because of unreliable network connectivity. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `20s` +endif::[] + +*Nullable:* No + +--- + +=== leader_balancer_transfer_limit_per_shard + +Per shard limit for in-progress leadership transfers. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `512` +endif::[] + +*Nullable:* No + +--- + +=== legacy_group_offset_retention_enabled + +Group offset retention is enabled by default starting in Redpanda version 23.1. To enable offset retention after upgrading from an older version, set this option to `true`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== legacy_permit_unsafe_log_operation + +Flag to enable a Redpanda cluster operator to use unsafe control characters within strings, such as consumer group names or user names. This flag applies only for Redpanda clusters that were originally on version 23.1 or earlier and have been upgraded to version 23.2 or later. Starting in version 23.2, newly-created Redpanda clusters ignore this property. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== legacy_unsafe_log_warning_interval_sec + +Period at which to log a warning about using unsafe strings containing control characters. If unsafe strings are permitted by `legacy_permit_unsafe_log_operation`, a warning will be logged at an interval specified by this property. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `300s` +endif::[] + +*Nullable:* No + +--- + +=== log_cleanup_policy + +Default cleanup policy for topic logs. + +The topic property xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`] overrides the value of `log_cleanup_policy` at the topic level.
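+
+For example, you can make compaction the cluster-wide default and still override the policy for an individual topic. The commands below are a sketch; the topic name and chosen values are illustrative.
+
+[,bash]
+----
+# Set the cluster-wide default cleanup policy
+rpk cluster config set log_cleanup_policy compact
+
+# Override the policy for a single (hypothetical) topic
+rpk topic alter-config my-topic --set cleanup.policy=delete
+----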
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `deletion` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`] + +--- + +// tag::deprecated[] +=== log_compaction_adjacent_merge_self_compaction_count + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== log_compaction_interval_ms + +How often to trigger background compaction. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== log_compaction_merge_max_ranges + +The maximum range of segments that can be processed in a single round of adjacent segment compaction. If `null` (the default value), no maximum is imposed on the number of ranges that can be processed at once. A value below 1 effectively disables adjacent merge compaction. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== log_compaction_merge_max_segments_per_range + +The maximum number of segments that can be combined into a single segment during an adjacent merge operation. If `null` (the default value), no maximum is imposed on the number of segments that can be combined at once. A value below 2 effectively disables adjacent merge compaction. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== log_compaction_pause_use_sliding_window + +Pause use of sliding window compaction. Toggle to `true` _only_ when you want to force adjacent segment compaction. The memory reserved by `storage_compaction_key_map_memory` is not freed when this is set to `true`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== log_compaction_use_sliding_window + +Use sliding window compaction. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== log_compression_type + +IMPORTANT: This property is ignored regardless of the value specified. The behavior is always the same as the `producer` value. Redpanda brokers do not compress or recompress data based on this property. 
If producers send compressed data, Redpanda stores it as-is; if producers send uncompressed data, Redpanda stores it uncompressed. Other listed values are accepted for Apache Kafka compatibility but are ignored by the broker. This property may appear in Admin API and `rpk topic describe` outputs for compatibility. + +Default for the Kafka-compatible compression.type property. Redpanda does not recompress data. + +The topic property xref:./topic-properties.adoc#compressiontype[`compression.type`] overrides the value of `log_compression_type` at the topic level. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `producer` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:./topic-properties.adoc#compressiontype[`compression.type`] + +--- + +=== log_disable_housekeeping_for_tests + +Disables the housekeeping loop for local storage. This property is used to simplify testing, and should not be set in production. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== log_message_timestamp_alert_after_ms + +Threshold in milliseconds for alerting on messages with a timestamp after the broker's time, meaning the messages are in the future relative to the broker's clock. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2h` +endif::[] + +*Nullable:* No + +--- + +=== log_message_timestamp_alert_before_ms + +Threshold in milliseconds for alerting on messages with a timestamp before the broker's time, meaning the messages are in the past relative to the broker's clock. To disable this check, set to `null`. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== log_message_timestamp_type + +Default timestamp type for topic messages (CreateTime or LogAppendTime). + +The topic property xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`] overrides the value of `log_message_timestamp_type` at the topic level. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `create_time` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`] + +--- + +=== log_retention_ms + +The amount of time to keep a log file before deleting it (in milliseconds). If set to `-1`, no time limit is applied. This is a cluster-wide default when a topic does not set or disable `retention.ms`. 
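+
+For example, to keep data for seven days by default and retain only one day for a specific topic (illustrative values and topic name):
+
+[,bash]
+----
+# Cluster-wide default: 7 days, expressed in milliseconds
+rpk cluster config set log_retention_ms 604800000
+
+# Per-topic override: 1 day for a hypothetical topic
+rpk topic alter-config my-topic --set retention.ms=86400000
+----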
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `7 * 24h` +endif::[] + +*Nullable:* No + +// tag::self-managed-only[] +*Aliases:* delete_retention_ms +// end::self-managed-only[] + +--- + +// tag::redpanda-cloud[] +=== log_segment_ms + +Default lifetime of log segments. If `null`, the property is disabled, and no default lifetime is set. Any value under 60 seconds (60000 ms) is rejected. This property can also be set in the Kafka API using the Kafka-compatible alias, `log.roll.ms`. The topic property `segment.ms` overrides the value of `log_segment_ms` at the topic level. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `weeks` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +=== log_segment_ms_max + +Upper bound on topic `segment.ms`: higher values will be clamped to this value. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `24h * 365` +endif::[] + +*Nullable:* No + +--- + +=== log_segment_ms_min + +Lower bound on topic `segment.ms`: lower values will be clamped to this value. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10min` +endif::[] + +*Nullable:* No + +--- + +=== log_segment_size + +Default log segment size in bytes for topics which do not set `segment.bytes`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128_MiB` +endif::[] + +*Nullable:* No + +--- + +=== log_segment_size_jitter_percent + +Random variation to the segment size limit used for each partition. + +*Unit:* percent + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `65535` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5` +endif::[] + +*Nullable:* No + +--- + +=== log_segment_size_max + +Upper bound on topic `segment.bytes`: higher values will be clamped to this limit. 
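+
+For example, to prevent any topic from using segments larger than 1 GiB, regardless of its `segment.bytes` setting (illustrative value):
+
+[,bash]
+----
+# 1 GiB upper bound, expressed in bytes
+rpk cluster config set log_segment_size_max 1073741824
+----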
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== log_segment_size_min + +Lower bound on topic `segment.bytes`: lower values will be clamped to this limit. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_MiB` +endif::[] + +*Nullable:* Yes + +--- + +=== lz4_decompress_reusable_buffers_disabled + +Disable reusable preallocated buffers for LZ4 decompression. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== max_compacted_log_segment_size + +Maximum compacted segment size after consolidation. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `512_MiB` +endif::[] + +*Nullable:* No + +--- + +=== max_compaction_lag_ms + +For a compacted topic, the maximum time a message remains ineligible for compaction. The topic property `max.compaction.lag.ms` overrides this property. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `max_serializable_ms` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/topic-properties.adoc#max.compaction.lag.ms[`max.compaction.lag.ms`] + +--- + +=== max_concurrent_producer_ids + +Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `Maximum value` +endif::[] + +*Nullable:* No + +--- + +=== max_in_flight_pandaproxy_requests_per_shard + +Maximum number of in-flight HTTP requests to HTTP Proxy permitted per shard. Any additional requests above this limit will be rejected with a 429 error. 
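+
+For example, if HTTP Proxy clients are regularly rejected with 429 responses and the brokers have spare capacity, you might raise the per-shard limit. The value is illustrative.
+
+[,bash]
+----
+rpk cluster config set max_in_flight_pandaproxy_requests_per_shard 1000
+----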
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `500` +endif::[] + +*Nullable:* No + +--- + +=== max_in_flight_schema_registry_requests_per_shard + +Maximum number of in-flight HTTP requests to Schema Registry permitted per shard. Any additional requests above this limit will be rejected with a 429 error. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `500` +endif::[] + +*Nullable:* No + +--- + +=== max_kafka_throttle_delay_ms + +Fail-safe maximum throttle delay on Kafka requests. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30'000ms` +endif::[] + +*Nullable:* No + +--- + +=== max_transactions_per_coordinator + +Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase). + +For details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips]. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `Maximum value` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:develop:transactions#transaction-usage-tips[Transaction usage tips] + +--- + +// tag::deprecated[] +=== max_version + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== members_backend_retry_ms + +Time between members backend reconciliation loop retries. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +=== memory_abort_on_alloc_failure + +If `true`, the Redpanda process will terminate immediately when an allocation cannot be satisfied due to memory exhaustion. If false, an exception is thrown. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== memory_enable_memory_sampling + +When `true`, memory allocations are sampled and tracked. A sampled live set of allocations can then be retrieved from the Admin API. 
Additionally, Redpanda will periodically log the top-n allocation sites. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== metadata_dissemination_interval_ms + +Interval for metadata dissemination batching. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3'000ms` +endif::[] + +*Nullable:* No + +--- + +=== metadata_dissemination_retries + +Number of attempts to look up a topic's metadata, such as its shard, before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for topics like tx, consumer offsets, etc) is missing. These topics are usually created on demand when users try to use the cluster for the first time, and it may take some time for the creation to happen and the metadata to propagate to all the brokers (particularly the broker handling the request). In the meantime, Redpanda waits and retries. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30` +endif::[] + +*Nullable:* No + +--- + +=== metadata_dissemination_retry_delay_ms + +Delay before retrying a topic lookup in a shard or other meta tables. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0'500ms` +endif::[] + +*Nullable:* No + +--- + +=== metadata_status_wait_timeout_ms + +Maximum time to wait in metadata request for cluster health to be refreshed. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2s` +endif::[] + +*Nullable:* No + +--- + +=== metrics_reporter_report_interval + +Cluster metrics reporter report interval. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `24h` +endif::[] + +*Nullable:* No + +--- + +=== metrics_reporter_tick_interval + +Cluster metrics reporter tick interval.
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1min` +endif::[] + +*Nullable:* No + +--- + +=== metrics_reporter_url + +URL of the cluster metrics reporter. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `https://m.rp.vectorized.io/v2` +endif::[] + +*Nullable:* No + +--- + +=== min_cleanable_dirty_ratio + +The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. The topic property `min.cleanable.dirty.ratio` overrides this value at the topic level. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.2` +endif::[] + +*Nullable:* Yes + +--- + +=== min_compaction_lag_ms + +The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0ms` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/topic-properties.adoc#min.compaction.lag.ms[`min.compaction.lag.ms`] + +--- + +// tag::deprecated[] +=== min_version + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== minimum_topic_replications + +Minimum allowable replication factor for topics in this cluster. The set value must be positive, odd, and equal to or less than the number of available brokers. Changing this parameter only restricts newly-created topics. Redpanda returns an `INVALID_REPLICATION_FACTOR` error on any attempt to create a topic with a replication factor less than this property. If you change the `minimum_topic_replications` setting, the replication factor of existing topics remains unchanged. However, Redpanda will log a warning on start-up with a list of any topics that have fewer replicas than this minimum. For example, you might see a message such as `Topic X has a replication factor less than specified minimum: 1 < 3`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1` +endif::[] + +*Nullable:* No + +--- + +=== node_isolation_heartbeat_timeout + +How long after the last heartbeat request a node will wait before considering itself to be isolated. 
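+
+As a sketch, you could shorten the window so that an isolated broker notices the condition sooner. The value is illustrative and assumes the timeout is specified in milliseconds.
+
+[,bash]
+----
+# Illustrative: consider the node isolated after 2 seconds without heartbeats
+rpk cluster config set node_isolation_heartbeat_timeout 2000
+----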
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-9223372036854776000`, `9223372036854776000`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3000` +endif::[] + +*Nullable:* No + +--- + +=== node_management_operation_timeout_ms + +Timeout for executing node management operations. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +=== node_status_interval + +Time interval between two node status messages. Node status messages establish liveness status outside of the Raft protocol. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +=== node_status_reconnect_max_backoff_ms + +Maximum backoff (in milliseconds) to reconnect to an unresponsive peer during node status liveness checks. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `15s` +endif::[] + +*Nullable:* No + +--- + +=== oidc_clock_skew_tolerance + +The amount of time (in seconds) to allow for when validating the expiry claim in the token. + +*Unit*: seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `seconds` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== oidc_discovery_url + +ifdef::env-cloud[] +NOTE: This property is read-only in Redpanda Cloud. +endif::[] + +The URL pointing to the well-known discovery endpoint for the OIDC provider. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `https://auth.prd.cloud.redpanda.com/.well-known/openid-configuration` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== oidc_keys_refresh_interval + +The frequency of refreshing the JSON Web Keys (JWKS) used to validate access tokens. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1h` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== oidc_principal_mapping + +ifdef::env-cloud[] +NOTE: This property is read-only in Redpanda Cloud. 
+endif::[] + +Rule for mapping JWT payload claim to a Redpanda user principal. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `$.sub` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:security/authentication.adoc#oidc[OpenID Connect authentication] +ifndef::env-cloud[] +* xref:manage:kubernetes/security/authentication/k-authentication.adoc[OpenID Connect authentication in Kubernetes] +endif::[] + +--- +// end::redpanda-cloud[] + +// tag::redpanda-cloud[] +=== oidc_token_audience + +ifdef::env-cloud[] +NOTE: This property is read-only in Redpanda Cloud. +endif::[] + +A string representing the intended recipient of the token. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `redpanda` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== partition_autobalancing_concurrent_moves + +Number of partitions that can be reassigned at once. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `50` +endif::[] + +*Nullable:* No + +--- + +=== partition_autobalancing_max_disk_usage_percent + +When the disk usage of a node exceeds this threshold, it triggers Redpanda to move partitions off of the node. This property applies only when partition_autobalancing_mode is set to `continuous`. + +*Unit:* percent + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `80` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing] + +--- + +=== partition_autobalancing_min_size_threshold + +Minimum size of partition that is going to be prioritized when rebalancing a cluster due to the disk size threshold being breached. This value is calculated automatically by default. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== partition_autobalancing_mode + +Mode of xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing] for a cluster. + +*Accepted values:* + +* `continuous`: partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option is customized by <> and <> properties. +* `node_add`: partition balancing happens when a node is added. +* `off`: partition balancing is disabled. This option is not recommended for production clusters. 
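+
+For example, to switch an appropriately licensed cluster to Continuous Data Balancing (shown as a sketch; review the related topics below before changing balancing modes):
+
+[,bash]
+----
+rpk cluster config set partition_autobalancing_mode continuous
+----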
+ +ifndef::env-cloud[] +*Enterprise license required*: `continuous` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `node_add` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing] +* xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing] + +--- + +// tag::deprecated[] +=== partition_autobalancing_movement_batch_size_bytes + +Total size of partitions that autobalancer is going to move in one batch (deprecated, use partition_autobalancing_concurrent_moves to limit the autobalancer concurrency) + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5_GiB` +endif::[] + +*Nullable:* No + +--- +// end::deprecated[] + +=== partition_autobalancing_node_availability_timeout_sec + +When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `15min` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing] + +--- + +=== partition_autobalancing_tick_interval_ms + +Partition autobalancer tick interval. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== partition_autobalancing_tick_moves_drop_threshold + +If the number of scheduled tick moves drops by this ratio, a new tick is scheduled immediately. Valid values are (0, 1]. For example, with a value of 0.2 and 100 scheduled moves in a tick, a new tick is scheduled when the in-progress moves are fewer than 80. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.2` +endif::[] + +*Nullable:* No + +--- + +=== partition_autobalancing_topic_aware + +If `true`, Redpanda prioritizes balancing a topic’s partition replica count evenly across all brokers while it’s balancing the cluster’s overall partition count. Because different topics in a cluster can have vastly different load profiles, this better distributes the workload of the most heavily-used topics evenly across brokers. 
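+
+For example, to turn topic-aware balancing off while investigating how partitions are being placed (illustrative; the property defaults to `true`):
+
+[,bash]
+----
+rpk cluster config set partition_autobalancing_topic_aware false
+----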
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== partition_manager_shutdown_watchdog_timeout + +A threshold value to detect partitions which might have been stuck while shutting down. After this threshold, a watchdog in partition manager will log information about partition shutdown not making progress. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== pp_sr_smp_max_non_local_requests + +Maximum number of Cross-core(Inter-shard communication) requests pending in HTTP Proxy and Schema Registry seastar::smp group. (For more details, see the `seastar::smp_service_group` documentation). + +See https://docs.seastar.io/master/[Seastar documentation^] + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== quota_manager_gc_sec + +Quota manager GC frequency in milliseconds. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30000 milliseconds` +endif::[] + +*Nullable:* No + +--- + +=== raft_enable_longest_log_detection + +Enables an additional step in leader election where a candidate is allowed to wait for all the replies from the broker it requested votes from. This may introduce a small delay when recovering from failure, but it prevents truncation if any of the replicas have more data than the majority. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== raft_enable_lw_heartbeat + +Enables Raft optimization of heartbeats. 
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== raft_flush_timer_interval_ms + +Interval of checking partition against the `raft_replica_max_pending_flush_bytes`, deprecated started 24.1, use raft_replica_max_flush_delay_ms instead + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- +// end::deprecated[] + +=== raft_heartbeat_disconnect_failures + +The number of failed heartbeats after which an unresponsive TCP connection is forcibly closed. To disable forced disconnection, set to 0. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3` +endif::[] + +*Nullable:* No + +--- + +=== raft_heartbeat_interval_ms + +Number of milliseconds for Raft leader heartbeats. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `150 milliseconds` +endif::[] + +*Nullable:* No + +--- + +=== raft_heartbeat_timeout_ms + +Raft heartbeat RPC (remote procedure call) timeout. Raft uses a heartbeat mechanism to maintain leadership authority and to trigger leader elections. The `raft_heartbeat_interval_ms` is a periodic heartbeat sent by the partition leader to all followers to declare its leadership. If a follower does not receive a heartbeat within the `raft_heartbeat_timeout_ms`, then it triggers an election to choose a new partition leader. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3s` +endif::[] + +*Nullable:* No + +--- + +=== raft_io_timeout_ms + +Raft I/O timeout. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10'000ms` +endif::[] + +*Nullable:* No + +--- + +=== raft_learner_recovery_rate + +Raft learner recovery rate limit. Throttles the rate of data communicated to nodes (learners) that need to catch up to leaders. This rate limit is placed on a node sending data to a recovering node. Each sending node is limited to this rate. The recovering node accepts data as fast as possible according to the combined limits of all healthy nodes in the cluster. 
For example, if two nodes are sending data to the recovering node, and `raft_learner_recovery_rate` is 100 MB/sec, then the recovering node will recover at a rate of 200 MB/sec. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100_MiB` +endif::[] + +*Nullable:* No + +--- + +=== raft_max_buffered_follower_append_entries_bytes_per_shard + +The total size of append entry requests that may be cached per shard, using the Raft-buffered protocol. When an entry is cached, the leader can continue serving requests because the ordering of the cached requests cannot change. When the total size of cached requests reaches the set limit, back pressure is applied to throttle producers. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== raft_max_concurrent_append_requests_per_follower + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== raft_max_inflight_follower_append_entries_requests_per_shard + +The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. This property replaces `raft_max_concurrent_append_requests_per_follower`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1024` +endif::[] + +*Nullable:* No + +--- + +=== raft_max_recovery_memory + +Maximum memory that can be used for reads in the Raft recovery process. By default, this is 15% of total memory. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== raft_recovery_concurrency_per_shard + +Number of partitions that may simultaneously recover data to a particular shard. This number is limited to avoid overwhelming nodes when they come back online after an outage. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `64` +endif::[] + +*Nullable:* No + +--- + +=== raft_recovery_default_read_size + +Specifies the default size of a read issued during Raft follower recovery. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `512_KiB` +endif::[] + +*Nullable:* No + +--- + +=== raft_recovery_throttle_disable_dynamic_mode + +include::reference:partial$internal-use-property.adoc[] + +Disables cross shard sharing used to throttle recovery traffic. Should only be used to debug unexpected problems.
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== raft_replica_max_flush_delay_ms + +Maximum delay between two subsequent flushes. After this delay, the log is automatically force flushed. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +=== raft_replica_max_pending_flush_bytes + +Maximum number of bytes that are not flushed per partition. If the configured threshold is reached, the log is automatically flushed even if it has not been explicitly requested. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `256_KiB` +endif::[] + +*Nullable:* Yes + +--- + +=== raft_replicate_batch_window_size + +Maximum size of requests cached for replication. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_MiB` +endif::[] + +*Nullable:* No + +--- + +=== raft_smp_max_non_local_requests + +Maximum number of Cross-core(Inter-shard communication) requests pending in Raft seastar::smp group. For details, refer to the `seastar::smp_service_group` documentation). + +See https://docs.seastar.io/master/[Seastar documentation^] + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== raft_timeout_now_timeout_ms + +Timeout for Raft's timeout_now RPC. This RPC is used to force a follower to dispatch a round of votes immediately. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1s` +endif::[] + +*Nullable:* No + +--- + +=== raft_transfer_leader_recovery_timeout_ms + +Follower recovery timeout waiting period when transferring leadership. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== readers_cache_eviction_timeout_ms + +Duration after which inactive readers are evicted from cache. 
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== readers_cache_target_max_size + +Maximum desired number of readers cached per NTP. This is a soft limit, meaning that the number of readers in the cache may temporarily increase as cleanup is performed in the background. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `200` +endif::[] + +*Nullable:* No + +--- + +=== reclaim_batch_cache_min_free + +Minimum amount of free memory maintained by the batch cache background reclaimer. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `64_MiB` +endif::[] + +*Nullable:* No + +--- + +=== reclaim_growth_window + +Starting from the last point in time when memory was reclaimed from the batch cache, this is the duration during which the amount of memory to reclaim grows at a significant rate, based on heuristics about the amount of available memory. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3'000ms` +endif::[] + +*Nullable:* No + +--- + +=== reclaim_max_size + +Maximum batch cache reclaim size. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `4_MiB` +endif::[] + +*Nullable:* No + +--- + +=== reclaim_min_size + +Minimum batch cache reclaim size. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128_KiB` +endif::[] + +*Nullable:* No + +--- + +=== reclaim_stable_window + +If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10'000ms` +endif::[] + +*Nullable:* No + +--- + +=== recovery_append_timeout_ms + +Timeout for append entry requests issued while updating a stale follower.
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +=== release_cache_on_segment_roll + +Flag for specifying whether or not to release cache when a full segment is rolled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== replicate_append_timeout_ms + +Timeout for append entry requests issued while replicating entries. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3s` +endif::[] + +*Nullable:* No + +--- + +=== retention_bytes + +Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. If `null` (the default value), no limit is applied. + +The topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] + +--- + +=== retention_local_strict + +Flag to allow Tiered Storage topics to expand to consumable retention policy limits. When this flag is enabled, non-local retention settings are used, and local retention settings are used to inform data removal policies in low-disk space scenarios. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== retention_local_strict_override + +Trim log data when a cloud topic reaches its local retention limit. When this option is disabled, Redpanda allows partitions to grow past the local retention limit, and data is trimmed automatically as storage reaches the configured target size. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== retention_local_target_bytes_default + +Local retention size target for partitions of topics with object storage write enabled. If `null`, the property is disabled. + +This property can be overridden on a per-topic basis by setting `retention.local.target.bytes` in each topic enabled for Tiered Storage.
See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] + +--- + +=== retention_local_target_capacity_bytes + +The target capacity (in bytes) that log storage will try to use before additional retention rules take over to trim data to meet the target. When no target is specified, storage usage is unbounded. + +NOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== retention_local_target_capacity_percent + +The target capacity in percent of unreserved space (<>) that log storage will try to use before additional retention rules will take over to trim data in order to meet the target. When no target is specified storage usage is unbounded. + +NOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity. + +*Unit:* percent + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `80.0` +endif::[] + +*Nullable:* Yes + +--- + +=== retention_local_target_ms_default + +Local retention time target for partitions of topics with object storage write enabled. + +This property can be overridden on a per-topic basis by setting `retention.local.target.ms` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `24h` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] + +--- + +=== retention_local_trim_interval + +The period during which disk usage is checked for disk pressure, and data is optionally trimmed to meet the target. 
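
For example, to check disk pressure less frequently, you could set the interval with `rpk` (the value is in milliseconds and is illustrative only):

.Example
[,bash]
----
rpk cluster config set retention_local_trim_interval 60000
----
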
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== retention_local_trim_overage_coeff + +The space management control loop reclaims the overage multiplied by this this coefficient to compensate for data that is written during the idle period between control loop invocations. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2.0` +endif::[] + +*Nullable:* No + +--- + +=== rm_sync_timeout_ms + +Resource manager's synchronization timeout. Specifies the maximum time for this node to wait for the internal state machine to catch up with all events written by previous leaders before rejecting a request. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== rm_violation_recovery_policy + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== rpc_client_connections_per_peer + +The maximum number of connections a broker will open to each of its peers. + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128` +endif::[] + +*Nullable:* No + +--- + +=== rpc_server_compress_replies + +Enable compression for internal RPC (remote procedure call) server replies. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== rpc_server_listen_backlog + +Maximum TCP connection queue length for Kafka server and internal RPC server. If `null` (the default value), no queue length is set. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== rpc_server_tcp_recv_buf + +Internal RPC TCP receive buffer size. If `null` (the default value), no buffer size is set by Redpanda. + +*Unit:* bytes + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== rpc_server_tcp_send_buf + +Internal RPC TCP send buffer size. If `null` (the default value), then no buffer size is set by Redpanda. 
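
For example, here is a hedged sketch of pinning the send buffer to 1 MiB with `rpk`; the value is illustrative, and the brokers must be restarted for it to take effect:

.Example
[,bash]
----
rpk cluster config set rpc_server_tcp_send_buf 1048576
----
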
+ +*Unit:* bytes + +*Requires restart:* Yes + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== rpk_path + +Path to RPK binary. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `/usr/bin/rpk` +endif::[] + +*Nullable:* No + +--- + +=== rps_limit_acls_and_users_operations + +Rate limit for controller ACLs and user's operations. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== rps_limit_configuration_operations + +Rate limit for controller configuration operations. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== rps_limit_move_operations + +Rate limit for controller move operations. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== rps_limit_node_management_operations + +Rate limit for controller node management operations. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== rps_limit_topic_operations + +Rate limit for controller topic operations. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== sasl_kerberos_config + +The location of the Kerberos `krb5.conf` file for Redpanda. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `/etc/krb5.conf` +endif::[] + +*Nullable:* No + +--- + +=== sasl_kerberos_keytab + +The location of the Kerberos keytab file for Redpanda. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `/var/lib/redpanda/redpanda.keytab` +endif::[] + +*Nullable:* No + +--- + +=== sasl_kerberos_principal + +The primary of the Kerberos Service Principal Name (SPN) for Redpanda. 
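
For example, if your SPNs use a primary other than the default `redpanda`, you can override it with `rpk` (`redpanda-prod` is a hypothetical primary):

.Example
[,bash]
----
rpk cluster config set sasl_kerberos_principal redpanda-prod
----
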
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `redpanda` +endif::[] + +*Nullable:* No + +--- + +=== sasl_kerberos_principal_mapping + +Rules for mapping Kerberos principal names to Redpanda user principals. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[DEFAULT]` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== sasl_mechanisms + +A list of supported SASL mechanisms. + +*Accepted values:* + +* `SCRAM` +* `GSSAPI` +* `OAUTHBEARER` +* `PLAIN` + +Note that in order to enable PLAIN, you must also enable SCRAM. + +ifndef::env-cloud[] +*Enterprise license required*: `GSSAPI,OAUTHBEARER` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[SCRAM]` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== sasl_mechanisms_overrides + +A list of overrides for SASL mechanisms, defined by listener. SASL mechanisms defined here will replace the ones set in `sasl_mechanisms`. The same limitations apply as for `sasl_mechanisms`. + +ifndef::env-cloud[] +*Enterprise license required*: `Any override containing enterprise mechanisms (GSSAPI, OAUTHBEARER).` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- + +=== schema_registry_always_normalize + +Always normalize schemas. If set, this overrides the `normalize` parameter in requests to the Schema Registry API. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +// tag::self-managed-only[] +*Aliases:* schema_registry_normalize_on_startup +// end::self-managed-only[] + +--- + +// tag::redpanda-cloud[] +=== schema_registry_enable_authorization + +Enables ACL-based authorization for Schema Registry requests. When `true`, Schema Registry +uses ACL-based authorization instead of the default `public/user/superuser` authorization model. +ifdef::env-cloud[] +Requires authentication to be enabled using the `authentication_method` property in the `schema_registry_api` broker configuration. 
+endif::[] + +ifndef::env-cloud[] +*Enterprise license required*: `true` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +// tag::deprecated[] +=== schema_registry_protobuf_renderer_v2 + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== seed_server_meta_topic_partitions + +No description available. + + +*Unit:* number of partitions per topic + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== segment_appender_flush_timeout_ms + +Maximum delay until buffered data is written. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1s milliseconds` +endif::[] + +*Nullable:* No + +--- + +=== segment_fallocation_step + +Size for segments fallocation. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `32_MiB` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== seq_table_min_size + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== space_management_enable + +Option to explicitly disable automatic disk space management. If this property was explicitly disabled while using v23.2, it will remain disabled following an upgrade. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== space_management_enable_override + +Enable automatic space management. This option is ignored and deprecated in versions >= v23.3. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== space_management_max_log_concurrency + +Maximum parallel logs inspected during space management process. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `65535` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `20` +endif::[] + +*Nullable:* No + +--- + +=== space_management_max_segment_concurrency + +Maximum parallel segments inspected during space management process. 
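
For example, to reduce I/O pressure during space management runs, you might lower the concurrency with `rpk` (the value is illustrative only):

.Example
[,bash]
----
rpk cluster config set space_management_max_segment_concurrency 5
----
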
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `65535` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10` +endif::[] + +*Nullable:* No + +--- + +=== storage_compaction_index_memory + +Maximum number of bytes that may be used on each shard by compaction index writers. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128_MiB` +endif::[] + +*Nullable:* No + +--- + +=== storage_compaction_key_map_memory + +Maximum number of bytes that may be used on each shard by compaction key-offset maps. Only applies when <> is set to `true`. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128_MiB` +endif::[] + +*Nullable:* No + +--- + +=== storage_compaction_key_map_memory_limit_percent + +Limit on <>, expressed as a percentage of memory per shard, that bounds the amount of memory used by compaction key-offset maps. + +NOTE: Memory per shard is computed after <>, and only applies when <> is set to `true`. + +*Unit:* percent + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `12.0` +endif::[] + +*Nullable:* No + +--- + +=== storage_ignore_cstore_hints + +When set, cstore hints are ignored and not used for data access (but are otherwise generated). + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== storage_ignore_timestamps_in_future_sec + +The maximum number of seconds that a record's timestamp can be ahead of a Redpanda broker's clock and still be used when deciding whether to clean up the record for data retention. This property makes possible the timely cleanup of records from clients with clocks that are drastically unsynchronized relative to Redpanda. + +When determining whether to clean up a record with timestamp more than `storage_ignore_timestamps_in_future_sec` seconds ahead of the broker, Redpanda ignores the record's timestamp and instead uses a valid timestamp of another record in the same segment, or (if another record's valid timestamp is unavailable) the timestamp of when the segment file was last modified (mtime). + +By default, `storage_ignore_timestamps_in_future_sec` is disabled (null). + +[TIP] +==== +To figure out whether to set `storage_ignore_timestamps_in_future_sec` for your system: + +. Look for logs with segments that are unexpectedly large and not being cleaned up. +. In the logs, search for records with unsynchronized timestamps that are further into the future than tolerable by your data retention and storage settings. 
For example, timestamps 60 seconds or more into the future can be considered to be too unsynchronized. +. If you find unsynchronized timestamps throughout your logs, determine the number of seconds that the timestamps are ahead of their actual time, and set `storage_ignore_timestamps_in_future_sec` to that value so data retention can proceed. +. If you only find unsynchronized timestamps that are the result of transient behavior, you can disable `storage_ignore_timestamps_in_future_sec`. +==== + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== storage_max_concurrent_replay + +Maximum number of partitions' logs that will be replayed concurrently at startup, or flushed concurrently on shutdown. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1024` +endif::[] + +*Nullable:* No + +--- + +=== storage_min_free_bytes + +Threshold of minimum bytes free space before rejecting producers. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5_GiB` +endif::[] + +*Nullable:* No + +--- + +=== storage_read_buffer_size + +Size of each read buffer (one per in-flight read, per log segment). + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `128_KiB` +endif::[] + +*Nullable:* No + +--- + +=== storage_read_readahead_count + +How many additional reads to issue ahead of current read location. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1` +endif::[] + +*Nullable:* No + +--- + +=== storage_reserve_min_segments + +The number of segments per partition that the system will attempt to reserve disk capacity for. For example, if the maximum segment size is configured to be 100 MB, and the value of this option is 2, then in a system with 10 partitions Redpanda will attempt to reserve at least 2 GB of disk space. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `2` +endif::[] + +*Nullable:* No + +--- + +=== storage_space_alert_free_threshold_bytes + +Threshold of minimum bytes free space before setting storage space alert. 
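
For example, to raise the alert when free space drops below 10 GiB, you could set the threshold in bytes with `rpk` (illustrative value):

.Example
[,bash]
----
rpk cluster config set storage_space_alert_free_threshold_bytes 10737418240
----
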
+ +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0` +endif::[] + +*Nullable:* No + +--- + +=== storage_space_alert_free_threshold_percent + +Threshold of minimum percent free space before setting storage space alert. + +*Unit:* percent + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5` +endif::[] + +*Nullable:* No + +--- + +=== storage_strict_data_init + +Requires that an empty file named `.redpanda_data_dir` be present in the xref:reference:properties/broker-properties.adoc#data_directory[`data_ directory`]. If set to `true`, Redpanda will refuse to start if the file is not found in the data directory. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/broker-properties.adoc#data_directory[`data_ directory`] + +--- + +=== storage_target_replay_bytes + +Target bytes to replay from disk on startup after clean shutdown: controls frequency of snapshots and checkpoints. + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10_GiB` +endif::[] + +*Nullable:* No + +--- + +=== superusers + +List of superuser usernames. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* array + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `[]` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== target_fetch_quota_byte_rate + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== target_quota_byte_rate + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== tls_certificate_name_format + +The format of the certificates's distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* tls_name_format + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `legacy` +endif::[] + +*Nullable:* No + +--- + +=== tls_enable_renegotiation + +TLS client-initiated renegotiation is considered unsafe and is disabled by default . Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections as client-initiated renegotiation was removed. 
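
For example, if a legacy client cannot complete TLS handshakes without renegotiation, you could temporarily re-enable it with `rpk` and restart the brokers; treat this as a stopgap rather than a recommended configuration:

.Example
[,bash]
----
rpk cluster config set tls_enable_renegotiation true
----
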
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +// tag::redpanda-cloud[] +=== tls_min_version + +The minimum TLS version that Redpanda clusters support. This property prevents client applications from negotiating a downgrade to the TLS version when they make a connection to a Redpanda cluster. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* tls_version + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `v1_2` +endif::[] + +*Nullable:* No + +--- +// end::redpanda-cloud[] + +=== tm_sync_timeout_ms + +Transaction manager's synchronization timeout. Maximum time to wait for internal state machine to catch up before rejecting a request. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== tm_violation_recovery_policy + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== tombstone_retention_ms + +The retention time for tombstone records in a compacted topic. Cannot be enabled at the same time as any of `cloud_storage_enabled`, `cloud_storage_enable_remote_read`, or `cloud_storage_enable_remote_write`. A typical default setting is `86400000`, or 24 hours. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal] + +--- + +=== topic_fds_per_partition + +File descriptors required per partition replica. If topic creation results in the ratio of file descriptor limit to partition replicas being lower than this value, creation of new topics is fails. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5` +endif::[] + +*Nullable:* Yes + +--- + +=== topic_label_aggregation_limit + +When the number of topics exceeds this limit, the topic label in generated metrics will be aggregated. If `null`, then there is no limit. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== topic_memory_per_partition + +Required memory in bytes per partition replica when creating or altering topics. 
The total size of the memory pool for partitions is the total memory available to Redpanda times `topic_partitions_memory_allocation_percent`. Each partition created requires `topic_memory_per_partition` bytes from that pool. If insufficient memory is available, creating or altering topics fails. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `DEFAULT_TOPIC_MEMORY_PER_PARTITION` +endif::[] + +*Nullable:* Yes + +--- + +=== topic_partitions_memory_allocation_percent + +Percentage of total memory to reserve for topic partitions. See <> for details. + +*Unit:* percent + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10` +endif::[] + +*Nullable:* No + +--- + +=== topic_partitions_per_shard + +Maximum number of partition replicas per shard. If topic creation results in the ratio of partition replicas to shards being higher than this value, creation of new topics fails. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5000` +endif::[] + +*Nullable:* No + +--- + +=== topic_partitions_reserve_shard0 + +Reserved partition slots on shard (CPU core) 0 on each node. If this is greater than or equal to <>, no data partitions will be scheduled on shard 0. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0` +endif::[] + +*Nullable:* No + +--- + +=== transaction_coordinator_cleanup_policy + +Cleanup policy for a transaction coordinator topic. + +*Accepted values:* + +* `compact` +* `delete` +* `["compact","delete"]` +* `none` + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `deletion` +endif::[] + +*Nullable:* No + +--- + +=== transaction_coordinator_delete_retention_ms + +Delete segments older than this age. To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to <>. + +For example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10080min` +endif::[] + +*Nullable:* No + +--- + +=== transaction_coordinator_log_segment_size + +The size (in bytes) each log segment should be. 
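
For example, to roll transaction coordinator segments at 512 MiB instead of the default, you could set the size in bytes with `rpk` (illustrative value):

.Example
[,bash]
----
rpk cluster config set transaction_coordinator_log_segment_size 536870912
----
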
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_GiB` +endif::[] + +*Nullable:* No + +--- + +=== transaction_coordinator_partitions + +Number of partitions for transactions coordinator. + +*Unit:* number of partitions per topic + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `50` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== transaction_coordinator_replication + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== transaction_max_timeout_ms + +The maximum allowed timeout for transactions. If a client-requested transaction timeout exceeds this configuration, the broker returns an error during transactional producer initialization. This guardrail prevents hanging transactions from blocking consumer progress. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `15min` +endif::[] + +*Nullable:* No + +--- + +=== transactional_id_expiration_ms + +Expiration time of producer IDs. Measured starting from the time of the last write until now for a given ID. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10080min` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== tx_log_stats_interval_s + +How often to log per partition tx stats, works only with debug logging enabled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== tx_registry_log_capacity + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +// tag::deprecated[] +=== tx_registry_sync_timeout_ms + +No description available. + + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== tx_timeout_delay_ms + +Delay before scheduling the next check for timed out transactions. 
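
For example, here is a hedged sketch of relaxing the check interval to two seconds with `rpk`; the value is illustrative, and a broker restart is required for it to take effect:

.Example
[,bash]
----
rpk cluster config set tx_timeout_delay_ms 2000
----
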
+ +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000ms` +endif::[] + +*Nullable:* No + +--- + +=== unsafe_enable_consumer_offsets_delete_retention + +Enables delete retention of consumer offsets topic. This is an internal-only configuration and should be enabled only after consulting with Redpanda support. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== usage_disk_persistance_interval_sec + +The interval in which all usage stats are written to disk. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60 * 5 seconds` +endif::[] + +*Nullable:* No + +--- + +=== usage_num_windows + +The number of windows to persist in memory and disk. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `24` +endif::[] + +*Nullable:* No + +--- + +=== usage_window_width_interval_sec + +The width of a usage window, tracking cloud and kafka ingress/egress traffic each interval. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `3600 seconds` +endif::[] + +*Nullable:* No + +--- + +=== use_fetch_scheduler_group + +Use a separate scheduler group for fetch processing. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== use_kafka_handler_scheduler_group + +Use a separate scheduler group to handle parsing Kafka protocol requests. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== use_produce_scheduler_group + +Use a separate scheduler group to process Kafka produce requests. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== use_scheduling_groups + +No description available. 
+ + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== virtual_cluster_min_producer_ids + +Minimum number of active producers per virtual cluster. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `Maximum value` +endif::[] + +*Nullable:* No + +--- + +=== wait_for_leader_timeout_ms + +Timeout to wait for leadership in metadata cache. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5'000ms` +endif::[] + +*Nullable:* No + +--- + +=== write_caching_default + +The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. + +Fsyncs follow <> and <>, whichever is reached first. + +The `write_caching_default` cluster property can be overridden with the xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] topic property. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `default_false` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] +* xref:develop:config-topics.adoc#configure-write-caching[Write caching] + +--- + +=== zstd_decompress_workspace_bytes + +Size of the zstd decompression workspace. + +*Unit:* bytes + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `8_MiB` +endif::[] + +*Nullable:* No + +--- diff --git a/modules/reference/partials/properties/object-storage-properties.adoc b/modules/reference/partials/properties/object-storage-properties.adoc new file mode 100644 index 0000000000..a86e88379b --- /dev/null +++ b/modules/reference/partials/properties/object-storage-properties.adoc @@ -0,0 +1,2731 @@ +// This content is autogenerated. Do not edit manually. To override descriptions, use the doc-tools CLI with the --overrides option: https://redpandadata.atlassian.net/wiki/spaces/DOC/pages/1396244485/Review+Redpanda+configuration+properties +=== cloud_storage_access_key + +AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication. +To authenticate using IAM roles, see <>. 
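
For example, here is a hedged sketch of configuring static credentials with `rpk`; `<access-key>` is a placeholder, the matching secret key referenced above must also be set, and the brokers must be restarted afterward:

.Example
[,bash]
----
rpk cluster config set cloud_storage_access_key <access-key>
----
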
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_api_endpoint + +Optional API endpoint. The only instance in which you must set this value is when using a custom domain with your object storage service. + +- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned. +- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>. +- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_api_endpoint_port + +TLS port override. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `443` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_attempt_cluster_restore_on_bootstrap + +When set to `true`, Redpanda automatically retrieves cluster metadata from a specified object storage bucket at the cluster's first startup. This option is ideal for orchestrated deployments, such as Kubernetes. Ensure any previous cluster linked to the bucket is fully decommissioned to prevent conflicts between Tiered Storage subsystems. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_azure_adls_endpoint + +Azure Data Lake Storage v2 endpoint override. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint. + +If not set, this is automatically generated using `dfs.core.windows.net` and <>. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_azure_adls_port + +Azure Data Lake Storage v2 port override. See also: <>. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `65535` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +// tag::redpanda-cloud[] +=== cloud_storage_azure_container + +The name of the Azure container to use with Tiered Storage. If `null`, the property is disabled. + +NOTE: The container must belong to <>. 
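
For example, to point Tiered Storage at a container named `redpanda-tiered` (a hypothetical name) in the account configured by `cloud_storage_azure_storage_account`, you could run the following and then restart the brokers:

.Example
[,bash]
----
rpk cluster config set cloud_storage_azure_container redpanda-tiered
----
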
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +=== cloud_storage_azure_hierarchical_namespace_enabled + +Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <>. + +When this property is not set, <> must be set, and each broker checks at startup if a hierarchical namespace is enabled. + +When set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. + +When set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. + +This setting should be used only in emergencies where Redpanda fails to detect the correct a hierarchical namespace status. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_azure_managed_identity_id + +The managed identity ID to use for access to the Azure storage account. To use Azure managed identities, you must set <> to `azure_vm_instance_metadata`. See xref:manage:security/iam-roles.adoc[IAM Roles] for more information on managed identities. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:manage:security/iam-roles.adoc[IAM Roles] + +--- + +=== cloud_storage_azure_shared_key + +The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <>. If `null`, the property is disabled. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +// tag::redpanda-cloud[] +=== cloud_storage_azure_storage_account + +The name of the Azure storage account to use with Tiered Storage. If `null`, the property is disabled. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::redpanda-cloud[] + +=== cloud_storage_backend + +Optional object storage backend variant used to select API capabilities. If not supplied, this will be inferred from other configuration properties. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `unknown` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_background_jobs_quota + +The total number of requests the object storage background jobs can make during one background housekeeping run. This is a per-shard limit. 
Adjusting this limit can optimize object storage traffic and impact shard performance. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-2147483648`, `2147483647`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5000` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_bucket + +AWS or GCP bucket that should be used to store data. + +WARNING: Modifying this property after writing data to a bucket could cause data loss. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_cache_check_interval + +Minimum interval between Tiered Storage cache trims, measured in milliseconds. This setting dictates the cooldown period after a cache trim operation before another trim can occur. If a cache fetch operation requests a trim but the interval since the last trim has not yet passed, the trim will be postponed until this cooldown expires. Adjusting this interval helps manage the balance between cache size and retrieval performance. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cache_chunk_size + +Size of chunks of segments downloaded into object storage cache. Reduces space usage by only downloading the necessary chunk from a segment. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `16_MiB` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cache_max_objects + +Maximum number of objects that may be held in the Tiered Storage cache. This applies simultaneously with <>, and whichever limit is hit first will trigger trimming of the cache. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100000` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cache_num_buckets + +Divide the object storage cache across the specified number of buckets. This only works for objects with randomized prefixes. The names are not changed when the value is set to zero. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cache_size + +Maximum size of the object storage cache, in bytes. 
+ +This property works together with <> to define cache behavior: + +- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes. + +- If one of these properties is set to `0`, Redpanda uses the non-zero value. + +- These properties cannot both be `0`. + +- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cache_size_percent + +Maximum size of the cloud cache as a percentage of unreserved disk space disk_reservation_percent. The default value for this option is tuned for a shared disk configuration. Consider increasing the value if using a dedicated cache disk. The property <> controls the same limit expressed as a fixed number of bytes. If both `cloud_storage_cache_size` and `cloud_storage_cache_size_percent` are set, Redpanda uses the minimum of the two. + +*Unit:* percent + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `20.0` +endif::[] + +*Nullable:* Yes + +*Related topics:* + +* xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`] + +--- + +// tag::deprecated[] +=== cloud_storage_cache_trim_carryover_bytes + +The cache performs a recursive directory inspection during the cache trim. The information obtained during the inspection can be carried over to the next trim operation. This parameter sets a limit on the memory occupied by objects that can be carried over from one trim to next, and allows cache to quickly unblock readers before starting the directory inspection (deprecated) + +*Unit:* bytes + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0_KiB` +endif::[] + +*Nullable:* No + +--- +// end::deprecated[] + +=== cloud_storage_cache_trim_threshold_percent_objects + +ifndef::env-cloud[] +*Introduced in 24.1.10* +endif::[] + +Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_cache_trim_threshold_percent_size + +ifndef::env-cloud[] +*Introduced in 24.1.10* +endif::[] + +Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. If unset, the default behavior is to start trimming when the cache is full. 
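
For example, to start trimming once the cache reaches 80% of its maximum size rather than waiting until it is full, you could set the threshold with `rpk` (illustrative value):

.Example
[,bash]
----
rpk cluster config set cloud_storage_cache_trim_threshold_percent_size 80.0
----
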
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_cache_trim_walk_concurrency + +The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming. A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `65535` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_chunk_eviction_strategy + +Selects a strategy for evicting unused cache chunks. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `eager` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_chunk_prefetch + +Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `65535` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_client_lease_timeout_ms + +Maximum time to hold a cloud storage client lease (ms), after which any outstanding connection is immediately closed. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `900s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cluster_metadata_num_consumer_groups_per_upload + +Number of groups to upload in a single snapshot object during consumer offsets upload. Setting a lower value will mean a larger number of smaller snapshots are uploaded. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cluster_metadata_retries + +Number of attempts metadata operations may be retried. 
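+
+For example, to allow more retry attempts for cluster metadata operations, set the property with `rpk` (the value is illustrative). Because this property requires a restart, restart the brokers afterward for the change to take effect:
+
+[,bash]
+----
+rpk cluster config set cloud_storage_cluster_metadata_retries 10
+----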
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cluster_metadata_upload_interval_ms + +Time interval to wait between cluster metadata uploads. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1h` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_cluster_metadata_upload_timeout_ms + +Timeout for cluster metadata uploads. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_credentials_host + +The hostname to connect to for retrieving role based credentials. Derived from <> if not set. Only required when using IAM role based access. To authenticate using access keys, see <>. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_credentials_source + +The source of credentials used to authenticate to object storage services. +Required for AWS or GCP authentication with IAM roles. + +To authenticate using access keys, see <>. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* object + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `config_file` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_crl_file + +Path to certificate revocation list for <>. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_disable_archival_stm_rw_fence + +Disables the concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_disable_archiver_manager + +Use legacy upload mode and do not start archiver_manager. 
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_disable_chunk_reads + +Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== cloud_storage_disable_metadata_consistency_checks + +No description available. + + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== cloud_storage_disable_read_replica_loop_for_tests + +Disables the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_disable_remote_labels_for_tests + +If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_disable_tls + +Disable TLS for all object storage connections. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_disable_upload_consistency_checks + +Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_disable_upload_loop_for_tests + +Disables the upload loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production.
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_enable_compacted_topic_reupload + +Enable re-uploading data for compacted topics. +When set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_enable_remote_allow_gaps + +Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. If the retained data fills the local volume, Redpanda throttles producers. Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_enable_remote_read + +Default remote read config value for new topics. +When set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. With the default set to `false`, remote reads must be explicitly enabled at the topic level. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_enable_remote_write + +Default remote write value for new topics. +When set to `true`, new topics are by default configured to upload data to object storage. With the default set to `false`, remote write must be explicitly enabled at the topic level. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_enable_scrubbing + +Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_enable_segment_merging + +Enables adjacent segment merging. 
The segments are reuploaded if there is an opportunity for that and if it will improve the tiered-storage performance + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +*Related topics:* + +* xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping] + +--- + +=== cloud_storage_enable_segment_uploads + +Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_enabled + +Enable object storage. Must be set to `true` to use Tiered Storage or Remote Read Replicas. + +ifndef::env-cloud[] +*Enterprise license required*: `true` (for license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]) +endif::[] + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_full_scrub_interval_ms + +Interval, in milliseconds, between a final scrub and the next scrub. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `12h` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_garbage_collect_timeout_ms + +Timeout for running the cloud storage garbage collection, in milliseconds. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_graceful_transfer_timeout_ms + +Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* Yes + +// tag::self-managed-only[] +*Aliases:* cloud_storage_graceful_transfer_timeout +// end::self-managed-only[] + +--- + +=== cloud_storage_housekeeping_interval_ms + +Interval, in milliseconds, between object storage housekeeping tasks. 
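+
+For example, to run housekeeping tasks every 10 minutes instead of the default 5 minutes, set the interval in milliseconds with `rpk` (illustrative value, not a recommendation):
+
+[,bash]
+----
+rpk cluster config set cloud_storage_housekeeping_interval_ms 600000
+----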
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5min` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_hydrated_chunks_per_segment_ratio + +The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed. + +A segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy, which simply removes all unused chunks. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.7` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_hydration_timeout_ms + +Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error. + +Redpanda does not enforce minimum or maximum bounds for this property, so avoid negative values and unreasonably large timeouts. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `600s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_idle_threshold_rps + +The object storage request rate threshold for idle state detection. If the average request rate for the configured period is lower than this threshold, the object storage is considered idle. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10.0` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_idle_timeout_ms + +The timeout, in milliseconds, used to detect the idle state of the object storage API. If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_initial_backoff_ms + +Initial backoff time, in milliseconds, for the exponential backoff algorithm.
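+
+For example, to double the initial backoff from the default 100 milliseconds, set the property with `rpk` (the value is illustrative):
+
+[,bash]
+----
+rpk cluster config set cloud_storage_initial_backoff_ms 200
+----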
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_inventory_based_scrub_enabled + +Scrubber uses the latest cloud storage inventory report, if available, to check if the required objects exist in the bucket or container. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_inventory_id + +The name of the scheduled inventory job created by Redpanda to generate bucket or container inventory reports. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `redpanda_scrubber_inventory` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_inventory_max_hash_size_during_parse + +Maximum bytes of hashes held in memory before writing data to disk during inventory report parsing. This affects the number of files written to disk during inventory report parsing. When this limit is reached, new files are written to disk. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `64_MiB` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_inventory_report_check_interval_ms + +Time interval between checks for a new inventory report in the cloud storage bucket or container. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `6h` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_inventory_reports_prefix + +The prefix to the path in the cloud storage bucket or container where inventory reports will be placed. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `redpanda_scrubber_inventory` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_inventory_self_managed_report_config + +If enabled, Redpanda will not attempt to create the scheduled report configuration using cloud storage APIs. The scrubbing process will look for reports in the expected paths in the bucket or container, and use the latest report found. Primarily intended for use in testing and on backends where scheduled inventory reports are not supported. 
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `false` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_manifest_cache_size + +Amount of memory that can be used to handle Tiered Storage metadata. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_MiB` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_manifest_max_upload_interval_sec + +Minimum interval, in seconds, between partition manifest uploads. Actual time between uploads may be greater than this interval. If this is `null`, metadata is updated after each segment upload. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60s` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_manifest_upload_timeout_ms + +Manifest upload timeout, in milliseconds. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_materialized_manifest_ttl_ms + +The time interval that determines how long a materialized manifest can stay in the cache under contention. This parameter is used for performance tuning. When a spillover manifest is materialized and stored in the cache, and the cache needs to evict it, the cache uses the `cloud_storage_materialized_manifest_ttl_ms` value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest, making it available for eviction. This only affects spillover manifests under contention. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_max_concurrent_hydrations_per_shard + +Maximum concurrent segment hydrations of remote data per CPU core. If unset, the value of `cloud_storage_max_connections / 2` is used, which means that half of the available object storage bandwidth can be used to download data from object storage. If the cloud storage cache is empty, every new segment reader requires a download, which leads to a 1:1 mapping between the number of partitions scanned by a fetch request and the number of parallel downloads. If this value is too large, the downloads can affect other workloads. If Tiered Storage reads cause problems, lower this value. This only affects segment hydrations (downloads); it doesn't affect cached segments.
If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_max_connection_idle_time_ms + +Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated. +This setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_max_connections + +Maximum simultaneous object storage connections per shard, applicable to upload and download activities. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `20` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== cloud_storage_max_materialized_segments_per_shard + +Maximum concurrent readers of remote data per CPU core. If unset, value of `topic_partitions_per_shard` multiplied by 2 is used. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::deprecated[] + +// tag::deprecated[] +=== cloud_storage_max_partition_readers_per_shard + +Maximum partition readers per shard (deprecated) + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `deprecated` +// end::self-managed-only[] + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- +// end::deprecated[] + +=== cloud_storage_max_segment_readers_per_shard + +Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. These readers are cached across Kafka consume requests and store a readahead buffer. 
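+
+For example, you can check the current value and then override it with `rpk`; the value shown is illustrative, not a sizing recommendation:
+
+[,bash]
+----
+rpk cluster config get cloud_storage_max_segment_readers_per_shard
+rpk cluster config set cloud_storage_max_segment_readers_per_shard 2000
+----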
+ +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +// tag::self-managed-only[] +*Aliases:* cloud_storage_max_readers_per_shard +// end::self-managed-only[] + +--- + +=== cloud_storage_max_segments_pending_deletion_per_partition + +The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5000` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_max_throughput_per_shard + +Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second. +This setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_GiB` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_metadata_sync_timeout_ms + +Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_min_chunks_per_segment_threshold + +The minimum number of chunks per segment for trimming to be enabled. If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `18446744073709552000` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `5` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_partial_scrub_interval_ms + +Time interval between two partial scrubs of the same partition. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1h` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_readreplica_manifest_sync_timeout_ms + +Timeout to check if new data is available for partitions in object storage for read replicas. 
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +// tag::deprecated[] +=== cloud_storage_reconciliation_interval_ms + +No description available. + + +*Unit:* milliseconds + +*Requires restart:* Yes + +*Nullable:* No + +--- +// end::deprecated[] + +=== cloud_storage_recovery_temporary_retention_bytes_default + +Retention in bytes for topics created during automated recovery. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1_GiB` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_recovery_topic_validation_depth + +Number of metadata segments to validate, from newest to oldest, when <> is set to `check_manifest_and_segment_metadata`. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Maximum value:* `4294967295` + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_recovery_topic_validation_mode + +Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. For each topic, this reports errors for all partitions, but for each partition, only the first error is reported. + +This property accepts the following parameters: + +- `no_check`: Skips the checks for topic recovery. +- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. Fails if there are connection issues to the object storage. +- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects. + +Example: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0: + +[,bash] +---- +ERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok +ERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error +---- + +Each failing partition error message has the following format: + +[,bash] +---- +ERROR .... [... recovery validation of {}...] - , validation not ok +---- + +At the end of the process, Redpanda outputs a final ERROR message: + +[,bash] +---- +ERROR ... ... - Stopping recovery of {} due to validation error +---- + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `check_manifest_existence` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_region + +Cloud provider region that houses the bucket or container used for storage.
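+
+For example, to point Redpanda at a bucket in a specific region, set the property with `rpk` (the region name is illustrative). Because this property requires a restart, restart the brokers afterward:
+
+[,bash]
+----
+rpk cluster config set cloud_storage_region us-east-1
+----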
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_roles_operation_timeout_ms + +Timeout for IAM role related operations (ms). + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_scrubbing_interval_jitter_ms + +Jitter applied to the object storage scrubbing interval. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10min` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_secret_key + +Cloud provider secret key. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_segment_max_upload_interval_sec + +Time that a segment can be kept locally without uploading it to the object storage, in seconds. + +*Unit:* seconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17179869184`, `17179869183`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1h` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_segment_size_min + +Smallest acceptable segment size in the object storage. Default: `cloud_storage_segment_size_target`/2. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_segment_size_target + +Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_segment_upload_timeout_ms + +Log segment upload timeout, in milliseconds. 
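+
+For example, to extend the segment upload timeout to two minutes, set the property in milliseconds with `rpk` (illustrative value):
+
+[,bash]
+----
+rpk cluster config set cloud_storage_segment_upload_timeout_ms 120000
+----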
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `90s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_spillover_manifest_max_segments + +Maximum number of segments in the spillover manifest that can be offloaded to the object storage. This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_spillover_manifest_size + +The size of the manifest that can be offloaded to object storage. If the size of the local manifest stored in Redpanda exceeds `cloud_storage_spillover_manifest_size` by a factor of two, the spillover mechanism splits the manifest into two parts and uploads one of them to object storage. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `64_KiB` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_throughput_limit_percent + +Maximum throughput used by Tiered Storage per broker expressed as a percentage of the disk bandwidth. If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. The actual usage depends on your workload and the state of the Tiered Storage cache. This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob. + +*Unit:* percent + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `50` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_topic_purge_grace_period_ms + +Grace period during which the purger refuses to purge the topic. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `30s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_trust_file + +Path to the certificate that should be used to validate the server certificate during the TLS handshake.
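+
+For example, to point Redpanda at a custom CA bundle, set the property with `rpk`. The path is an assumption for illustration; use the location of your own CA certificate. This property requires a broker restart to take effect:
+
+[,bash]
+----
+rpk cluster config set cloud_storage_trust_file /etc/redpanda/ca.crt
+----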
+ +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== cloud_storage_upload_ctrl_d_coeff + +Derivative coefficient for upload PID controller. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `0.0` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_upload_ctrl_max_shares + +Maximum number of I/O and CPU shares that archival upload can use. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `1000` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_upload_ctrl_min_shares + +Minimum number of I/O and CPU shares that archival upload can use. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-32768`, `32767`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_upload_ctrl_p_coeff + +Proportional coefficient for upload PID controller. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* number + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `-2.0` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_upload_ctrl_update_interval_ms + +The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage. + +This is an internal-only configuration and should be enabled only after consulting with Redpanda support. + +*Unit:* milliseconds + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `60s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_upload_loop_initial_backoff_ms + +Initial backoff interval when there is nothing to upload for a partition, in milliseconds. + +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `100ms` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_upload_loop_max_backoff_ms + +Maximum backoff interval when there is nothing to upload for a partition, in milliseconds. 
+ +*Unit:* milliseconds + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `tunable` +// end::self-managed-only[] + +*Type:* integer + +*Accepted values:* [`-17592186044416`, `17592186044415`] + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `10s` +endif::[] + +*Nullable:* No + +--- + +=== cloud_storage_url_style + +Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage. + +Leave this property unset (`null`) to use automatic configuration: + +* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed +* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration + +Set this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format. + +*Requires restart:* Yes + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* s3_url_style + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* Yes + +--- + +=== kafka_enable_describe_log_dirs_remote_storage + +Whether to include Tiered Storage as a special remote:// directory in `DescribeLogDirs Kafka` API requests. + +*Requires restart:* No + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* boolean + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `true` +endif::[] + +*Nullable:* No + +--- diff --git a/modules/reference/partials/properties/topic-properties.adoc b/modules/reference/partials/properties/topic-properties.adoc new file mode 100644 index 0000000000..a7d5c04712 --- /dev/null +++ b/modules/reference/partials/properties/topic-properties.adoc @@ -0,0 +1,845 @@ +// This content is autogenerated. Do not edit manually. To override descriptions, use the doc-tools CLI with the --overrides option: https://redpandadata.atlassian.net/wiki/spaces/DOC/pages/1396244485/Review+Redpanda+configuration+properties +// tag::category-retention-compaction[] +=== cleanup.policy + +The cleanup policy to apply for log segments of a topic. +When `cleanup.policy` is set, it overrides the cluster property xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] for the topic. + +*Type:* string + +*Accepted values:* [`delete`, `compact`, `compact,delete`] + +*Related cluster property:* xref:reference:cluster-properties.adoc#log_cleanup_policy[log_cleanup_policy] + +*Nullable:* No + +*Related topics:* + +* xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] + +--- +// end::category-retention-compaction[] + +=== cloud_storage_inventory_hash_path_directory + +Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory. + +*Type:* string + +*Default:* `null` + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + cloud_storage_inventory_hash_store: +---- + + +--- + +=== cloud_storage_materialized_manifest_ttl_ms + +The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. 
When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention. + +*Type:* string + +*Default:* `null` + +*Nullable:* No + +--- + +// tag::category-retention-compaction[] +=== compaction.strategy + +Specifies the strategy used to determine which records to remove during log compaction. The compaction strategy controls how Redpanda identifies and removes duplicate records while preserving the latest value for each key. + +*Type:* string + +*Related cluster property:* xref:reference:cluster-properties.adoc#compaction_strategy[compaction_strategy] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#compaction_strategy[`compaction_strategy`] + +--- +// end::category-retention-compaction[] + +// tag::category-segment-message[] +=== compression.type + +Redpanda ignores this property and always uses producer compression semantics. If producers send compressed data, Redpanda stores and serves it as-is. If producers send uncompressed data, Redpanda stores it uncompressed. + +This property exists for Apache Kafka compatibility. Configure compression in your producers instead of using this topic property. + +Compression reduces message size and improves throughput, but increases CPU utilization. Enable producer batching to increase compression efficiency. + +When set, this property overrides the cluster property xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] for the topic. + +*Type:* string + +*Accepted values:* [`none`, `gzip`, `snappy`, `lz4`, `zstd`] + +*Related cluster property:* xref:reference:cluster-properties.adoc#log_compression_type[log_compression_type] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] +* xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] +* xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching] +* xref:develop:produce-data/configure-producers.adoc#commonly-used-producer-configuration-options[Common producer configuration options] + +--- +// end::category-segment-message[] + +// tag::category-schema-registry[] +=== confluent.key.schema.validation + +Enable validation of the schema ID for keys on a record. This is a compatibility alias for `redpanda.key.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy. + +*Type:* string + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-schema-registry[] +=== confluent.key.subject.name.strategy + +The subject name strategy for keys when `confluent.key.schema.validation` is enabled. This is a compatibility alias for `redpanda.key.subject.name.strategy` that determines how the topic and schema are mapped to a subject name in the Schema Registry. + +*Type:* string + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-schema-registry[] +=== confluent.value.schema.validation + +Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. 
When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy. + +*Type:* string + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-schema-registry[] +=== confluent.value.subject.name.strategy + +The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. This determines how the topic and schema are mapped to a subject name in the Schema Registry. + +*Type:* string + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-retention-compaction[] +=== delete.retention.ms + +The retention time for tombstone records in a compacted topic. Redpanda removes tombstone records after the retention limit is exceeded. + +If you have enabled Tiered Storage and set <> or <> for the topic, you cannot enable tombstone removal. + +If both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic. + +*Type:* string + +*Related cluster property:* xref:reference:cluster-properties.adoc#delete_retention_ms[delete_retention_ms] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`] +* xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal] + +--- +// end::category-retention-compaction[] + +// tag::category-performance-cluster[] +=== flush.bytes + +The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested. + +*Type:* integer + +*Accepted values:* bytes (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#flush_bytes[flush_bytes] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`] + +--- +// end::category-performance-cluster[] + +// tag::category-performance-cluster[] +=== flush.ms + +The maximum delay (in ms) between two subsequent fsyncs. After this delay, the log is automatically fsynced. + +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#flush_ms[flush_ms] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#flush_ms[`flush_ms`] + +--- +// end::category-performance-cluster[] + +// tag::category-tiered-storage[] +=== initial.retention.local.target.bytes + +A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred. 
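+
+For example, to limit the initial local retention transferred during cluster resizing to 1 GiB for a single topic, set the property with `rpk`. The topic name and value are illustrative:
+
+[,bash]
+----
+rpk topic alter-config my-topic --set initial.retention.local.target.bytes=1073741824
+----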
+ +*Type:* integer + +*Accepted values:* bytes (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#initial_retention_local_target_bytes[initial_retention_local_target_bytes] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`] +* xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage] + +--- +// end::category-tiered-storage[] + +// tag::category-tiered-storage[] +=== initial.retention.local.target.ms + +A time-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred. + +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#initial_retention_local_target_ms[initial_retention_local_target_ms] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`] +* xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage] + +--- +// end::category-tiered-storage[] + +// tag::category-retention-compaction[] +=== max.compaction.lag.ms + +The maximum amount of time (in ms) that a log segment can remain unaltered before it is eligible for compaction in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] for the topic. + +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#max_compaction_lag_ms[max_compaction_lag_ms] + +*Nullable:* No + +*Related topics:* + +* xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] +* xref:./cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] +* xref:manage:cluster-maintenance/compaction-settings.adoc#configuration-options[Configure maximum compaction lag] + +--- +// end::category-retention-compaction[] + +// tag::category-segment-message[] +=== max.message.bytes + +The maximum size of a message or batch of a topic. If a compression type is enabled, `max.message.bytes` sets the maximum size of the compressed message or batch. + +If `max.message.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] for the topic. + +*Type:* integer + +*Accepted values:* bytes (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#kafka_batch_max_bytes[kafka_batch_max_bytes] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] +* xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] +* xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching] + +--- +// end::category-segment-message[] + +// tag::category-segment-message[] +=== message.timestamp.type + +The source of a message's timestamp: either the message's creation time or its log append time. + +When `message.timestamp.type` is set, it overrides the cluster property xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] for the topic. 
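+
+For example, to switch a topic to broker-assigned timestamps (the topic name is a placeholder):
+
+[,bash]
+----
+# Use the broker's append time instead of the producer-supplied create time.
+rpk topic alter-config my-topic --set message.timestamp.type=LogAppendTime
+----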
+ +*Type:* string + +*Accepted values:* [`CreateTime`, `LogAppendTime`] + +*Related cluster property:* xref:reference:cluster-properties.adoc#log_message_timestamp_type[log_message_timestamp_type] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] +* xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] + +--- +// end::category-segment-message[] + +// tag::category-retention-compaction[] +=== min.cleanable.dirty.ratio + +The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. + +*Type:* number + +*Accepted values:* [`0`, `1.0`] + +*Related cluster property:* xref:reference:cluster-properties.adoc#min_cleanable_dirty_ratio[min_cleanable_dirty_ratio] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`] + +--- +// end::category-retention-compaction[] + +// tag::category-retention-compaction[] +=== min.compaction.lag.ms + +The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] for the topic. + +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#min_compaction_lag_ms[min_compaction_lag_ms] + +*Nullable:* No + +*Related topics:* + +* xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] +* xref:./cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] +* xref:manage:cluster-maintenance/compaction-settings.adoc#configure-min-compaction-lag[Configure minimum compaction lag] + +--- +// end::category-retention-compaction[] + +// tag::category-tiered-storage[] +=== redpanda.cloud_topic.enabled + +No description available. + +*Type:* string + +*Nullable:* No + +--- +// end::category-tiered-storage[] + +// tag::category-iceberg-integration[] +=== redpanda.iceberg.delete + +Whether the corresponding Iceberg table is deleted upon deleting the topic. + +*Type:* string + +*Nullable:* No + +--- +// end::category-iceberg-integration[] + +// tag::category-iceberg-integration[] +=== redpanda.iceberg.invalid.record.action + +Whether to write invalid records to a dead-letter queue (DLQ). + +*Type:* string + +*Nullable:* No + +*Related topics:* + +* xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors] + +--- +// end::category-iceberg-integration[] + +// tag::category-iceberg-integration[] +=== redpanda.iceberg.mode + +Enable the Iceberg integration for the topic. You can choose one of four modes. + +*Type:* string + +*Nullable:* No + +*Related topics:* + +* xref:manage:iceberg/choose-iceberg-mode.adoc#override-value-schema-latest-default[Choose an Iceberg Mode] + +--- +// end::category-iceberg-integration[] + +// tag::category-iceberg-integration[] +=== redpanda.iceberg.partition.spec + +The link:https://iceberg.apache.org/docs/nightly/partitioning/[partitioning^] specification for the Iceberg table. 
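+
+As an illustration, assuming the topic already has an Iceberg mode enabled and its value schema contains a timestamp column named `event_ts` (a hypothetical column name), a custom spec can be applied with `rpk`:
+
+[,bash]
+----
+# Partition the Iceberg table by hour of the (hypothetical) event_ts column.
+rpk topic alter-config my-topic --set 'redpanda.iceberg.partition.spec=(hour(event_ts))'
+----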
+ +*Type:* string + +*Nullable:* No + +*Related topics:* + +* xref:manage:iceberg/about-iceberg-topics.adoc#use-custom-partitioning[Use custom partitioning] + +--- +// end::category-iceberg-integration[] + +// tag::category-iceberg-integration[] +=== redpanda.iceberg.target.lag.ms + +Controls how often the data in the Iceberg table is refreshed with new data from the topic. Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability. + +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Nullable:* No + +--- +// end::category-iceberg-integration[] + +// tag::category-schema-registry[] +=== redpanda.key.schema.id.validation + +No description available. + +*Type:* string + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-schema-registry[] +=== redpanda.key.subject.name.strategy + +No description available. + +*Type:* string + +*Nullable:* No + +--- +// end::category-schema-registry[] + +// tag::category-performance-cluster[] +=== redpanda.leaders.preference + +The preferred location (rack) for partition leaders of a topic. + +This property inherits the value from the config_ref:default_leaders_preference,true,properties/cluster-properties[] cluster configuration property. You may override the cluster-wide setting by specifying the value for individual topics. + +If the cluster configuration property config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, Leader Pinning is disabled across the cluster. + +*Type:* string + +*Nullable:* No + +*Related topics:* + +* xref:develop:produce-data/leader-pinning.adoc[Leader pinning] + +--- +// end::category-performance-cluster[] + +// tag::category-other[] +// tag::exclude-from-docs[] +=== redpanda.remote.allowgaps + +No description available. + +*Type:* string + +*Nullable:* No + +--- +// end::exclude-from-docs[] +// end::category-other[] + +// tag::category-tiered-storage[] +=== redpanda.remote.delete + +A flag that enables deletion of data from object storage for Tiered Storage when it's deleted from local storage for a topic. + +NOTE: `redpanda.remote.delete` doesn't apply to Remote Read Replica topics: a Remote Read Replica topic isn't deleted from object storage when this flag is `true`. + +*Type:* boolean + +*Accepted values:* [`true`, `false`] + +*Nullable:* No + +*Related topics:* + +* xref:manage:tiered-storage.adoc[Tiered Storage] + +--- +// end::category-tiered-storage[] + +// tag::category-tiered-storage[] +=== redpanda.remote.read + +A flag for enabling Redpanda to fetch data for a topic from object storage to local storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature. + +*Type:* boolean + +*Accepted values:* [`true`, `false`] + +*Nullable:* No + +*Related topics:* + +* xref:manage:tiered-storage.adoc[Tiered Storage] + +--- +// end::category-tiered-storage[] + +// tag::category-remote-read-replica[] +=== redpanda.remote.readreplica + +The name of the object storage bucket for a Remote Read Replica topic. + +CAUTION: Setting `redpanda.remote.readreplica` together with either `redpanda.remote.read` or `redpanda.remote.write` results in an error. 
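+
+For illustration, a Remote Read Replica topic is created with the source bucket supplied at creation time; the topic name (which typically matches the source topic on the origin cluster) and bucket name are placeholders:
+
+[,bash]
+----
+# Create a read-only topic that serves data from an existing Tiered Storage bucket.
+rpk topic create my-remote-topic -c redpanda.remote.readreplica=source-cluster-bucket
+----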
+
+*Type:* string
+
+*Nullable:* No
+
+*Related topics:*
+
+* xref:manage:remote-read-replicas.adoc[Remote Read Replicas]
+
+---
+// end::category-remote-read-replica[]
+
+// tag::category-tiered-storage[]
+=== redpanda.remote.recovery
+
+A flag that enables the recovery or reproduction of a topic from object storage for Tiered Storage. The recovered data is saved in local storage, and the maximum amount of recovered data is determined by the local storage retention limits of the topic.
+
+TIP: You can only configure `redpanda.remote.recovery` when you create a topic. You cannot apply this setting to existing topics.
+
+*Type:* boolean
+
+*Accepted values:* [`true`, `false`]
+
+*Nullable:* No
+
+*Related topics:*
+
+* xref:manage:tiered-storage.adoc[Tiered Storage]
+
+---
+// end::category-tiered-storage[]
+
+// tag::category-tiered-storage[]
+=== redpanda.remote.write
+
+A flag for enabling Redpanda to upload data for a topic from local storage to object storage. When set to `true` together with <>, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.
+
+*Type:* boolean
+
+*Accepted values:* [`true`, `false`]
+
+*Nullable:* No
+
+*Related topics:*
+
+* xref:manage:tiered-storage.adoc[Tiered Storage]
+
+---
+// end::category-tiered-storage[]
+
+// tag::category-schema-registry[]
+=== redpanda.value.schema.id.validation
+
+No description available.
+
+*Type:* string
+
+*Nullable:* No
+
+---
+// end::category-schema-registry[]
+
+// tag::category-schema-registry[]
+=== redpanda.value.subject.name.strategy
+
+No description available.
+
+*Type:* string
+
+*Nullable:* No
+
+---
+// end::category-schema-registry[]
+
+// tag::category-other[]
+// tag::exclude-from-docs[]
+=== redpanda.virtual.cluster.id
+
+No description available.
+
+*Type:* string
+
+*Nullable:* No
+
+---
+// end::exclude-from-docs[]
+// end::category-other[]
+
+// tag::category-performance-cluster[]
+=== replication.factor
+
+The number of replicas of a topic to save in different nodes (brokers) of a cluster.
+
+If `replication.factor` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] for the topic.
+
+NOTE: Although `replication.factor` isn't returned or displayed by xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] as a valid Kafka property, you can set it using xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]. When the `replication.factor` of a topic is altered, it isn't simply a property value that's updated, but rather the actual replica sets of topic partitions that are changed.
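+
+For example, to change the replication factor of an existing topic (the topic name and value are illustrative):
+
+[,bash]
+----
+# Changing replication.factor triggers reassignment of the partition replica sets.
+rpk topic alter-config my-topic --set replication.factor=5
+----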
+ +*Type:* integer + +*Accepted values:* integer (1 or greater) + +*Related cluster property:* xref:reference:cluster-properties.adoc#replication_factor[replication_factor] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] +* xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] +* xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`] +* xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`] +* xref:develop:config-topics.adoc#choose-the-replication-factor[Choose the replication factor] +* xref:develop:config-topics.adoc#change-the-replication-factor[Change the replication factor] + +--- +// end::category-performance-cluster[] + +// tag::category-retention-compaction[] +=== retention.bytes + +A size-based retention limit that configures the maximum size that a topic partition can grow before becoming eligible for cleanup. + +If `retention.bytes` is set to a positive value, it overrides the cluster property xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] for the topic, and the total retained size for the topic is `retention.bytes` multiplied by the number of partitions for the topic. + +When both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, cleanup occurs when either limit is reached. + +*Type:* integer + +*Accepted values:* bytes (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#retention_bytes[retention_bytes] + +*Nullable:* No + +*Related topics:* + +* xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] +* xref:./cluster-properties.adoc#retention_bytes[`retention_bytes`] +* xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] + +--- +// end::category-retention-compaction[] + +// tag::category-tiered-storage[] +=== retention.local.target.bytes + +A size-based retention limit for Tiered Storage that configures the maximum size that a topic partition in local storage can grow before becoming eligible for cleanup. It applies per partition and is equivalent to <> without Tiered Storage. + +*Type:* integer + +*Accepted values:* bytes (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#retention_local_target_bytes[retention_local_target_bytes] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`] +* xref:manage:tiered-storage.adoc[Tiered Storage] + +--- +// end::category-tiered-storage[] + +// tag::category-tiered-storage[] +=== retention.local.target.ms + +A time-based retention limit for Tiered Storage that sets the maximum duration that a log's segment file for a topic is retained in local storage before it's eligible for cleanup. This property is equivalent to <> without Tiered Storage. 
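+
+For example (illustrative values), to keep roughly one day of data in local storage for a Tiered Storage topic while older data remains available in object storage:
+
+[,bash]
+----
+# 86400000 ms = 24 hours of local retention.
+rpk topic alter-config my-topic --set retention.local.target.ms=86400000
+----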
+ +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#retention_local_target_ms[retention_local_target_ms] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`] +* xref:manage:tiered-storage.adoc[Tiered Storage] +* xref:manage:remote-read-replicas.adoc[Remote Read Replicas] + +--- +// end::category-tiered-storage[] + +// tag::category-retention-compaction[] +=== retention.ms + +A time-based retention limit that configures the maximum duration that a log's segment file for a topic is retained before it becomes eligible to be cleaned up. To consume all data, a consumer of the topic must read from a segment before its `retention.ms` elapses, otherwise the segment may be compacted and/or deleted. If a non-positive value, no per-topic limit is applied. + +If `retention.ms` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] for the topic. + +When both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, the earliest occurring limit applies. + +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#retention_ms[retention_ms] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] +* xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] +* xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] + +--- +// end::category-retention-compaction[] + +// tag::category-segment-message[] +=== segment.bytes + +The maximum size of an active log segment for a topic. When the size of an active segment exceeds `segment.bytes`, the segment is closed and a new active segment is created. The closed, inactive segment is then eligible to be cleaned up according to retention properties. + +When `segment.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] for the topic. + +*Type:* integer + +*Accepted values:* bytes (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#log_segment_size[log_segment_size] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] +* xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] +* xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size] +* xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention] +* xref:manage:remote-read-replicas.adoc[Remote Read Replicas] + +--- +// end::category-segment-message[] + +// tag::category-segment-message[] +=== segment.ms + +The maximum duration that a log segment of a topic is active (open for writes and not deletable). A periodic event, with `segment.ms` as its period, forcibly closes the active segment and transitions, or rolls, to a new active segment. The closed (inactive) segment is then eligible to be cleaned up according to cleanup and retention properties. + +If set to a positive duration, `segment.ms` overrides the cluster property xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]. 
Values are automatically clamped between the cluster bounds set by xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] (default: 10 minutes) and xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] (default: 1 year). If your configured value exceeds these bounds, Redpanda uses the bound value and logs a warning. Check current cluster bounds with `rpk cluster config get log_segment_ms_min log_segment_ms_max`. + +*Type:* integer + +*Accepted values:* milliseconds (integer) + +*Related cluster property:* xref:reference:cluster-properties.adoc#segment_ms[segment_ms] + +*Nullable:* No + +*Related topics:* + +* xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`] +* xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] +* xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] +* xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`] +* xref:manage:cluster-maintenance/disk-utilization.adoc#log-rolling[Log rolling] + +--- +// end::category-segment-message[] + +// tag::category-performance-cluster[] +=== write.caching + +The write caching mode to apply to a topic. + +When `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <> and <>, whichever is reached first. + +*Type:* boolean + +*Accepted values:* [`true`, `false`] + +*Related cluster property:* xref:reference:cluster-properties.adoc#write_caching[write_caching] + +*Nullable:* No + +*Related topics:* + +* xref:cluster-properties.adoc#write_caching_default[`write_caching_default`] +* xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`] +* xref:develop:config-topics.adoc#configure-write-caching[Write caching] +* xref:manage:tiered-storage.adoc[Tiered Storage] + +--- +// end::category-performance-cluster[] diff --git a/modules/reference/partials/topic-property-mappings.adoc b/modules/reference/partials/topic-property-mappings.adoc new file mode 100644 index 0000000000..7b7caa995f --- /dev/null +++ b/modules/reference/partials/topic-property-mappings.adoc @@ -0,0 +1,49 @@ +// This content is autogenerated. Do not edit manually. 
To override descriptions, use the doc-tools CLI with the --overrides option: https://redpandadata.atlassian.net/wiki/spaces/DOC/pages/1396244485/Review+Redpanda+configuration+properties +[cols="1a,1a"] +|=== +| Topic property | Corresponding cluster property + +| <> +| xref:./cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] +| <> +| xref:./cluster-properties.adoc#compaction_strategy[`compaction_strategy`] +| <> +| xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] +| <> +| xref:./cluster-properties.adoc#delete_retention_ms[`delete_retention_ms`] +| <> +| xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`] +| <> +| xref:./cluster-properties.adoc#flush_ms[`flush_ms`] +| <> +| xref:./cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`] +| <> +| xref:./cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`] +| <> +| xref:./cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] +| <> +| xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] +| <> +| xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] +| <> +| xref:./cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`] +| <> +| xref:./cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] +| <> +| xref:./cluster-properties.adoc#replication_factor[`replication_factor`] +| <> +| xref:./cluster-properties.adoc#retention_bytes[`retention_bytes`] +| <> +| xref:./cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`] +| <> +| xref:./cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`] +| <> +| xref:./cluster-properties.adoc#retention_ms[`retention_ms`] +| <> +| xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] +| <> +| xref:./cluster-properties.adoc#segment_ms[`segment_ms`] +| <> +| xref:./cluster-properties.adoc#write_caching[`write_caching`] +|=== + diff --git a/modules/reference/property-changes-v25.2.1-to-v25.2.10.json b/modules/reference/property-changes-v25.2.1-to-v25.2.10.json new file mode 100644 index 0000000000..4961643df6 --- /dev/null +++ b/modules/reference/property-changes-v25.2.1-to-v25.2.10.json @@ -0,0 +1,50 @@ +{ + "comparison": { + "oldVersion": "v25.2.1", + "newVersion": "v25.2.10", + "timestamp": "2025-10-28T15:40:10.309Z" + }, + "summary": { + "newProperties": 0, + "changedDefaults": 3, + "changedDescriptions": 0, + "changedTypes": 2, + "deprecatedProperties": 0, + "removedProperties": 0 + }, + "details": { + "newProperties": [], + "changedDefaults": [ + { + "name": "cloud_storage_client_lease_timeout_ms", + "oldDefault": null, + "newDefault": "900s" + }, + { + "name": "kafka_produce_batch_validation", + "oldDefault": null, + "newDefault": "relaxed" + }, + { + "name": "sasl_mechanisms_overrides", + "oldDefault": null, + "newDefault": [] + } + ], + "changedDescriptions": [], + "changedTypes": [ + { + "name": "cloud_storage_client_lease_timeout_ms", + "oldType": "string", + "newType": "integer" + }, + { + "name": "sasl_mechanisms_overrides", + "oldType": "string", + "newType": "array" + } + ], + "deprecatedProperties": [], + "removedProperties": [] + } +} \ No newline at end of file From 2f5ed715b4d8094eb8a01046c87e5536d3eadef0 Mon Sep 17 00:00:00 2001 From: Jake Cahill <45230295+JakeSCahill@users.noreply.github.com> Date: Thu, 30 Oct 2025 12:02:36 +0000 Subject: [PATCH 6/8] Delete 
modules/reference/pages/property-report.json --- modules/reference/pages/property-report.json | 100 ------------------- 1 file changed, 100 deletions(-) delete mode 100644 modules/reference/pages/property-report.json diff --git a/modules/reference/pages/property-report.json b/modules/reference/pages/property-report.json deleted file mode 100644 index 8066bed5a9..0000000000 --- a/modules/reference/pages/property-report.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "empty_descriptions": [ - "cloud_storage_disable_metadata_consistency_checks", - "cloud_storage_reconciliation_interval_ms", - "coproc_max_batch_size", - "coproc_max_inflight_bytes", - "coproc_max_ingest_bytes", - "coproc_offset_flush_interval_ms", - "coproc_supervisor_server", - "dashboard_dir", - "datalake_disk_space_monitor_interval", - "enable_admin_api", - "enable_central_config", - "enable_coproc", - "find_coordinator_timeout_ms", - "full_raft_configuration_recovery_pattern", - "id_allocator_replication", - "kafka_admin_topic_api_rate", - "kafka_client_group_byte_rate_quota", - "kafka_client_group_fetch_byte_rate_quota", - "kafka_quota_balancer_min_shard_throughput_bps", - "kafka_quota_balancer_min_shard_throughput_ratio", - "kafka_quota_balancer_node_period_ms", - "kafka_quota_balancer_window_ms", - "kafka_throughput_throttling_v2", - "leader_balancer_mode", - "log_compaction_adjacent_merge_self_compaction_count", - "max_version", - "min_version", - "raft_max_concurrent_append_requests_per_follower", - "redpanda.cloud_topic.enabled", - "redpanda.key.schema.id.validation", - "redpanda.key.subject.name.strategy", - "redpanda.remote.allowgaps", - "redpanda.value.schema.id.validation", - "redpanda.value.subject.name.strategy", - "redpanda.virtual.cluster.id", - "rm_violation_recovery_policy", - "schema_registry_protobuf_renderer_v2", - "seed_server_meta_topic_partitions", - "seq_table_min_size", - "target_fetch_quota_byte_rate", - "target_quota_byte_rate", - "tm_violation_recovery_policy", - "transaction_coordinator_replication", - "tx_registry_log_capacity", - "tx_registry_sync_timeout_ms", - "use_scheduling_groups" - ], - "deprecated_properties": [ - "cloud_storage_cache_trim_carryover_bytes", - "cloud_storage_disable_metadata_consistency_checks", - "cloud_storage_max_materialized_segments_per_shard", - "cloud_storage_max_partition_readers_per_shard", - "cloud_storage_reconciliation_interval_ms", - "coproc_max_batch_size", - "coproc_max_inflight_bytes", - "coproc_max_ingest_bytes", - "coproc_offset_flush_interval_ms", - "coproc_supervisor_server", - "dashboard_dir", - "datalake_disk_space_monitor_interval", - "enable_admin_api", - "enable_auto_rebalance_on_node_add", - "enable_central_config", - "enable_coproc", - "find_coordinator_timeout_ms", - "full_raft_configuration_recovery_pattern", - "health_monitor_tick_interval", - "id_allocator_replication", - "kafka_admin_topic_api_rate", - "kafka_client_group_byte_rate_quota", - "kafka_client_group_fetch_byte_rate_quota", - "kafka_quota_balancer_min_shard_throughput_bps", - "kafka_quota_balancer_min_shard_throughput_ratio", - "kafka_quota_balancer_node_period_ms", - "kafka_quota_balancer_window_ms", - "kafka_throughput_throttling_v2", - "leader_balancer_mode", - "log_compaction_adjacent_merge_self_compaction_count", - "max_version", - "min_version", - "partition_autobalancing_movement_batch_size_bytes", - "raft_flush_timer_interval_ms", - "raft_max_concurrent_append_requests_per_follower", - "rm_violation_recovery_policy", - "schema_registry_protobuf_renderer_v2", - 
"seed_server_meta_topic_partitions", - "seq_table_min_size", - "target_fetch_quota_byte_rate", - "target_quota_byte_rate", - "tm_violation_recovery_policy", - "transaction_coordinator_replication", - "tx_log_stats_interval_s", - "tx_registry_log_capacity", - "tx_registry_sync_timeout_ms", - "use_scheduling_groups" - ], - "undocumented_properties": [] -} \ No newline at end of file From 3d28a8e889f13ff25ca1e775e0f0520c853fbfa5 Mon Sep 17 00:00:00 2001 From: JakeSCahill Date: Thu, 30 Oct 2025 15:59:11 +0000 Subject: [PATCH 7/8] Update overrides --- .github/workflows/update-property-docs.yml | 31 +++- docs-data/property-overrides.json | 202 +++++++++++++++------ 2 files changed, 173 insertions(+), 60 deletions(-) diff --git a/.github/workflows/update-property-docs.yml b/.github/workflows/update-property-docs.yml index 09f09fd561..efa0b5f5da 100644 --- a/.github/workflows/update-property-docs.yml +++ b/.github/workflows/update-property-docs.yml @@ -50,7 +50,6 @@ jobs: - name: Determine tag id: tag run: | - # Prefer input tag (workflow_dispatch), else use repository_dispatch payload if [ -n "${{ github.event.inputs.tag }}" ]; then echo "tag=${{ github.event.inputs.tag }}" >> $GITHUB_OUTPUT elif [ -n "${{ github.event.client_payload.tag }}" ]; then @@ -60,8 +59,33 @@ jobs: exit 1 fi + - name: Check if tag is newer than antora.yml latest + id: version_check + run: | + set -euo pipefail + TAG="${{ steps.tag.outputs.tag }}" + CURRENT=$(grep 'latest-redpanda-tag:' antora.yml | awk '{print $2}' | tr -d '"') + + echo "📄 Current latest-redpanda-tag in antora.yml: $CURRENT" + echo "🔖 Incoming tag: $TAG" + + # Strip leading 'v' for numeric comparison + CUR_NUM=$(echo "$CURRENT" | sed 's/^v//') + NEW_NUM=$(echo "$TAG" | sed 's/^v//') + + # Compare semver using sort -V + if [ "$(printf "%s\n%s" "$CUR_NUM" "$NEW_NUM" | sort -V | tail -n1)" = "$NEW_NUM" ] && [ "$CUR_NUM" != "$NEW_NUM" ]; then + echo "$TAG is newer than $CURRENT" + echo "is_newer=true" >> $GITHUB_OUTPUT + else + echo "$TAG is not newer than $CURRENT — skipping doc generation" + echo "is_newer=false" >> $GITHUB_OUTPUT + fi + - name: Generate property docs + if: steps.version_check.outputs.is_newer == 'true' run: | + set -euo pipefail echo "Running doc generation for: ${{ steps.tag.outputs.tag }}" npx doc-tools generate property-docs \ --tag "${{ steps.tag.outputs.tag }}" \ @@ -72,6 +96,7 @@ jobs: GITHUB_TOKEN: ${{ env.ACTIONS_BOT_TOKEN }} - name: Create pull request + if: steps.version_check.outputs.is_newer == 'true' uses: peter-evans/create-pull-request@v6 with: token: ${{ env.ACTIONS_BOT_TOKEN }} @@ -81,3 +106,7 @@ jobs: body: | This PR auto-generates updated Redpanda property documentation for **${{ steps.tag.outputs.tag }}**. labels: auto-docs + + - name: Skip notice + if: steps.version_check.outputs.is_newer != 'true' + run: echo "🟡 Skipping doc generation — tag is not newer than latest-redpanda-tag in antora.yml." 
diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json index 1251689ea8..733cc5e7f3 100644 --- a/docs-data/property-overrides.json +++ b/docs-data/property-overrides.json @@ -14,7 +14,13 @@ " - name: ", " address: ", " port: ", - "----" + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: Name for the Admin API listener (TLS configuration is handled separately in the <> broker property)", + "* ``: The externally accessible hostname or IP address that clients use to connect to this broker", + "* ``: The port number for the Admin API endpoint" ], "description": "Network address for the glossterm:Admin API[] server.", "config_scope": "broker", @@ -37,7 +43,14 @@ " key_file: ", " truststore_file: ", " require_client_auth: true", - "----" + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: Name that matches your Admin API listener (defined in the <> broker property)", + "* ``: Full path to the TLS certificate file", + "* ``: Full path to the TLS private key file", + "* ``: Full path to the Certificate Authority file" ], "config_scope": "broker", "category": "redpanda" @@ -53,7 +66,13 @@ " - name: ", " address: ", " port: ", - "----" + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: Name that matches your Kafka API listener (defined in the <> broker property)", + "* ``: The externally accessible hostname or IP address that clients use to connect to this broker", + "* ``: The port number for the Kafka API endpoint" ], "config_scope": "broker", "category": "redpanda" @@ -73,7 +92,12 @@ " advertised_rpc_api:", " address: ", " port: ", - "----" + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: The externally accessible hostname or IP address that other brokers use to communicate with this broker", + "* ``: The port number for the RPC endpoint (default is 33145)" ], "config_scope": "broker", "category": "redpanda" @@ -91,8 +115,7 @@ "category": "pandaproxy" }, "audit_enabled": { - "related_topics": [ - ], + "related_topics": [], "config_scope": "cluster" }, "auto_create_topics_enabled": { @@ -266,8 +289,7 @@ "description": "Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior." }, "cloud_storage_enabled": { - "related_topics": [ - ] + "related_topics": [] }, "cloud_storage_full_scrub_interval_ms": { "description": "Interval, in milliseconds, between a final scrub and the next scrub." @@ -304,8 +326,11 @@ "----", "redpanda:", " cloud_storage_inventory_hash_store: ", - "----" - ] + "----", + "", + "Replace `` with the full path to your desired inventory hash storage directory." + ], + "config_scope": "broker" }, "cloud_storage_inventory_hash_store": { "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", @@ -333,7 +358,8 @@ "description": "Manifest upload timeout, in milliseconds." }, "cloud_storage_materialized_manifest_ttl_ms": { - "description": "The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. 
When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention." + "description": "The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention.", + "config_scope": "cluster" }, "cloud_storage_max_concurrent_hydrations_per_shard": { "description": "Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory." @@ -511,8 +537,7 @@ "config_scope": "cluster" }, "core_balancing_continuous": { - "related_topics": [ - ], + "related_topics": [], "config_scope": "cluster" }, "core_balancing_debounce_timeout": { @@ -576,8 +601,7 @@ }, "default_leaders_preference": { "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\n\nThe list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\n\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.", - "related_topics": [ - ], + "related_topics": [], "config_scope": "cluster" }, "delete.retention.ms": { @@ -692,8 +716,7 @@ }, "http_authentication": { "description": "A list of supported HTTP authentication mechanisms.\n\n*Accepted values:*\n\n* `BASIC`: Basic authentication\n* `OIDC`: OpenID Connect", - "related_topics": [ - ], + "related_topics": [], "config_scope": "cluster" }, "iceberg_backlog_controller_i_coeff": { @@ -760,6 +783,13 @@ ], "config_scope": "cluster" }, + "iceberg_rest_catalog_aws_credentials_source": { + "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. 
If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" + ], + "config_scope": "cluster" + }, "iceberg_rest_catalog_aws_region": { "description": "AWS region for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] when using aws_sigv4 authentication mode.", "related_topics": [ @@ -782,13 +812,6 @@ "description": "Secret used with the client ID to query the OAuth token endpoint for Iceberg REST catalog authentication. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", "config_scope": "cluster" }, - "iceberg_rest_catalog_credentials_source": { - "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", - "related_topics": [ - "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" - ], - "config_scope": "cluster" - }, "iceberg_rest_catalog_crl": { "description": "The contents of a certificate revocation list for `iceberg_rest_catalog_trust`. Takes precedence over `iceberg_rest_catalog_crl_file`.", "config_scope": "cluster" @@ -874,15 +897,58 @@ "config_scope": "cluster" }, "internal_topic_replication_factor": { - "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.", + "description": "Target replication factor for internal topics", "config_scope": "cluster" }, "kafka_api": { "description": "IP address and port of the Kafka API endpoint that handles requests. 
Supports multiple listeners with different configurations.", "related_topics": [ - "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]", "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" ], + "example": [ + ".Basic example", + "[,yaml]", + "----", + "redpanda:", + " kafka_api:", + " - address: ", + " port: ", + " authentication_method: sasl", + "----", + "", + ".Multiple listeners example (for different networks or authentication methods)", + "[,yaml]", + "----", + "redpanda:", + " kafka_api:", + " - name: ", + " address: ", + " port: ", + " authentication_method: none", + " - name: ", + " address: ", + " port: ", + " authentication_method: sasl", + " - name: ", + " address: ", + " port: ", + " authentication_method: mtls_identity", + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: The IP address to bind the listener to (typically `0.0.0.0` for all interfaces)", + "* ``: The port number for the Kafka API endpoint", + "* ``: Name for internal network connections (for example, `internal`)", + "* ``: Name for external network connections (for example, `external`)", + "* ``: Name for mTLS connections (for example, `mtls`)", + "* ``: The IP address for internal connections", + "* ``: The port number for internal Kafka API connections", + "* ``: The IP address for external connections", + "* ``: The port number for external Kafka API connections", + "* ``: The IP address for mTLS connections", + "* ``: The port number for mTLS Kafka API connections" + ], "config_scope": "broker", "category": "redpanda" }, @@ -899,7 +965,16 @@ " key_file: ", " truststore_file: ", " require_client_auth: false", - "----" + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: Name that matches your Kafka API listener (defined in the <> broker property)", + "* ``: Full path to the TLS certificate file", + "* ``: Full path to the TLS private key file", + "* ``: Full path to the Certificate Authority file", + "", + "NOTE: Set `require_client_auth: true` for mutual TLS (mTLS) authentication, or `false` for server-side TLS only." ], "config_scope": "broker", "category": "redpanda" @@ -1069,7 +1144,11 @@ "config_scope": "cluster" }, "max_concurrent_producer_ids": { - "description": "Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting.", + "description": "Maximum number of active producer sessions per shard. Each shard tracks producer IDs using an LRU (Least Recently Used) eviction policy. When the configured limit is exceeded, the least recently used producer IDs are evicted from the cache. IMPORTANT: The default value is unlimited, which can lead to unbounded memory growth and out-of-memory (OOM) crashes in production environments with heavy producer usage, especially when using transactions or idempotent producers. 
It is strongly recommended to set a reasonable limit in production deployments.", + "related_topics": [ + "xref:develop:transactions.adoc#tune-producer-id-limits[Tune producer ID limits]", + "xref:reference:properties/cluster-properties.adoc#transactional_id_expiration_ms[transactional_id_expiration_ms]" + ], "config_scope": "cluster" }, "max_transactions_per_coordinator": { @@ -1149,13 +1228,24 @@ " new_id: ", " new_uuid: \"\"", " ignore_existing_node_id: ", - "----" + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: The current UUID of the broker to override", + "* ``: The new broker ID to assign", + "* ``: The new UUID to assign to the broker", + "* ``: Set to `true` to force override on brokers that already have a node ID, or `false` to apply override only to brokers without existing node IDs", + "* ``: Additional broker UUID for multiple overrides", + "* ``: Additional new broker ID", + "* ``: Additional new UUID", + "* ``: Additional ignore existing node ID flag" ], "config_scope": "broker", "category": "redpanda" }, "oidc_clock_skew_tolerance": { - "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.\n\n*Unit*: seconds", + "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.", "config_scope": "cluster" }, "oidc_discovery_url": { @@ -1237,6 +1327,7 @@ "description": "Number of acknowledgments the producer requires the leader to have received before considering a request complete." }, "produce_batch_delay_ms": { + "description": "Delay (in milliseconds) to wait before sending batch.", "config_scope": "broker", "category": "pandaproxy-client" }, @@ -1357,7 +1448,8 @@ "related_topics": [ "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" ], - "config_scope": "topic" + "config_scope": "topic", + "type": "string" }, "redpanda.remote.recovery": { "description": "A flag that enables the recovery or reproduction of a topic from object storage for Tiered Storage. The recovered data is saved in local storage, and the maximum amount of recovered data is determined by the local storage retention limits of the topic.\n\nTIP: You can only configure `redpanda.remote.recovery` when you create a topic. You cannot apply this setting to existing topics.", @@ -1461,6 +1553,7 @@ "category": "pandaproxy-client" }, "retry_base_backoff_ms": { + "description": "Delay (in milliseconds) for initial retry backoff.", "config_scope": "broker", "category": "pandaproxy-client" }, @@ -1484,7 +1577,13 @@ " key_file: \"\"", " truststore_file: \"\"", " require_client_auth: true", - "----" + "----", + "", + "Replace the following placeholders with your values:", + "", + "* ``: Full path to the RPC TLS certificate file", + "* ``: Full path to the RPC TLS private key file", + "* ``: Full path to the certificate authority file" ], "config_scope": "broker", "category": "redpanda" @@ -1494,7 +1593,7 @@ "config_scope": "cluster" }, "sasl_mechanism": { - "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. 
These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]\n\nNOTE: While the cluster-wide xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] property may support additional mechanisms (PLAIN, GSSAPI, OAUTHBEARER), HTTP Proxy client connections only support SCRAM mechanisms.", "related_topics": [ "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" ], @@ -1503,14 +1602,12 @@ }, "sasl_mechanisms": { "description": "A list of supported SASL mechanisms.\n\n*Accepted values:*\n\n* `SCRAM`\n* `GSSAPI`\n* `OAUTHBEARER`\n* `PLAIN`\n\nNote that in order to enable PLAIN, you must also enable SCRAM.", - "related_topics": [ - ], + "related_topics": [], "config_scope": "cluster" }, "sasl_mechanisms_overrides": { "description": "A list of overrides for SASL mechanisms, defined by listener. SASL mechanisms defined here will replace the ones set in `sasl_mechanisms`. The same limitations apply as for `sasl_mechanisms`.", - "related_topics": [ - ], + "related_topics": [], "config_scope": "cluster" }, "schema_registry_always_normalize": { @@ -1538,8 +1635,7 @@ }, "schema_registry_enable_authorization": { "description": "Enables ACL-based authorization for Schema Registry requests. When `true`, Schema Registry\nuses ACL-based authorization instead of the default `public/user/superuser` authorization model. \nifdef::env-cloud[]\nRequires authentication to be enabled using the `authentication_method` property in the `schema_registry_api` broker configuration.\nendif::[]", - "related_topics": [ - ], + "related_topics": [], "config_scope": "cluster" }, "schema_registry_replication_factor": { @@ -1664,6 +1760,13 @@ "description": "Delete segments older than this age. To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to <>.\n\nFor example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over.", "config_scope": "cluster" }, + "transactional_id_expiration_ms": { + "description": "Expiration time of producer IDs. Measured starting from the time of the last write until now for a given ID. Producer IDs are automatically removed from memory when they expire, which helps manage memory usage. However, this natural cleanup may not be sufficient for workloads with high producer churn rates. 
For applications with long-running transactions, ensure this value accommodates your typical transaction lifetime to avoid premature producer ID expiration.", + "related_topics": [ + "xref:develop:transactions.adoc#tune-producer-id-limits[Tune producer ID limits]", + "xref:reference:properties/cluster-properties.adoc#max_concurrent_producer_ids[max_concurrent_producer_ids]" + ] + }, "upgrade_override_checks": { "config_scope": "broker", "category": "redpanda" @@ -1677,32 +1780,13 @@ "config_scope": "cluster" }, "verbose_logging_timeout_sec_max": { - "example": [ - ".Example", - "[,yaml]", - "----", - "schema_registry:", - " schema_registry_api:", - " address: 0.0.0.0", - " port: 8081", - " authentication_method: http_basic", - " schema_registry_replication_factor: 3", - " mode_mutability: true", - "----" - ], - "related_topics": [ - "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]", - "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]" - ], "config_scope": "broker", "category": "redpanda", - "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis.\n\n*Unit:* seconds" + "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis." }, "write.caching": { "description": "The write caching mode to apply to a topic.\n\nWhen `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <> and <>, whichever is reached first.", "related_topics": [ - "xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]", - "xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`]", "xref:develop:config-topics.adoc#configure-write-caching[Write caching]", "xref:manage:tiered-storage.adoc[Tiered Storage]" ], From 160f365b32db0eaf57ff4e2842a573c470952888 Mon Sep 17 00:00:00 2001 From: JakeSCahill Date: Fri, 31 Oct 2025 16:35:11 +0000 Subject: [PATCH 8/8] Apply latest improvements --- docs-data/property-overrides.json | 28 +- .../examples/v25.2.10-properties.json | 318 +-- .../properties/broker-properties.adoc | 449 ++++- .../properties/cluster-properties.adoc | 1748 +++++++++++++++-- .../properties/object-storage-properties.adoc | 459 ++++- .../partials/properties/topic-properties.adoc | 239 ++- 6 files changed, 2716 insertions(+), 525 deletions(-) diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json index 733cc5e7f3..1cdc132a2a 100644 --- a/docs-data/property-overrides.json +++ b/docs-data/property-overrides.json @@ -482,11 +482,11 @@ "config_scope": "topic" }, "confluent.value.schema.validation": { - "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. 
When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.",
+    "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for <>. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.",
     "config_scope": "topic"
   },
   "confluent.value.subject.name.strategy": {
-    "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. This determines how the topic and schema are mapped to a subject name in the Schema Registry.",
+    "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for <>. This determines how the topic and schema are mapped to a subject name in the Schema Registry.",
     "config_scope": "topic"
   },
   "consumer_group_lag_collection_interval_sec": {
@@ -1465,6 +1465,29 @@
     ],
     "config_scope": "topic"
   },
+  "redpanda.value.schema.id.validation": {
+    "description": "Enable validation of the schema ID for values on a record. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.",
+    "related_topics": [
+      "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]"
+    ],
+    "config_scope": "topic",
+    "type": "boolean",
+    "default": "false"
+  },
+  "redpanda.value.subject.name.strategy": {
+    "description": "The subject name strategy for values when `redpanda.value.schema.id.validation` is enabled. This determines how the topic and schema are mapped to a subject name in the Schema Registry. The `confluent.value.subject.name.strategy` property is a compatibility alias for this property.",
+    "related_topics": [
+      "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]"
+    ],
+    "config_scope": "topic",
+    "type": "string",
+    "acceptable_values": [
+      "TopicNameStrategy",
+      "RecordNameStrategy",
+      "TopicRecordNameStrategy"
+    ],
+    "default": "TopicNameStrategy"
+  },
   "redpanda.virtual.cluster.id": {
     "exclude_from_docs": true,
     "config_scope": "topic"
   },
   "replication.factor": {
     "description": "The number of replicas of a topic to save in different nodes (brokers) of a cluster.\n\nIf `replication.factor` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] for the topic.\n\nNOTE: Although `replication.factor` isn't returned or displayed by xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] as a valid Kafka property, you can set it using xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`].
When the `replication.factor` of a topic is altered, it isn't simply a property value that's updated, but rather the actual replica sets of topic partitions that are changed.", "related_topics": [ - "xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication]", "xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`]", "xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]", "xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`]", diff --git a/modules/reference/examples/v25.2.10-properties.json b/modules/reference/examples/v25.2.10-properties.json index f53045a37b..1e4f4cdf22 100644 --- a/modules/reference/examples/v25.2.10-properties.json +++ b/modules/reference/examples/v25.2.10-properties.json @@ -257,7 +257,6 @@ "maximum": 4294967295, "minimum": 0, "name": "abort_index_segment_size", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -274,7 +273,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "abort_timed_out_transactions_interval_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -294,12 +292,11 @@ ], "defined_in": "src/v/config/node_config.cc", "description": "Network address for the glossterm:Admin API[] server.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n admin:\n - name: \n address: \n port: \n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n admin:\n - name: \n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: Name for the Admin API listener (TLS configuration is handled separately in the <> broker property)\n* ``: The externally accessible hostname or IP address that clients use to connect to this broker\n* ``: The port number for the Admin API endpoint", "items": { "type": "object" }, "name": "admin", - "needs_restart": true, "nullable": false, "type": "array", "visibility": "user" @@ -315,7 +312,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Path to the API specifications for the Admin API.", "name": "admin_api_doc_dir", - "needs_restart": true, "nullable": false, "type": "string", "visibility": "user" @@ -345,12 +341,11 @@ "default": [], "defined_in": "src/v/config/node_config.cc", "description": "Specifies the TLS configuration for the HTTP Admin API.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n admin_api_tls:\n - name: \n enabled: true\n cert_file: \n key_file: \n truststore_file: \n require_client_auth: true\n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n admin_api_tls:\n - name: \n enabled: true\n cert_file: \n key_file: \n truststore_file: \n require_client_auth: true\n----\n\nReplace the following placeholders with your values:\n\n* ``: Name that matches your Admin API listener (defined in the <> broker property)\n* ``: Full path to the TLS certificate file\n* ``: Full path to the TLS private key file\n* ``: Full path to the Certificate Authority file", "items": { "type": "object" }, "name": "admin_api_tls", - "needs_restart": true, "nullable": false, "type": "array", "visibility": "user" @@ -365,7 +360,7 @@ "default": null, "defined_in": "override", "description": "Address of the Kafka API published to the clients. If not set, the <> broker property is used. 
When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n advertised_kafka_api:\n - name: \n address: \n port: \n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n advertised_kafka_api:\n - name: \n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: Name that matches your Kafka API listener (defined in the <> broker property)\n* ``: The externally accessible hostname or IP address that clients use to connect to this broker\n* ``: The port number for the Kafka API endpoint", "is_deprecated": false, "is_topic_property": false, "name": "advertised_kafka_api", @@ -386,7 +381,6 @@ "type": "object" }, "name": "advertised_pandaproxy_api", - "needs_restart": true, "nullable": false, "type": "array" }, @@ -400,7 +394,7 @@ "default": null, "defined_in": "override", "description": "Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n advertised_rpc_api:\n address: \n port: \n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n advertised_rpc_api:\n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: The externally accessible hostname or IP address that other brokers use to communicate with this broker\n* ``: The port number for the RPC endpoint (default is 33145)", "is_deprecated": false, "is_topic_property": false, "name": "advertised_rpc_api", @@ -469,7 +463,6 @@ "defined_in": "src/v/pandaproxy/rest/configuration.cc", "description": "Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API.", "name": "api_doc_dir", - "needs_restart": true, "nullable": false, "type": "string" }, @@ -483,7 +476,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Size of direct write operations to disk in bytes. A larger chunk size can improve performance for write-heavy workloads, but increase latency for these writes as more data is collected before each write operation. A smaller chunk size can decrease write latency, but potentially increase the number of disk I/O operations.", "name": "append_chunk_size", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -692,7 +684,6 @@ "defined_in": "src/v/kafka/client/configuration.cc", "description": "TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect.", "name": "broker_tls", - "needs_restart": true, "nullable": false, "type": "object" }, @@ -703,14 +694,13 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "broker", - "default": "vector", + "default": "std::vector({{\"127.0.0.1\", 9092}})", "defined_in": "src/v/kafka/client/configuration.cc", "description": "Network addresses of the Kafka API servers to which the HTTP Proxy client should connect.", "items": { "type": "object" }, "name": "brokers", - "needs_restart": true, "nullable": false, "type": "array" }, @@ -759,7 +749,6 @@ "defined_in": "src/v/kafka/client/configuration.cc", "description": "Custom identifier to include in the Kafka request header for the HTTP Proxy client. 
This identifier can help debug or monitor client activities.", "name": "client_identifier", - "needs_restart": true, "nullable": true, "type": "string" }, @@ -791,7 +780,6 @@ "description": "AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication.\nTo authenticate using IAM roles, see <>.", "gets_restored": false, "name": "cloud_storage_access_key", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -807,7 +795,6 @@ "description": "Optional API endpoint. The only instance in which you must set this value is when using a custom domain with your object storage service.\n\n- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned.\n- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>.\n- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>.", "gets_restored": false, "name": "cloud_storage_api_endpoint", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -824,7 +811,6 @@ "maximum": 32767, "minimum": -32768, "name": "cloud_storage_api_endpoint_port", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "user" @@ -1004,7 +990,6 @@ "description": "AWS or GCP bucket that should be used to store data.\n\nWARNING: Modifying this property after writing data to a bucket could cause data loss.", "gets_restored": false, "name": "cloud_storage_bucket", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -1021,7 +1006,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "cloud_storage_cache_check_interval", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -1055,7 +1039,6 @@ "description": "Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. 
If not specified, Redpanda uses a default path within the data directory.", "example": ".Example\n[,yaml]\n----\nredpanda:\n cloud_storage_cache_directory: \n----\n\n\nReplace `` with the full path to your desired cache directory.", "name": "cloud_storage_cache_directory", - "needs_restart": true, "nullable": true, "related_topics": [ "xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`]" @@ -1357,7 +1340,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Path to certificate revocation list for <>.", "name": "cloud_storage_crl_file", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -1417,7 +1399,6 @@ "description": null, "is_deprecated": true, "name": "cloud_storage_disable_metadata_consistency_checks", - "needs_restart": true, "nullable": false, "type": null }, @@ -1461,7 +1442,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Disable TLS for all object storage connections.", "name": "cloud_storage_disable_tls", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "user" @@ -1794,13 +1774,13 @@ "cloud_editable": false, "cloud_readonly": false, "cloud_supported": false, - "config_scope": "topic", + "config_scope": "broker", "default": null, "defined_in": "override", "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n cloud_storage_inventory_hash_store: \n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n cloud_storage_inventory_hash_store: \n----\n\nReplace `` with the full path to your desired inventory hash storage directory.", "is_deprecated": false, - "is_topic_property": true, + "is_topic_property": false, "name": "cloud_storage_inventory_hash_path_directory", "type": "string", "visibility": "user" @@ -1817,7 +1797,6 @@ "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", "example": ".Example\n[,yaml]\n----\nredpanda:\n cloud_storage_inventory_hash_store: \n----", "name": "cloud_storage_inventory_hash_path_directory", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -1972,12 +1951,12 @@ "cloud_editable": false, "cloud_readonly": false, "cloud_supported": false, - "config_scope": "topic", + "config_scope": "cluster", "default": null, "defined_in": "override", "description": "The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. 
This only affects spillover manifests under contention.", "is_deprecated": false, - "is_topic_property": true, + "is_topic_property": false, "name": "cloud_storage_materialized_manifest_ttl_ms", "type": "string", "visibility": "user" @@ -2011,7 +1990,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "cloud_storage_max_connection_idle_time_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2028,7 +2006,6 @@ "maximum": 32767, "minimum": -32768, "name": "cloud_storage_max_connections", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "user" @@ -2046,7 +2023,6 @@ "maximum": 4294967295, "minimum": 0, "name": "cloud_storage_max_materialized_segments_per_shard", - "needs_restart": true, "nullable": true, "type": null, "visibility": "deprecated" @@ -2197,7 +2173,6 @@ "description": null, "is_deprecated": true, "name": "cloud_storage_reconciliation_interval_ms", - "needs_restart": true, "nullable": false, "type": null }, @@ -2259,7 +2234,6 @@ "description": "Cloud provider region that houses the bucket or container used for storage.", "gets_restored": false, "name": "cloud_storage_region", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -2276,7 +2250,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "cloud_storage_roles_operation_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2310,7 +2283,6 @@ "gets_restored": false, "is_secret": true, "name": "cloud_storage_secret_key", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -2452,7 +2424,6 @@ "description": "Path to certificate that should be used to validate server certificate during TLS handshake.", "gets_restored": false, "name": "cloud_storage_trust_file", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -2467,7 +2438,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Derivative coefficient for upload PID controller.", "name": "cloud_storage_upload_ctrl_d_coeff", - "needs_restart": true, "nullable": false, "type": "number", "visibility": "tunable" @@ -2484,7 +2454,6 @@ "maximum": 32767, "minimum": -32768, "name": "cloud_storage_upload_ctrl_max_shares", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2501,7 +2470,6 @@ "maximum": 32767, "minimum": -32768, "name": "cloud_storage_upload_ctrl_min_shares", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2516,7 +2484,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Proportional coefficient for upload PID controller.", "name": "cloud_storage_upload_ctrl_p_coeff", - "needs_restart": true, "nullable": false, "type": "number", "visibility": "tunable" @@ -2533,7 +2500,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "cloud_storage_upload_ctrl_update_interval_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2648,7 +2614,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Target backlog size for compaction controller. 
If not set the max backlog size is configured to 80% of total disk space available.", "name": "compaction_ctrl_backlog_size", - "needs_restart": true, "nullable": true, "type": "integer", "visibility": "tunable" @@ -2663,7 +2628,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Derivative coefficient for compaction PID controller.", "name": "compaction_ctrl_d_coeff", - "needs_restart": true, "nullable": false, "type": "number", "visibility": "tunable" @@ -2678,7 +2642,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Integral coefficient for compaction PID controller.", "name": "compaction_ctrl_i_coeff", - "needs_restart": true, "nullable": false, "type": "number", "visibility": "tunable" @@ -2695,7 +2658,6 @@ "maximum": 32767, "minimum": -32768, "name": "compaction_ctrl_max_shares", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2712,7 +2674,6 @@ "maximum": 32767, "minimum": -32768, "name": "compaction_ctrl_min_shares", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2727,7 +2688,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Proportional coefficient for compaction PID controller. This must be negative, because the compaction backlog should decrease when the number of compaction shares increases.", "name": "compaction_ctrl_p_coeff", - "needs_restart": true, "nullable": false, "type": "number", "visibility": "tunable" @@ -2744,7 +2704,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "compaction_ctrl_update_interval_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -2812,7 +2771,7 @@ "cloud_supported": false, "config_scope": "topic", "corresponding_cluster_property": null, - "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", + "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for <>. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", "is_deprecated": false, "is_topic_property": true, "name": "confluent.value.schema.validation", @@ -2828,7 +2787,7 @@ "cloud_supported": false, "config_scope": "topic", "corresponding_cluster_property": null, - "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. This determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for <>. 
This determines how the topic and schema are mapped to a subject name in the Schema Registry.", "is_deprecated": false, "is_topic_property": true, "name": "confluent.value.subject.name.strategy", @@ -2879,7 +2838,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "consumer_heartbeat_interval_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -2905,13 +2863,12 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "broker", - "default": "minutes", + "default": "5 min", "defined_in": "src/v/pandaproxy/rest/configuration.cc", "description": "How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats.", "maximum": 17592186044415, "minimum": -17592186044416, "name": "consumer_instance_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -2958,7 +2915,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "consumer_rebalance_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -2991,7 +2947,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "consumer_request_max_bytes", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -3008,7 +2963,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "consumer_request_min_bytes", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -3024,7 +2978,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "consumer_request_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -3056,7 +3009,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "consumer_session_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -3195,7 +3147,6 @@ "description": null, "is_deprecated": true, "name": "coproc_max_batch_size", - "needs_restart": true, "nullable": false, "type": null }, @@ -3209,7 +3160,6 @@ "description": null, "is_deprecated": true, "name": "coproc_max_inflight_bytes", - "needs_restart": true, "nullable": false, "type": null }, @@ -3223,7 +3173,6 @@ "description": null, "is_deprecated": true, "name": "coproc_max_ingest_bytes", - "needs_restart": true, "nullable": false, "type": null }, @@ -3237,7 +3186,6 @@ "description": null, "is_deprecated": true, "name": "coproc_offset_flush_interval_ms", - "needs_restart": true, "nullable": false, "type": null }, @@ -3251,7 +3199,6 @@ "description": null, "is_deprecated": true, "name": "coproc_supervisor_server", - "needs_restart": true, "nullable": false, "type": null }, @@ -3350,7 +3297,6 @@ "maximum": 4294967295, "minimum": 0, "name": "crash_loop_limit", - "needs_restart": true, "nullable": true, "type": "integer", "visibility": "user" @@ -3368,7 +3314,6 @@ "maximum": 17179869183, "minimum": -17179869184, "name": "crash_loop_sleep_sec", - "needs_restart": true, "nullable": true, "type": "integer", "version": "v24.3.4", @@ -3380,7 +3325,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "2'000ms", + "default": "2000ms", "defined_in": "src/v/config/configuration.cc", "description": "Timeout, in milliseconds, to wait for new topic creation.", "maximum": 17592186044415, @@ -3401,7 +3346,6 @@ "description": null, "is_deprecated": true, "name": "dashboard_dir", - "needs_restart": true, "nullable": false, "type": null }, @@ -3415,7 +3359,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Path to the directory for storing Redpanda's streaming data files.", 
"name": "data_directory", - "needs_restart": true, "nullable": false, "type": "string", "visibility": "user" @@ -3639,7 +3582,6 @@ "description": null, "is_deprecated": true, "name": "datalake_disk_space_monitor_interval", - "needs_restart": true, "nullable": false, "type": null }, @@ -3820,7 +3762,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "leaders_preference", + "default": "none", "defined_in": "src/v/config/configuration.cc", "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\n\nThe list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\n\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.", "enterprise_value": "Any rack preference (not `none`)", @@ -3931,7 +3873,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "CAUTION: Enabling `developer_mode` isn't recommended for production use.\n\nEnable developer mode, which skips most of the checks performed at startup.", "name": "developer_mode", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "tunable" @@ -3996,7 +3937,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Disable batch cache in log manager.", "name": "disable_batch_cache", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "tunable" @@ -4026,7 +3966,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Disable registering the metrics exposed on the internal `/metrics` endpoint.", "name": "disable_metrics", - "needs_restart": true, "nullable": false, "type": "boolean" }, @@ -4040,7 +3979,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Disable registering the metrics exposed on the `/public_metrics` endpoint.", "name": "disable_public_metrics", - "needs_restart": true, "nullable": false, "type": "boolean" }, @@ -4085,7 +4023,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. This is an emergency shutoff button.", "name": "emergency_disable_data_transforms", - "needs_restart": true, "nullable": false, "related_topics": [ "xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`]" @@ -4104,7 +4041,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Controls how a new cluster is formed. All brokers in a cluster must have the same value.\n\n<> to form a cluster.\n\nTIP: For backward compatibility, `true` is the default. 
Redpanda recommends using `false` in production environments to prevent accidental cluster formation.", "name": "empty_seed_starts_cluster", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "user" @@ -4119,7 +4055,6 @@ "description": null, "is_deprecated": true, "name": "enable_admin_api", - "needs_restart": true, "nullable": false, "type": null }, @@ -4149,7 +4084,6 @@ "description": null, "is_deprecated": true, "name": "enable_central_config", - "needs_restart": true, "nullable": false, "type": null }, @@ -4225,7 +4159,6 @@ "description": null, "is_deprecated": true, "name": "enable_coproc", - "needs_restart": true, "nullable": false, "type": null }, @@ -4273,7 +4206,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Enable idempotent producers.", "name": "enable_idempotence", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "user" @@ -4336,7 +4268,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Enable PID file. You should not need to change.", "name": "enable_pid_file", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "tunable" @@ -4404,7 +4335,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Enable transactions (atomic writes).", "name": "enable_transactions", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "user" @@ -4575,7 +4505,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "fetch_session_eviction_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -4590,7 +4519,6 @@ "description": null, "is_deprecated": true, "name": "find_coordinator_timeout_ms", - "needs_restart": true, "nullable": false, "type": null }, @@ -4605,7 +4533,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Controls whether Redpanda starts in FIPS mode. This property allows for three values: \n\n* Disabled - Redpanda does not start in FIPS mode.\n\n* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods.\n\n* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. 
If the file does not exist or does not return `1`, Redpanda immediately exits.", "name": "fips_mode", - "needs_restart": true, "nullable": false, "type": "fips_mode_flag", "visibility": "user" @@ -4658,7 +4585,6 @@ "description": null, "is_deprecated": true, "name": "full_raft_configuration_recovery_pattern", - "needs_restart": true, "nullable": false, "type": null }, @@ -4717,7 +4643,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "30'000ms", + "default": "30000ms", "defined_in": "src/v/config/configuration.cc", "description": "Timeout for new member joins.", "maximum": 17592186044415, @@ -4791,7 +4717,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "health_manager_tick_interval", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -5097,10 +5022,13 @@ "config_scope": "cluster", "default": null, "defined_in": "src/v/config/configuration.cc", - "description": "Source of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to cloud_storage_credentials_source when using aws_sigv4 authentication mode. Accepted values: config_file, aws_instance_metadata, sts, gcp_instance_metadata, azure_vm_instance_metadata, azure_aks_oidc_federation.", + "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", "name": "iceberg_rest_catalog_credentials_source", "needs_restart": true, "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" + ], "type": "object", "visibility": "user" }, @@ -5201,24 +5129,6 @@ "type": "string", "visibility": "user" }, - "iceberg_rest_catalog_credentials_source": { - "cloud_byoc_only": true, - "cloud_editable": true, - "cloud_readonly": false, - "cloud_supported": true, - "config_scope": "cluster", - "default": null, - "defined_in": "override", - "description": "ifndef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] when using aws_sigv4 authentication mode.\nendif::[]\n\nifdef::env-cloud[]\nSource of AWS credentials for Iceberg REST catalog SigV4 authentication. 
If providing explicit credentials using `iceberg_rest_catalog_aws_access_key` and `iceberg_rest_catalog_aws_secret_key` for Glue catalog authentication, you must set this property to `config_file`.\nendif::[]\n\n*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", - "is_deprecated": false, - "is_topic_property": false, - "name": "iceberg_rest_catalog_credentials_source", - "related_topics": [ - "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" - ], - "type": "string", - "visibility": "user" - }, "iceberg_rest_catalog_crl": { "cloud_byoc_only": true, "cloud_editable": true, @@ -5400,7 +5310,7 @@ "cloud_readonly": false, "cloud_supported": true, "config_scope": "cluster", - "default": "milliseconds", + "default": "std::chrono::milliseconds{1min}", "defined_in": "src/v/config/configuration.cc", "description": "Default value for the redpanda.iceberg.target.lag.ms topic property, which controls how often data in an Iceberg table is refreshed with new data from the corresponding Redpanda topic. Redpanda attempts to commit all the data produced to the topic within the lag target in a best effort fashion, subject to resource availability.", "maximum": 17592186044415, @@ -5456,7 +5366,6 @@ "maximum": 32767, "minimum": -32768, "name": "id_allocator_batch_size", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -5473,7 +5382,6 @@ "maximum": 32767, "minimum": -32768, "name": "id_allocator_log_capacity", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -5488,7 +5396,6 @@ "description": null, "is_deprecated": true, "name": "id_allocator_replication", - "needs_restart": true, "nullable": false, "type": null }, @@ -5578,11 +5485,10 @@ "config_scope": "cluster", "default": "3", "defined_in": "src/v/config/configuration.cc", - "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.", + "description": "Target replication factor for internal topics", "maximum": 2147483647, "minimum": -2147483648, "name": "internal_topic_replication_factor", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "user" @@ -5599,7 +5505,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "join_retry_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -5614,7 +5519,6 @@ "description": null, "is_deprecated": true, "name": "kafka_admin_topic_api_rate", - "needs_restart": true, "nullable": false, "type": null }, @@ -5633,14 +5537,13 @@ ], "defined_in": "src/v/config/node_config.cc", "description": "IP address and port of the Kafka API endpoint that handles requests. 
Supports multiple listeners with different configurations.", + "example": ".Basic example\n[,yaml]\n----\nredpanda:\n kafka_api:\n - address: \n port: \n authentication_method: sasl\n----\n\n.Multiple listeners example (for different networks or authentication methods)\n[,yaml]\n----\nredpanda:\n kafka_api:\n - name: \n address: \n port: \n authentication_method: none\n - name: \n address: \n port: \n authentication_method: sasl\n - name: \n address: \n port: \n authentication_method: mtls_identity\n----\n\nReplace the following placeholders with your values:\n\n* ``: The IP address to bind the listener to (typically `0.0.0.0` for all interfaces)\n* ``: The port number for the Kafka API endpoint\n* ``: Name for internal network connections (for example, `internal`)\n* ``: Name for external network connections (for example, `external`)\n* ``: Name for mTLS connections (for example, `mtls`)\n* ``: The IP address for internal connections\n* ``: The port number for internal Kafka API connections\n* ``: The IP address for external connections\n* ``: The port number for external Kafka API connections\n* ``: The IP address for mTLS connections\n* ``: The port number for mTLS Kafka API connections", "items": { "type": "object" }, "name": "kafka_api", - "needs_restart": true, "nullable": false, "related_topics": [ - "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]", "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" ], "type": "array", @@ -5656,12 +5559,11 @@ "default": [], "defined_in": "src/v/config/node_config.cc", "description": "Transport Layer Security (TLS) configuration for the Kafka API endpoint.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n kafka_api_tls:\n - name: \n enabled: true\n cert_file: \n key_file: \n truststore_file: \n require_client_auth: false\n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n kafka_api_tls:\n - name: \n enabled: true\n cert_file: \n key_file: \n truststore_file: \n require_client_auth: false\n----\n\nReplace the following placeholders with your values:\n\n* ``: Name that matches your Kafka API listener (defined in the <> broker property)\n* ``: Full path to the TLS certificate file\n* ``: Full path to the TLS private key file\n* ``: Full path to the Certificate Authority file\n\nNOTE: Set `require_client_auth: true` for mutual TLS (mTLS) authentication, or `false` for server-side TLS only.", "items": { "type": "object" }, "name": "kafka_api_tls", - "needs_restart": true, "nullable": false, "type": "array", "visibility": "user" @@ -5693,7 +5595,6 @@ "description": null, "is_deprecated": true, "name": "kafka_client_group_byte_rate_quota", - "needs_restart": true, "nullable": false, "type": null }, @@ -5707,7 +5608,6 @@ "description": null, "is_deprecated": true, "name": "kafka_client_group_fetch_byte_rate_quota", - "needs_restart": true, "nullable": false, "type": null }, @@ -5861,7 +5761,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "30'000ms", + "default": "30000ms", "defined_in": "src/v/config/configuration.cc", "description": "Kafka group recovery timeout.", "maximum": 17592186044415, @@ -6004,7 +5904,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Smoothing factor for Kafka queue depth control depth tracking.", "name": "kafka_qdc_depth_alpha", - "needs_restart": true, "nullable": false, "type": "number", "visibility": "tunable" @@ -6021,7 +5920,6 @@ "maximum": 17592186044415, "minimum": 
-17592186044416, "name": "kafka_qdc_depth_update_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -6036,7 +5934,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Enable kafka queue depth control.", "name": "kafka_qdc_enable", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "user" @@ -6051,7 +5948,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Queue depth when idleness is detected in Kafka queue depth control.", "name": "kafka_qdc_idle_depth", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -6066,7 +5962,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Smoothing parameter for Kafka queue depth control latency tracking.", "name": "kafka_qdc_latency_alpha", - "needs_restart": true, "nullable": false, "type": "number", "visibility": "tunable" @@ -6081,7 +5976,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Maximum queue depth used in Kafka queue depth control.", "name": "kafka_qdc_max_depth", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -6098,7 +5992,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "kafka_qdc_max_latency_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "user" @@ -6113,7 +6006,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Minimum queue depth used in Kafka queue depth control.", "name": "kafka_qdc_min_depth", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -6128,7 +6020,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Number of windows used in Kafka queue depth control latency tracking.", "name": "kafka_qdc_window_count", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -6145,7 +6036,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "kafka_qdc_window_size_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -6160,7 +6050,6 @@ "description": null, "is_deprecated": true, "name": "kafka_quota_balancer_min_shard_throughput_bps", - "needs_restart": true, "nullable": false, "type": null }, @@ -6174,7 +6063,6 @@ "description": null, "is_deprecated": true, "name": "kafka_quota_balancer_min_shard_throughput_ratio", - "needs_restart": true, "nullable": false, "type": null }, @@ -6188,7 +6076,6 @@ "description": null, "is_deprecated": true, "name": "kafka_quota_balancer_node_period_ms", - "needs_restart": true, "nullable": false, "type": null }, @@ -6202,7 +6089,6 @@ "description": null, "is_deprecated": true, "name": "kafka_quota_balancer_window_ms", - "needs_restart": true, "nullable": false, "type": null }, @@ -6233,7 +6119,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Maximum size of the user-space receive buffer. 
If `null`, this limit is not applied.", "name": "kafka_rpc_server_stream_recv_buf", - "needs_restart": true, "nullable": true, "type": "integer", "visibility": "tunable" @@ -6250,7 +6135,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "kafka_rpc_server_tcp_recv_buf", - "needs_restart": true, "nullable": true, "type": "integer" }, @@ -6266,7 +6150,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "kafka_rpc_server_tcp_send_buf", - "needs_restart": true, "nullable": true, "type": "integer" }, @@ -6467,7 +6350,6 @@ "description": null, "is_deprecated": true, "name": "kafka_throughput_throttling_v2", - "needs_restart": true, "nullable": false, "type": null }, @@ -6515,7 +6397,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Key-value maximum segment size (in bytes).", "name": "kvstore_max_segment_size", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -6547,7 +6428,6 @@ "description": null, "is_deprecated": true, "name": "leader_balancer_mode", - "needs_restart": true, "nullable": false, "type": null }, @@ -6675,7 +6555,6 @@ "description": null, "is_deprecated": true, "name": "log_compaction_adjacent_merge_self_compaction_count", - "needs_restart": true, "nullable": false, "type": null }, @@ -6869,7 +6748,7 @@ "cloud_readonly": true, "cloud_supported": true, "config_scope": "cluster", - "default": "weeks", + "default": "2 weeks", "defined_in": "src/v/config/configuration.cc", "description": "Default lifetime of log segments. If `null`, the property is disabled, and no default lifetime is set. Any value under 60 seconds (60000 ms) is rejected. This property can also be set in the Kafka API using the Kafka-compatible alias, `log.roll.ms`. The topic property `segment.ms` overrides the value of `log_segment_ms` at the topic level.", "maximum": 17592186044415, @@ -7082,12 +6961,16 @@ "config_scope": "cluster", "default": "Maximum value", "defined_in": "src/v/config/configuration.cc", - "description": "Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting.", + "description": "Maximum number of active producer sessions per shard. Each shard tracks producer IDs using an LRU (Least Recently Used) eviction policy. When the configured limit is exceeded, the least recently used producer IDs are evicted from the cache. IMPORTANT: The default value is unlimited, which can lead to unbounded memory growth and out-of-memory (OOM) crashes in production environments with heavy producer usage, especially when using transactions or idempotent producers. 
It is strongly recommended to set a reasonable limit in production deployments.", "maximum": 18446744073709551615, "minimum": 0, "name": "max_concurrent_producer_ids", "needs_restart": false, "nullable": false, + "related_topics": [ + "xref:develop:transactions.adoc#tune-producer-id-limits[Tune producer ID limits]", + "xref:reference:properties/cluster-properties.adoc#transactional_id_expiration_ms[transactional_id_expiration_ms]" + ], "type": "integer", "visibility": "tunable" }, @@ -7127,7 +7010,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "30'000ms", + "default": "30000ms", "defined_in": "src/v/config/configuration.cc", "description": "Fail-safe maximum throttle delay on Kafka requests.", "maximum": 17592186044415, @@ -7168,7 +7051,6 @@ "description": null, "is_deprecated": true, "name": "max_version", - "needs_restart": true, "nullable": false, "type": null }, @@ -7184,7 +7066,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "members_backend_retry_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -7215,7 +7096,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Threshold for log messages that contain a larger memory allocation than specified.", "name": "memory_allocation_warning_threshold", - "needs_restart": true, "nullable": true, "type": "integer", "visibility": "tunable" @@ -7246,13 +7126,12 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "3'000ms", + "default": "3000ms", "defined_in": "src/v/config/configuration.cc", "description": "Interval for metadata dissemination batching.", "maximum": 17592186044415, "minimum": -17592186044416, "name": "metadata_dissemination_interval_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -7269,7 +7148,6 @@ "maximum": 32767, "minimum": -32768, "name": "metadata_dissemination_retries", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -7280,13 +7158,12 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "0'500ms", + "default": "0500ms", "defined_in": "src/v/config/configuration.cc", "description": "Delay before retrying a topic lookup in a shard or other meta tables.", "maximum": 17592186044415, "minimum": -17592186044416, "name": "metadata_dissemination_retry_delay_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -7303,7 +7180,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "metadata_status_wait_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -7442,7 +7318,6 @@ "description": null, "is_deprecated": true, "name": "min_version", - "needs_restart": true, "nullable": false, "type": null }, @@ -7474,7 +7349,6 @@ "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", "description": "Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects.", "name": "mode_mutability", - "needs_restart": true, "nullable": false, "type": "boolean" }, @@ -7489,7 +7363,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "A number that uniquely identifies the broker within the cluster. 
If `null` (the default value), Redpanda automatically assigns an ID. If set, it must be non-negative value.\n\n.Do not set `node_id` manually.\n[WARNING]\n====\nRedpanda assigns unique IDs automatically to prevent issues such as:\n\n- Brokers with empty disks rejoining the cluster.\n- Conflicts during recovery or scaling.\n\nManually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures.\n====\n\nBroker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed.", "name": "node_id", - "needs_restart": true, "nullable": true, "type": "integer", "visibility": "user" @@ -7504,12 +7377,11 @@ "default": [], "defined_in": "src/v/config/node_config.cc", "description": "List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID.\n\nRemove this property after the cluster restarts successfully and operates normally. This prevents reapplication and maintains consistent configuration across brokers.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n node_id_overrides:\n - current_uuid: \"\"\n new_id: \n new_uuid: \"\"\n ignore_existing_node_id: \n - current_uuid: \"\"\n new_id: \n new_uuid: \"\"\n ignore_existing_node_id: \n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n node_id_overrides:\n - current_uuid: \"\"\n new_id: \n new_uuid: \"\"\n ignore_existing_node_id: \n - current_uuid: \"\"\n new_id: \n new_uuid: \"\"\n ignore_existing_node_id: \n----\n\nReplace the following placeholders with your values:\n\n* ``: The current UUID of the broker to override\n* ``: The new broker ID to assign\n* ``: The new UUID to assign to the broker\n* ``: Set to `true` to force override on brokers that already have a node ID, or `false` to apply override only to brokers without existing node IDs\n* ``: Additional broker UUID for multiple overrides\n* ``: Additional new broker ID\n* ``: Additional new UUID\n* ``: Additional ignore existing node ID flag", "items": { "type": "config::node_id_override" }, "name": "node_id_overrides", - "needs_restart": true, "nullable": false, "type": "array", "visibility": "user" @@ -7543,7 +7415,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "node_management_operation_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -7588,9 +7459,9 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "seconds", + "default": "", "defined_in": "src/v/config/configuration.cc", - "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.\n\n*Unit*: seconds", + "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.", "maximum": 17179869183, "minimum": -17179869184, "name": "oidc_clock_skew_tolerance", @@ -7676,7 +7547,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Path to the configuration file used by OpenSSL to properly load the FIPS-compliant module.", "name": "openssl_config_file", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -7692,7 +7562,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Path to the directory that contains the OpenSSL FIPS-compliant module. 
The filename that Redpanda looks for is `fips.so`.", "name": "openssl_module_directory", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "user" @@ -7717,7 +7586,6 @@ "type": "object" }, "name": "pandaproxy_api", - "needs_restart": true, "nullable": false, "type": "array" }, @@ -7735,7 +7603,6 @@ "type": "object" }, "name": "pandaproxy_api_tls", - "needs_restart": true, "nullable": false, "type": "array" }, @@ -7940,7 +7807,6 @@ "maximum": 32767, "minimum": -32768, "name": "produce_ack_level", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -7956,7 +7822,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "produce_batch_delay_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -7969,7 +7834,7 @@ "config_scope": "broker", "default": null, "defined_in": "override", - "description": "Configuration property: produce_batch_delay_ms", + "description": "Delay (in milliseconds) to wait before sending batch.", "is_deprecated": false, "is_topic_property": false, "name": "produce_batch_delay_ms", @@ -7989,7 +7854,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "produce_batch_record_count", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -8006,7 +7870,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "produce_batch_size_bytes", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -8021,7 +7884,6 @@ "defined_in": "src/v/kafka/client/configuration.cc", "description": "Enable or disable compression by the Kafka client. Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd].", "name": "produce_compression_type", - "needs_restart": true, "nullable": false, "type": "string" }, @@ -8037,7 +7899,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "produce_shutdown_delay_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -8085,7 +7946,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones.", "name": "rack", - "needs_restart": true, "nullable": true, "related_topics": [ "xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness]" @@ -8099,7 +7959,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "1'500ms", + "default": "1500ms", "defined_in": "src/v/config/configuration.cc", "description": "Election timeout expressed in milliseconds.", "maximum": 17592186044415, @@ -8153,7 +8013,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "raft_flush_timer_interval_ms", - "needs_restart": true, "nullable": false, "type": null, "visibility": "deprecated" @@ -8168,7 +8027,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "The number of failed heartbeats after which an unresponsive TCP connection is forcibly closed. 
To disable forced disconnection, set to 0.", "name": "raft_heartbeat_disconnect_failures", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8213,7 +8071,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "10'000ms", + "default": "10000ms", "defined_in": "src/v/config/configuration.cc", "description": "Raft I/O timeout.", "maximum": 17592186044415, @@ -8264,7 +8122,6 @@ "description": null, "is_deprecated": true, "name": "raft_max_concurrent_append_requests_per_follower", - "needs_restart": true, "nullable": false, "type": null }, @@ -8385,7 +8242,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Maximum size of requests cached for replication.", "name": "raft_replicate_batch_window_size", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8402,7 +8258,6 @@ "maximum": 4294967295, "minimum": 0, "name": "raft_smp_max_non_local_requests", - "needs_restart": true, "nullable": true, "type": "integer", "visibility": "tunable" @@ -8453,7 +8308,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "readers_cache_eviction_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8483,7 +8337,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Minimum amount of free memory maintained by the batch cache background reclaimer.", "name": "reclaim_batch_cache_min_free", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8494,13 +8347,12 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "3'000ms", + "default": "3000ms", "defined_in": "src/v/config/configuration.cc", "description": "Starting from the last point in time when memory was reclaimed from the batch cache, this is the duration during which the amount of memory to reclaim grows at a significant rate, based on heuristics about the amount of available memory.", "maximum": 17592186044415, "minimum": -17592186044416, "name": "reclaim_growth_window", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8515,7 +8367,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Maximum batch cache reclaim size.", "name": "reclaim_max_size", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8530,7 +8381,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Minimum batch cache reclaim size.", "name": "reclaim_min_size", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8541,13 +8391,12 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "10'000ms", + "default": "10000ms", "defined_in": "src/v/config/configuration.cc", "description": "If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed.", "maximum": 17592186044415, "minimum": -17592186044416, "name": "reclaim_stable_window", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8564,7 +8413,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "recovery_append_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8580,7 +8428,6 @@ "defined_in": 
"src/v/config/node_config.cc", "description": "If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed.", "name": "recovery_mode_enabled", - "needs_restart": true, "nullable": false, "related_topics": [ "xref:manage:recovery-mode.adoc[recovery mode]" @@ -8605,7 +8452,7 @@ "type": "string" }, "redpanda.iceberg.delete": { - "acceptable_values": "", + "acceptable_values": "[`true`, `false`]", "category": "iceberg-integration", "cloud_byoc_only": false, "cloud_editable": false, @@ -8618,7 +8465,7 @@ "is_topic_property": true, "name": "redpanda.iceberg.delete", "source_file": "src/v/kafka/server/handlers/topics/types.h", - "type": "string" + "type": "boolean" }, "redpanda.iceberg.invalid.record.action": { "acceptable_values": "", @@ -8800,7 +8647,7 @@ "type": "boolean" }, "redpanda.remote.readreplica": { - "acceptable_values": "[`true`, `false`]", + "acceptable_values": "", "category": "remote-read-replica", "cloud_byoc_only": false, "cloud_editable": false, @@ -8816,7 +8663,7 @@ "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" ], "source_file": "src/v/kafka/server/handlers/topics/types.h", - "type": "boolean" + "type": "string" }, "redpanda.remote.recovery": { "acceptable_values": "[`true`, `false`]", @@ -8851,7 +8698,6 @@ "is_topic_property": true, "name": "redpanda.remote.write", "related_topics": [ - "xref:manage:tiered-storage.adoc[Tiered Storage]", "xref:manage:tiered-storage.adoc[Tiered Storage]" ], "source_file": "src/v/kafka/server/handlers/topics/types.h", @@ -8866,12 +8712,16 @@ "cloud_supported": false, "config_scope": "topic", "corresponding_cluster_property": null, - "description": "", + "default": "false", + "description": "Enable validation of the schema ID for values on a record. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", "is_deprecated": false, "is_topic_property": true, "name": "redpanda.value.schema.id.validation", + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], "source_file": "src/v/kafka/server/handlers/topics/types.h", - "type": "string" + "type": "boolean" }, "redpanda.value.subject.name.strategy": { "acceptable_values": "", @@ -8882,10 +8732,14 @@ "cloud_supported": false, "config_scope": "topic", "corresponding_cluster_property": null, - "description": "", + "default": "TopicNameStrategy", + "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. 
This determines how the topic and schema are mapped to a subject name in the Schema Registry.", "is_deprecated": false, "is_topic_property": true, "name": "redpanda.value.subject.name.strategy", + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], "source_file": "src/v/kafka/server/handlers/topics/types.h", "type": "string" }, @@ -8933,7 +8787,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "replicate_append_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" @@ -8952,7 +8805,6 @@ "is_topic_property": true, "name": "replication.factor", "related_topics": [ - "xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication]", "xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`]", "xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]", "xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`]", @@ -9206,7 +9058,6 @@ "defined_in": "src/v/kafka/client/configuration.cc", "description": "Number of times to retry a request to a broker.", "name": "retries", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -9222,7 +9073,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "retry_base_backoff_ms", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -9235,7 +9085,7 @@ "config_scope": "broker", "default": null, "defined_in": "override", - "description": "Configuration property: retry_base_backoff_ms", + "description": "Delay (in milliseconds) for initial retry backoff.", "is_deprecated": false, "is_topic_property": false, "name": "retry_base_backoff_ms", @@ -9269,7 +9119,6 @@ "description": null, "is_deprecated": true, "name": "rm_violation_recovery_policy", - "needs_restart": true, "nullable": false, "type": null }, @@ -9285,7 +9134,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "rpc_client_connections_per_peer", - "needs_restart": true, "nullable": false, "type": "integer" }, @@ -9303,7 +9151,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "IP address and port for the Remote Procedure Call (RPC) server.", "name": "rpc_server", - "needs_restart": true, "nullable": false, "type": "object", "visibility": "user" @@ -9335,7 +9182,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "rpc_server_listen_backlog", - "needs_restart": true, "nullable": true, "type": "integer", "visibility": "user" @@ -9352,7 +9198,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "rpc_server_tcp_recv_buf", - "needs_restart": true, "nullable": true, "type": "integer" }, @@ -9368,7 +9213,6 @@ "maximum": 2147483647, "minimum": -2147483648, "name": "rpc_server_tcp_send_buf", - "needs_restart": true, "nullable": true, "type": "integer" }, @@ -9388,9 +9232,8 @@ }, "defined_in": "src/v/config/node_config.cc", "description": "TLS configuration for the RPC server.", - "example": ".Example\n[,yaml]\n----\nredpanda:\n rpc_server_tls:\n enabled: true\n cert_file: \"\"\n key_file: \"\"\n truststore_file: \"\"\n require_client_auth: true\n----", + "example": ".Example\n[,yaml]\n----\nredpanda:\n rpc_server_tls:\n enabled: true\n cert_file: \"\"\n key_file: \"\"\n truststore_file: \"\"\n require_client_auth: true\n----\n\nReplace the following placeholders with your values:\n\n* ``: Full path to the RPC TLS certificate file\n* ``: Full path to the RPC TLS private key file\n* ``: Full path to the certificate authority 
file", "name": "rpc_server_tls", - "needs_restart": true, "nullable": false, "type": "object", "visibility": "user" @@ -9574,9 +9417,8 @@ "config_scope": "broker", "default": "", "defined_in": "src/v/kafka/client/configuration.cc", - "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]\n\nNOTE: While the cluster-wide xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] property may support additional mechanisms (PLAIN, GSSAPI, OAUTHBEARER), HTTP Proxy client connections only support SCRAM mechanisms.", "name": "sasl_mechanism", - "needs_restart": true, "nullable": false, "related_topics": [ "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" @@ -9668,7 +9510,6 @@ "type": "object" }, "name": "schema_registry_api", - "needs_restart": true, "nullable": false, "type": "array" }, @@ -9686,7 +9527,6 @@ "type": "object" }, "name": "schema_registry_api_tls", - "needs_restart": true, "nullable": false, "type": "array" }, @@ -9718,7 +9558,6 @@ "description": null, "is_deprecated": true, "name": "schema_registry_protobuf_renderer_v2", - "needs_restart": true, "nullable": false, "type": null }, @@ -9735,7 +9574,6 @@ "maximum": 32767, "minimum": -32768, "name": "schema_registry_replication_factor", - "needs_restart": true, "nullable": true, "related_topics": [ "xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`]" @@ -9754,7 +9592,6 @@ "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", "is_secret": true, "name": "scram_password", - "needs_restart": true, "nullable": false, "type": "string" }, @@ -9769,7 +9606,6 @@ "defined_in": "src/v/kafka/client/configuration.cc", "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. 
This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", "name": "scram_username", - "needs_restart": true, "nullable": false, "type": "string" }, @@ -9783,7 +9619,6 @@ "description": null, "is_deprecated": true, "name": "seed_server_meta_topic_partitions", - "needs_restart": true, "nullable": false, "type": null }, @@ -9802,7 +9637,6 @@ "type": "object" }, "name": "seed_servers", - "needs_restart": true, "nullable": false, "type": "array", "visibility": "user" @@ -9895,7 +9729,6 @@ "description": null, "is_deprecated": true, "name": "seq_table_min_size", - "needs_restart": true, "nullable": false, "type": null }, @@ -10023,7 +9856,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Path to the configuration file used for low level storage failure injection.", "name": "storage_failure_injection_config_path", - "needs_restart": true, "nullable": true, "type": "string", "visibility": "tunable" @@ -10039,7 +9871,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "If `true`, inject low level storage failures on the write path. Do _not_ use for production instances.", "name": "storage_failure_injection_enabled", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "tunable" @@ -10252,7 +10083,6 @@ "description": null, "is_deprecated": true, "name": "target_fetch_quota_byte_rate", - "needs_restart": true, "nullable": false, "type": null }, @@ -10266,7 +10096,6 @@ "description": null, "is_deprecated": true, "name": "target_quota_byte_rate", - "needs_restart": true, "nullable": false, "type": null }, @@ -10327,7 +10156,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "tm_sync_timeout_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "user" @@ -10342,7 +10170,6 @@ "description": null, "is_deprecated": true, "name": "tm_violation_recovery_policy", - "needs_restart": true, "nullable": false, "type": null }, @@ -10540,7 +10367,6 @@ "description": null, "is_deprecated": true, "name": "transaction_coordinator_replication", - "needs_restart": true, "nullable": false, "type": null }, @@ -10569,12 +10395,16 @@ "config_scope": "cluster", "default": "10080min", "defined_in": "src/v/config/configuration.cc", - "description": "Expiration time of producer IDs. Measured starting from the time of the last write until now for a given ID.", + "description": "Expiration time of producer IDs. Measured starting from the time of the last write until now for a given ID. Producer IDs are automatically removed from memory when they expire, which helps manage memory usage. However, this natural cleanup may not be sufficient for workloads with high producer churn rates. 
For applications with long-running transactions, ensure this value accommodates your typical transaction lifetime to avoid premature producer ID expiration.", "maximum": 17592186044415, "minimum": -17592186044416, "name": "transactional_id_expiration_ms", "needs_restart": false, "nullable": false, + "related_topics": [ + "xref:develop:transactions.adoc#tune-producer-id-limits[Tune producer ID limits]", + "xref:reference:properties/cluster-properties.adoc#max_concurrent_producer_ids[max_concurrent_producer_ids]" + ], "type": "integer", "visibility": "user" }, @@ -10606,7 +10436,6 @@ "description": null, "is_deprecated": true, "name": "tx_registry_log_capacity", - "needs_restart": true, "nullable": false, "type": null }, @@ -10620,7 +10449,6 @@ "description": null, "is_deprecated": true, "name": "tx_registry_sync_timeout_ms", - "needs_restart": true, "nullable": false, "type": null }, @@ -10636,7 +10464,6 @@ "maximum": 17592186044415, "minimum": -17592186044416, "name": "tx_timeout_delay_ms", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "user" @@ -10667,7 +10494,6 @@ "defined_in": "src/v/config/node_config.cc", "description": "Whether to violate safety checks when starting a Redpanda version newer than the cluster's consensus version.", "name": "upgrade_override_checks", - "needs_restart": true, "nullable": false, "type": "boolean", "visibility": "tunable" @@ -10776,7 +10602,6 @@ "description": null, "is_deprecated": true, "name": "use_scheduling_groups", - "needs_restart": true, "nullable": false, "type": null }, @@ -10789,17 +10614,11 @@ "config_scope": "broker", "default": null, "defined_in": "src/v/config/node_config.cc", - "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis.\n\n*Unit:* seconds", - "example": ".Example\n[,yaml]\n----\nschema_registry:\n schema_registry_api:\n address: 0.0.0.0\n port: 8081\n authentication_method: http_basic\n schema_registry_replication_factor: 3\n mode_mutability: true\n----", + "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. 
Can be overridden in the Admin API on a per-request basis.", "maximum": 17179869183, "minimum": -17179869184, "name": "verbose_logging_timeout_sec_max", - "needs_restart": true, "nullable": true, - "related_topics": [ - "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]", - "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]" - ], "type": "integer", "visibility": "tunable" }, @@ -10826,7 +10645,7 @@ "cloud_readonly": false, "cloud_supported": false, "config_scope": "cluster", - "default": "5'000ms", + "default": "5000ms", "defined_in": "src/v/config/configuration.cc", "description": "Timeout to wait for leadership in metadata cache.", "maximum": 17592186044415, @@ -10851,8 +10670,6 @@ "is_topic_property": true, "name": "write.caching", "related_topics": [ - "xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]", - "xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`]", "xref:develop:config-topics.adoc#configure-write-caching[Write caching]", "xref:manage:tiered-storage.adoc[Tiered Storage]" ], @@ -10888,7 +10705,6 @@ "defined_in": "src/v/config/configuration.cc", "description": "Size of the zstd decompression workspace.", "name": "zstd_decompress_workspace_bytes", - "needs_restart": true, "nullable": false, "type": "integer", "visibility": "tunable" diff --git a/modules/reference/partials/properties/broker-properties.adoc b/modules/reference/partials/properties/broker-properties.adoc index 947ecd7211..cb6b677453 100644 --- a/modules/reference/partials/properties/broker-properties.adoc +++ b/modules/reference/partials/properties/broker-properties.adoc @@ -4,6 +4,10 @@ Network address for the glossterm:Admin API[] server. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -30,6 +34,12 @@ redpanda: port: ---- +Replace the following placeholders with your values: + +* ``: Name for the Admin API listener (TLS configuration is handled separately in the <> broker property) +* ``: The externally accessible hostname or IP address that clients use to connect to this broker +* ``: The port number for the Admin API endpoint + --- // end::category-redpanda[] @@ -39,6 +49,10 @@ redpanda: Path to the API specifications for the Admin API. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -62,6 +76,10 @@ endif::[] Specifies the TLS configuration for the HTTP Admin API. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -91,6 +109,13 @@ redpanda: require_client_auth: true ---- +Replace the following placeholders with your values: + +* ``: Name that matches your Admin API listener (defined in the <> broker property) +* ``: Full path to the TLS certificate file +* ``: Full path to the TLS private key file +* ``: Full path to the Certificate Authority file + --- // end::category-redpanda[] @@ -100,6 +125,10 @@ redpanda: Address of the Kafka API published to the clients. If not set, the <> broker property is used. 
When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -126,6 +155,12 @@ redpanda: port: ---- +Replace the following placeholders with your values: + +* ``: Name that matches your Kafka API listener (defined in the <> broker property) +* ``: The externally accessible hostname or IP address that clients use to connect to this broker +* ``: The port number for the Kafka API endpoint + --- // end::category-redpanda[] @@ -135,7 +170,9 @@ redpanda: Network address for the HTTP Proxy API server to publish to clients. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* array @@ -156,6 +193,10 @@ endif::[] Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -181,6 +222,11 @@ redpanda: port: ---- +Replace the following placeholders with your values: + +* ``: The externally accessible hostname or IP address that other brokers use to communicate with this broker +* ``: The port number for the RPC endpoint (default is 33145) + --- // end::category-redpanda[] @@ -190,7 +236,9 @@ redpanda: Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* string @@ -211,7 +259,9 @@ endif::[] TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* object @@ -232,7 +282,9 @@ endif::[] Network addresses of the Kafka API servers to which the HTTP Proxy client should connect. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* array @@ -240,7 +292,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `vector` +*Default:* `127.0.0.1:9092` endif::[] *Nullable:* No @@ -255,6 +307,10 @@ The maximum number of Kafka client connections that Redpanda can cache in the LR *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Type:* integer ifdef::env-cloud[] @@ -274,7 +330,9 @@ endif::[] Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities. 
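For illustration, assuming the HTTP Proxy client settings live under the `pandaproxy_client` section of `redpanda.yaml`, a minimal sketch might look like this:

.Example
[,yaml]
----
pandaproxy_client:
  # Placeholder identifier; choose a value that is meaningful in your monitoring setup.
  client_identifier: <client_identifier>
----

Replace `<client_identifier>` with a string that identifies this HTTP Proxy client in broker-side logs and metrics.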
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* string @@ -297,6 +355,10 @@ Time, in milliseconds, that an idle client connection may remain open to the HTT *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Type:* integer *Accepted values:* [`-17592186044416`, `17592186044415`] @@ -318,6 +380,10 @@ endif::[] Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. If not specified, Redpanda uses a default path within the data directory. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -352,11 +418,51 @@ Replace `` with the full path to your desired cache direct --- // end::category-redpanda[] +=== cloud_storage_inventory_hash_path_directory + +Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory. + +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + + +.Example +[,yaml] +---- +redpanda: + cloud_storage_inventory_hash_store: +---- + +Replace `` with the full path to your desired inventory hash storage directory. + + +--- + // tag::category-redpanda[] === cloud_storage_inventory_hash_path_directory Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -390,7 +496,9 @@ Interval (in milliseconds) for consumer heartbeats. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -414,6 +522,10 @@ Interval (in milliseconds) for consumer heartbeats. *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -438,7 +550,9 @@ How long to wait for an idle consumer before removing it. A consumer is consider *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -448,7 +562,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `minutes` +*Default:* `5 min` endif::[] *Nullable:* No @@ -462,6 +576,10 @@ How long to wait for an idle consumer before removing it. 
A consumer is consider *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -486,7 +604,9 @@ Timeout (in milliseconds) for consumer rebalance. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -510,6 +630,10 @@ Timeout (in milliseconds) for consumer rebalance. *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -535,7 +659,9 @@ Maximum bytes to fetch per request. *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -560,7 +686,9 @@ Minimum bytes to fetch per request. *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -584,7 +712,9 @@ Interval (in milliseconds) for consumer request timeout. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -608,6 +738,10 @@ Interval (in milliseconds) for consumer request timeout. *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -632,7 +766,9 @@ Timeout (in milliseconds) for consumer session. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -656,6 +792,10 @@ Timeout (in milliseconds) for consumer session. *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -680,6 +820,10 @@ endif::[] No description available. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Nullable:* No --- @@ -699,6 +843,10 @@ The crash-tracking logic is reset (to zero consecutive crashes) by any of the fo * The `redpanda.yaml` broker configuration file is updated. * The `startup_log` file in the broker's <> broker property is manually deleted. 
+ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -734,6 +882,10 @@ For information about how to reset the crash loop limit, see the < + port: + authentication_method: sasl +---- + +.Multiple listeners example (for different networks or authentication methods) +[,yaml] +---- +redpanda: + kafka_api: + - name: + address: + port: + authentication_method: none + - name: + address: + port: + authentication_method: sasl + - name: + address: + port: + authentication_method: mtls_identity +---- + +Replace the following placeholders with your values: + +* ``: The IP address to bind the listener to (typically `0.0.0.0` for all interfaces) +* ``: The port number for the Kafka API endpoint +* ``: Name for internal network connections (for example, `internal`) +* ``: Name for external network connections (for example, `external`) +* ``: Name for mTLS connections (for example, `mtls`) +* ``: The IP address for internal connections +* ``: The port number for internal Kafka API connections +* ``: The IP address for external connections +* ``: The port number for external Kafka API connections +* ``: The IP address for mTLS connections +* ``: The port number for mTLS Kafka API connections + + *Related topics:* -* xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] * xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] --- @@ -933,6 +1161,10 @@ endif::[] Transport Layer Security (TLS) configuration for the Kafka API endpoint. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -962,6 +1194,15 @@ redpanda: require_client_auth: false ---- +Replace the following placeholders with your values: + +* ``: Name that matches your Kafka API listener (defined in the <> broker property) +* ``: Full path to the TLS certificate file +* ``: Full path to the TLS private key file +* ``: Full path to the Certificate Authority file + +NOTE: Set `require_client_auth: true` for mutual TLS (mTLS) authentication, or `false` for server-side TLS only. + --- // end::category-redpanda[] @@ -971,6 +1212,10 @@ redpanda: Threshold for log messages that contain a larger memory allocation than specified. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -994,7 +1239,9 @@ endif::[] Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* boolean @@ -1028,6 +1275,10 @@ Manually setting or reusing `node_id` values, even for decommissioned brokers, c Broker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed. 
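If you do need to pin an explicit broker ID, for example in a tightly controlled deployment, a minimal `redpanda.yaml` sketch looks like the following (the ID value is a placeholder):

.Example
[,yaml]
----
redpanda:
  # Explicit broker ID; omit this property to let the cluster assign one automatically.
  node_id: 0
----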
+ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1053,6 +1304,10 @@ List of node ID and UUID overrides applied at broker startup. Each entry include Remove this property after the cluster restarts successfully and operates normally. This prevents reapplication and maintains consistent configuration across brokers. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1084,6 +1339,17 @@ redpanda: ignore_existing_node_id: ---- +Replace the following placeholders with your values: + +* ``: The current UUID of the broker to override +* ``: The new broker ID to assign +* ``: The new UUID to assign to the broker +* ``: Set to `true` to force override on brokers that already have a node ID, or `false` to apply override only to brokers without existing node IDs +* ``: Additional broker UUID for multiple overrides +* ``: Additional new broker ID +* ``: Additional new UUID +* ``: Additional ignore existing node ID flag + --- // end::category-redpanda[] @@ -1093,6 +1359,10 @@ redpanda: Path to the configuration file used by OpenSSL to properly load the FIPS-compliant module. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1116,6 +1386,10 @@ endif::[] Path to the directory that contains the OpenSSL FIPS-compliant module. The filename that Redpanda looks for is `fips.so`. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1139,7 +1413,9 @@ endif::[] Rest API listener address and port. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* array @@ -1172,7 +1448,9 @@ pandaproxy: TLS configuration for Pandaproxy API. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* array @@ -1193,7 +1471,9 @@ endif::[] Number of acknowledgments the producer requires the leader to have received before considering a request complete. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1217,7 +1497,9 @@ Delay (in milliseconds) to wait before sending batch. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1237,10 +1519,14 @@ endif::[] // tag::category-pandaproxy-client[] === produce_batch_delay_ms -Configuration property: produce_batch_delay_ms +Delay (in milliseconds) to wait before sending batch. *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1264,7 +1550,9 @@ endif::[] Number of records to batch before sending to broker. 
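As an illustrative sketch only, the HTTP Proxy client's batching properties can be tuned together, assuming they live under the `pandaproxy_client` section of `redpanda.yaml` (the values below are placeholders, not recommendations):

.Example
[,yaml]
----
pandaproxy_client:
  # A batch is sent once any of these thresholds is reached.
  produce_batch_record_count: 1000
  produce_batch_size_bytes: 1048576
  produce_batch_delay_ms: 100
----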
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1289,7 +1577,9 @@ Number of bytes to batch before sending to broker. *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1312,7 +1602,9 @@ endif::[] Enable or disable compression by the Kafka client. Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd]. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* string @@ -1334,7 +1626,9 @@ Delay (in milliseconds) to allow for final flush of buffers before shutting down *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1358,6 +1652,10 @@ Delay (in milliseconds) to allow for final flush of buffers before shutting down *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1381,6 +1679,10 @@ endif::[] A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1408,6 +1710,10 @@ endif::[] If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1435,7 +1741,9 @@ endif::[] Number of times to retry a request to a broker. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1457,7 +1765,9 @@ Delay (in milliseconds) for initial retry backoff. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1477,10 +1787,14 @@ endif::[] // tag::category-pandaproxy-client[] === retry_base_backoff_ms -Configuration property: retry_base_backoff_ms +Delay (in milliseconds) for initial retry backoff. *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1504,6 +1818,10 @@ endif::[] IP address and port for the Remote Procedure Call (RPC) server. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1527,6 +1845,10 @@ endif::[] TLS configuration for the RPC server. 
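A minimal sketch of binding the RPC listener in `redpanda.yaml` (the address is a placeholder; `33145` is the conventional default RPC port):

.Example
[,yaml]
----
redpanda:
  rpc_server:
    # Bind to all interfaces; restrict to an internal address in production.
    address: 0.0.0.0
    port: 33145
----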
+ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1555,6 +1877,12 @@ redpanda: require_client_auth: true ---- +Replace the following placeholders with your values: + +* ``: Full path to the RPC TLS certificate file +* ``: Full path to the RPC TLS private key file +* ``: Full path to the certificate authority file + --- // end::category-redpanda[] @@ -1568,7 +1896,11 @@ This property specifies which individual SASL mechanism the HTTP Proxy client sh include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] -*Requires restart:* Yes +NOTE: While the cluster-wide xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] property may support additional mechanisms (PLAIN, GSSAPI, OAUTHBEARER), HTTP Proxy client connections only support SCRAM mechanisms. + +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* string @@ -1593,7 +1925,9 @@ endif::[] Schema Registry API listener address and port -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* array @@ -1626,7 +1960,9 @@ schema_registry: TLS configuration for Schema Registry API. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* array @@ -1647,7 +1983,9 @@ endif::[] Replication factor for internal `_schemas` topic. If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -1676,7 +2014,9 @@ Password to use for SCRAM authentication mechanisms when the HTTP Proxy client c include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* string @@ -1699,7 +2039,9 @@ Username to use for SCRAM authentication mechanisms when the HTTP Proxy client c include::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[] -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* string @@ -1734,6 +2076,10 @@ Only one broker, the designated cluster root, should have an empty `seed_servers The `seed_servers` list must be consistent across all seed brokers to prevent cluster fragmentation and ensure stable cluster formation. ==== +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1803,6 +2149,10 @@ Replace the following placeholders with your values: Path to the configuration file used for low level storage failure injection. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1826,6 +2176,10 @@ endif::[] If `true`, inject low level storage failures on the write path. Do _not_ use for production instances. 
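A sketch for a test-only broker, pairing this flag with `storage_failure_injection_config_path` (the path is a placeholder):

.Example
[,yaml]
----
redpanda:
  # Test environments only. Never enable failure injection on production brokers.
  storage_failure_injection_enabled: true
  storage_failure_injection_config_path: <failure_injection_config_path>
----

Replace `<failure_injection_config_path>` with the full path to your failure injection configuration file.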
+ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1849,6 +2203,10 @@ endif::[] Whether to violate safety checks when starting a Redpanda version newer than the cluster's consensus version. +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1872,7 +2230,9 @@ endif::[] Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. Can be overridden in the Admin API on a per-request basis. -*Unit:* seconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -1891,24 +2251,5 @@ endif::[] *Nullable:* Yes - -.Example -[,yaml] ----- -schema_registry: - schema_registry_api: - address: 0.0.0.0 - port: 8081 - authentication_method: http_basic - schema_registry_replication_factor: 3 - mode_mutability: true ----- - - -*Related topics:* - -* xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`] -* xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`] - --- // end::category-redpanda[] diff --git a/modules/reference/partials/properties/cluster-properties.adoc b/modules/reference/partials/properties/cluster-properties.adoc index 16e3b066c1..1b84190f9f 100644 --- a/modules/reference/partials/properties/cluster-properties.adoc +++ b/modules/reference/partials/properties/cluster-properties.adoc @@ -5,7 +5,9 @@ Capacity (in number of txns) of an abort index segment. Each partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -32,7 +34,9 @@ Interval, in milliseconds, at which Redpanda looks for inactive transactions and *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -59,6 +63,10 @@ Whether Admin API clients must provide HTTP basic authentication headers. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -82,6 +90,10 @@ Enable aggregation of metrics returned by the xref:reference:internal-metrics-re *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Type:* boolean ifdef::env-cloud[] @@ -107,6 +119,10 @@ The amount of time since the last broker status heartbeat. 
After this time, a br *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -134,6 +150,10 @@ The duration, in milliseconds, that Redpanda waits for the replication of entrie *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -157,7 +177,9 @@ endif::[] Size of direct write operations to disk in bytes. A larger chunk size can improve performance for write-heavy workloads, but increase latency for these writes as more data is collected before each write operation. A smaller chunk size can decrease write latency, but potentially increase the number of disk I/O operations. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -182,6 +204,10 @@ Defines the number of bytes allocated by the internal audit client for audit mes *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -210,6 +236,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -234,6 +264,10 @@ List of strings in JSON style identifying the event types to include in the audi *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -258,6 +292,10 @@ List of user principals to exclude from auditing. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -283,6 +321,10 @@ List of topics to exclude from auditing. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -307,6 +349,10 @@ Defines the policy for rejecting audit log messages when the audit log queue is *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -333,6 +379,10 @@ Defines the number of partitions used by a newly-created audit topic. This confi *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -359,6 +409,10 @@ Defines the replication factor for a newly-created audit log topic. 
This configu *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -386,6 +440,10 @@ Interval, in milliseconds, at which Redpanda flushes the queued audit log messag *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -411,6 +469,10 @@ Defines the maximum amount of memory in bytes used by the audit buffer in each s *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -437,6 +499,10 @@ If you produce to a topic that doesn't exist, the topic will be created with def *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -463,6 +529,10 @@ Cluster identifier. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* No +endif::[] + *Type:* string ifdef::env-cloud[] @@ -482,6 +552,10 @@ Size (in bytes) for each compacted log segment. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -505,7 +579,9 @@ endif::[] Target backlog size for compaction controller. If not set the max backlog size is configured to 80% of total disk space available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -528,7 +604,9 @@ endif::[] Derivative coefficient for compaction PID controller. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -551,7 +629,9 @@ endif::[] Integral coefficient for compaction PID controller. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -574,7 +654,9 @@ endif::[] Maximum number of I/O and CPU shares that compaction process can use. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -599,7 +681,9 @@ endif::[] Minimum number of I/O and CPU shares that compaction process can use. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -624,7 +708,9 @@ endif::[] Proportional coefficient for compaction PID controller. This must be negative, because the compaction backlog should decrease when the number of compaction shares increases. 
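Because this is a cluster property, it is set at cluster bootstrap or through the Admin API rather than in `redpanda.yaml`. As an illustration only, with a made-up placeholder value:

.Example
[,yaml]
----
# .bootstrap.yaml: cluster properties applied when the cluster first starts.
# The coefficient must stay negative; the magnitude shown is purely illustrative.
compaction_ctrl_p_coeff: -0.0001
----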
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -651,7 +737,9 @@ This is an internal-only configuration and should be enabled only after consulti *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -680,6 +768,10 @@ How often to run the collection loop when enable_consumer_group_metrics contains *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -707,6 +799,10 @@ Reducing the value of `consumer_group_lag_collection_interval_sec` increases the *Unit:* seconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -730,6 +826,10 @@ This property lets you enable the batch cache for the consumer offsets topic. By *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -755,6 +855,10 @@ Interval between iterations of controller backend housekeeping loop. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -780,6 +884,10 @@ Maximum capacity of rate limit accumulation in controller ACLs and users operati *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -803,6 +911,10 @@ Maximum capacity of rate limit accumulation in controller configuration operatio *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -826,6 +938,10 @@ Maximum capacity of rate limit accumulation in controller move operations limit. 
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -849,6 +965,10 @@ Maximum capacity of rate limit accumulation in controller node management operat *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -872,6 +992,10 @@ Maximum capacity of rate limit accumulation in controller topic operations limit *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -897,6 +1021,10 @@ Maximum amount of time before Redpanda attempts to create a controller snapshot *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -922,7 +1050,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -937,7 +1067,9 @@ No description available. *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -952,7 +1084,9 @@ No description available. *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -967,7 +1101,9 @@ No description available. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -984,6 +1120,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1007,6 +1147,10 @@ Interval, in milliseconds, between trigger and invocation of core balancing. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1032,6 +1176,10 @@ If set to `true`, and if after a restart the number of cores changes, Redpanda w *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1055,6 +1203,10 @@ Enables CPU profiling for Redpanda. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1080,6 +1232,10 @@ The sample period for the CPU profiler. 
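A sketch of enabling profiling at cluster bootstrap, assuming these cluster properties are placed in `.bootstrap.yaml` (the sample period value is a placeholder):

.Example
[,yaml]
----
# Cluster properties applied when the cluster first starts.
cpu_profiler_enabled: true
cpu_profiler_sample_period_ms: 100
----

After the cluster is running, the same properties can be changed with `rpk cluster config set`.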
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1107,6 +1263,10 @@ Timeout, in milliseconds, to wait for new topic creation. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1119,7 +1279,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `2'000ms` +*Default:* `2000ms` endif::[] *Nullable:* No @@ -1137,6 +1297,10 @@ The maximum size for a deployable WebAssembly binary that the broker can store. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1163,6 +1327,10 @@ The commit interval at which data transforms progress. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1189,6 +1357,10 @@ Enables WebAssembly-powered data transforms directly in the broker. When `data_t *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1215,6 +1387,10 @@ Buffer capacity for transform logs, per shard. Buffer occupancy is calculated as *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1240,6 +1416,10 @@ Flush interval for transform logs. When a timer expires, pending logs are collec *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1268,6 +1448,10 @@ Transform log lines truncate to this length. 
Truncation occurs after any charact *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1297,6 +1481,10 @@ The amount of memory to reserve per core for data transform (Wasm) virtual machi *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1330,6 +1518,10 @@ The amount of memory to give an instance of a data transform (Wasm) virtual mach *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1360,6 +1552,10 @@ The percentage of available memory in the transform subsystem to use for read bu *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1385,6 +1581,10 @@ The maximum amount of runtime to start up a data transform, and the time it take *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1412,6 +1612,10 @@ The percentage of available memory in the transform subsystem to use for write b *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1435,6 +1639,10 @@ Maximum amount of time the coordinator waits to snapshot after a command appears *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1460,6 +1668,10 @@ Option to explicitly disable enforcement of datalake disk space usage. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1483,7 +1695,9 @@ endif::[] No description available. 
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -1496,6 +1710,10 @@ The datalake disk usage monitor reclaims the overage multiplied by this this coe *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1521,6 +1739,10 @@ Size, in bytes, of each memory block reserved for record translation, as tracked *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1544,6 +1766,10 @@ The size, in bytes, of the block of disk reservation that the datalake manager w *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1567,6 +1793,10 @@ The maximum number of translations that the datalake scheduler will allow to run *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1592,6 +1822,10 @@ Time, in milliseconds, for a datalake translation as scheduled by the datalake s *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1619,6 +1853,10 @@ Size, in bytes, of the amount of scratch space datalake should use. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1644,6 +1882,10 @@ Size of the scratch space datalake soft limit expressed as a percentage of the ` *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1669,6 +1911,10 @@ Size, in bytes, of the amount of per translator data that may be flushed to disk *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1694,6 +1940,10 @@ If set, how long debug bundles are kept in the debug bundle storage directory af *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1719,6 +1969,10 @@ Path to the debug bundle storage directory. 
Note: Changing this path does not cl *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1742,6 +1996,10 @@ The recursion depth after which debug logging is enabled automatically for the l *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1775,6 +2033,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1785,7 +2047,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `leaders_preference` +*Default:* `none` endif::[] *Nullable:* No @@ -1798,6 +2060,10 @@ Default number of quota tracking windows. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1825,6 +2091,10 @@ Default number of partitions per topic. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1850,6 +2120,10 @@ Default replication factor for new topics. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1877,6 +2151,10 @@ Default quota tracking window size in milliseconds. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1902,6 +2180,10 @@ Enable cloud topics. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1925,6 +2207,10 @@ Enable cluster linking. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1948,6 +2234,10 @@ Development feature property for testing only. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1971,7 +2261,9 @@ endif::[] Disable batch cache in log manager. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -1998,6 +2290,10 @@ Disables the cluster recovery loop. 
This property is used to simplify testing an *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2019,7 +2315,9 @@ endif::[] Disable registering the metrics exposed on the internal `/metrics` endpoint. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* boolean @@ -2038,7 +2336,9 @@ endif::[] Disable registering the metrics exposed on the `/public_metrics` endpoint. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* boolean @@ -2064,6 +2364,10 @@ It is recommended to not run disks near capacity to avoid blocking I/O due to lo *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2087,6 +2391,10 @@ Raft election timeout expressed in milliseconds. *Unit:* milliseconds +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2112,6 +2420,10 @@ Election timeout expressed in milliseconds. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2124,7 +2436,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `1'500ms` +*Default:* `1500ms` endif::[] *Nullable:* No @@ -2137,7 +2449,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -2151,6 +2465,10 @@ Enable automatic partition rebalancing when new nodes are added *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `deprecated` // end::self-managed-only[] @@ -2173,6 +2491,10 @@ Enables cluster metadata uploads. Required for xref:manage:whole-cluster-restore *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2213,6 +2535,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Type:* array ifdef::env-cloud[] @@ -2243,6 +2569,10 @@ Limits the write rate for the controller log. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2266,7 +2596,9 @@ endif::[] No description available. 
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -2279,6 +2611,10 @@ Development features should never be enabled in a production cluster, or any clu *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2304,6 +2640,10 @@ Host metrics are prefixed with xref:reference:internal-metrics-reference.adoc#ve *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2329,7 +2669,9 @@ endif::[] Enable idempotent producers. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -2354,6 +2696,10 @@ Enable automatic leadership rebalancing. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2385,6 +2731,10 @@ The cluster metrics of the metrics reporter are different from xref:manage:monit *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2412,6 +2762,10 @@ Enable Redpanda extensions for MPX. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2433,7 +2787,9 @@ endif::[] Enable PID file. You should not need to change. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2458,6 +2814,10 @@ Enable rack-aware replica assignment. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2481,6 +2841,10 @@ Enable SASL authentication for Kafka connections. Authorization is required to m *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2514,6 +2878,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2539,7 +2907,9 @@ endif::[] Enable transactions (atomic writes). 
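As a rough illustration of the topic defaults described earlier in this section (default partition count and default replication factor for new topics), the following sketch assumes the usual Redpanda property names `default_topic_partitions` and `default_topic_replications`, which are not spelled out in this hunk; the values are illustrative.

[,bash]
----
# Assumed property names; values are illustrative.
rpk cluster config set default_topic_partitions 3
rpk cluster config set default_topic_replications 3
----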
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -2564,6 +2934,10 @@ Enables the usage tracking mechanism, storing windowed history of kafka/cloud_st *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2587,6 +2961,10 @@ Whether new feature flags auto-activate after upgrades (true) or must wait for m *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2612,6 +2990,10 @@ Maximum number of bytes returned in a fetch request. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2635,6 +3017,10 @@ Derivative coefficient for fetch PID controller. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2658,6 +3044,10 @@ Integral coefficient for fetch PID controller. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2683,6 +3073,10 @@ The maximum debounce time the fetch PID controller will apply, in milliseconds. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2708,6 +3102,10 @@ Proportional coefficient for fetch PID controller. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2733,6 +3131,10 @@ A fraction, between 0 and 1, for the target reactor utilization of the fetch sch *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2762,6 +3164,10 @@ The strategy used to fulfill fetch requests. 
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2785,6 +3191,10 @@ Time to wait for the next read in fetch requests when the requested minimum byte *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2810,7 +3220,9 @@ Time duration after which the inactive fetch session is removed from the fetch s *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2839,7 +3251,9 @@ No description available. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -2852,7 +3266,9 @@ No description available. No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -2865,6 +3281,10 @@ Delay added to the rebalance phase to wait for new members. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2892,6 +3312,10 @@ The maximum allowed session timeout for registered consumers. Longer timeouts gi *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Type:* integer *Accepted values:* [`-17592186044416`, `17592186044415`] @@ -2915,6 +3339,10 @@ The minimum allowed session timeout for registered consumers. Shorter timeouts r *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Type:* integer *Accepted values:* [`-17592186044416`, `17592186044415`] @@ -2936,6 +3364,10 @@ Timeout for new member joins. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2948,7 +3380,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `30'000ms` +*Default:* `30000ms` endif::[] *Nullable:* No @@ -2963,6 +3395,10 @@ Frequency rate at which the system should check for expired group offsets. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2990,6 +3426,10 @@ Consumer group offset retention seconds. To disable offset retention, set this t *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -3017,6 +3457,10 @@ Number of partitions in the internal group membership topic. 
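For the consumer group offset retention behavior described above, a hedged example of shortening retention to seven days follows. The property name `group_offset_retention_sec` is an assumption based on the description, and the value is illustrative.

[,bash]
----
# Assumed property name; 7 days expressed in seconds (illustrative).
rpk cluster config set group_offset_retention_sec 604800
----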
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -3040,7 +3484,9 @@ endif::[] How often the health manager runs. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -3067,6 +3513,10 @@ Maximum age of the metadata cached in the health monitor of a non-controller bro *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -3093,6 +3543,10 @@ How often health monitor refresh cluster state *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `deprecated` // end::self-managed-only[] @@ -3127,6 +3581,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3151,6 +3609,10 @@ Controls how much past backlog (unprocessed work) affects the priority of proces *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -3174,6 +3636,10 @@ Proportional coefficient for the Iceberg backlog controller. Number of shares as *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -3198,6 +3664,10 @@ Base path for the cloud-storage-object-backed Iceberg filesystem catalog. After *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3224,6 +3694,10 @@ The frequency at which the Iceberg coordinator commits topic files to the catalo *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -3256,6 +3730,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3292,6 +3770,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3328,6 +3810,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3352,6 +3838,10 @@ Whether to disable automatic Iceberg snapshot expiry. 
This property may be usefu *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3381,6 +3871,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3420,6 +3914,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3461,6 +3959,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3494,6 +3996,10 @@ The TTL for caching the latest schema during translation when using the xref:man *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -3529,6 +4035,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3559,6 +4069,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3593,6 +4107,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3627,6 +4145,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3655,6 +4177,10 @@ AWS service name for SigV4 signing when using aws_sigv4 authentication mode. 
Def *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3684,6 +4210,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3707,6 +4237,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3737,35 +4271,15 @@ endif::[] *Requires restart:* Yes -// tag::self-managed-only[] -*Visibility:* `user` -// end::self-managed-only[] - -*Type:* string - -ifdef::env-cloud[] -*Default:* Available in the Redpanda Cloud Console -endif::[] ifndef::env-cloud[] -*Default:* `null` +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes endif::[] -*Nullable:* Yes - ---- -// end::redpanda-cloud[] - -=== iceberg_rest_catalog_credentials_source - -Source of AWS credentials for Iceberg REST catalog SigV4 authentication. If not set, falls back to cloud_storage_credentials_source when using aws_sigv4 authentication mode. Accepted values: config_file, aws_instance_metadata, sts, gcp_instance_metadata, azure_vm_instance_metadata, azure_aks_oidc_federation. - -*Requires restart:* Yes - // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] -*Type:* object +*Type:* string ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console @@ -3776,13 +4290,9 @@ endif::[] *Nullable:* Yes -// tag::self-managed-only[] -*Aliases:* iceberg_rest_catalog_aws_credentials_source -// end::self-managed-only[] - --- +// end::redpanda-cloud[] -// tag::redpanda-cloud[] === iceberg_rest_catalog_credentials_source ifndef::env-cloud[] @@ -3795,16 +4305,17 @@ endif::[] *Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`. -ifdef::env-cloud[] -NOTE: This property is available only in Redpanda Cloud BYOC deployments. -endif::[] +*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] -*Type:* string +*Type:* object ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console @@ -3813,14 +4324,17 @@ ifndef::env-cloud[] *Default:* `null` endif::[] -*Nullable:* No +*Nullable:* Yes *Related topics:* * xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`] +// tag::self-managed-only[] +*Aliases:* iceberg_rest_catalog_aws_credentials_source +// end::self-managed-only[] + --- -// end::redpanda-cloud[] // tag::redpanda-cloud[] === iceberg_rest_catalog_crl @@ -3834,6 +4348,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3858,6 +4376,10 @@ Path to certificate revocation list for `iceberg_rest_catalog_trust_file`. 
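A minimal sketch of wiring up the REST catalog trust chain and revocation list mentioned above, using the property names shown in this section (`iceberg_rest_catalog_trust_file` and `iceberg_rest_catalog_crl`); the file paths are hypothetical placeholders.

[,bash]
----
# File paths are hypothetical placeholders.
rpk cluster config set iceberg_rest_catalog_trust_file /etc/redpanda/certs/rest-catalog-ca.crt
rpk cluster config set iceberg_rest_catalog_crl /etc/redpanda/certs/rest-catalog.crl
----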
*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3888,6 +4410,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3918,6 +4444,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3948,6 +4478,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -3980,6 +4514,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -4014,6 +4552,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4047,6 +4589,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4071,6 +4617,10 @@ Path to a file containing a certificate chain to trust for the REST Iceberg cata *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4100,6 +4650,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4128,6 +4682,10 @@ Average size per partition of the datalake translation backlog that the backlog *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -4161,6 +4719,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4173,7 +4735,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `milliseconds` +*Default:* `std::chrono::milliseconds{1min}` endif::[] *Nullable:* No @@ -4193,6 +4755,10 @@ Ration of the total backlog size to the disk space at which the throttle to iceb *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -4222,6 +4788,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during 
xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4244,7 +4814,9 @@ endif::[] The ID allocator allocates messages in batches (each batch is a one log record) and then serves requests from memory without touching the log until the batch is exhausted. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -4269,7 +4841,9 @@ endif::[] Capacity of the `id_allocator` log in number of batches. After it reaches `id_allocator_stm`, it truncates the log's prefix. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -4296,7 +4870,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -4309,6 +4885,10 @@ Initial local retention size target for partitions of topics with xref:manage:ti *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4336,6 +4916,10 @@ Initial local retention time target for partitions of topics with xref:manage:ti *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4361,11 +4945,11 @@ endif::[] === internal_topic_replication_factor -Target replication factor for internal topics. +Target replication factor for internal topics -*Unit*: number of replicas per topic. - -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -4392,7 +4976,9 @@ Time between cluster join retries in milliseconds. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -4419,7 +5005,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -4434,6 +5022,10 @@ Maximum size of a batch processed by the server. If the batch is compressed, the *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -4459,7 +5051,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -4472,7 +5066,9 @@ No description available. No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -4485,6 +5081,10 @@ Maximum connections per second for one core. 
If `null` (the default), then the n *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4510,6 +5110,10 @@ Overrides the maximum connections per second for one core for the specified IP a *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4537,6 +5141,10 @@ Maximum number of Kafka client connections per broker. If `null`, the property i *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4567,6 +5175,10 @@ A list of IP addresses for which Kafka client connection limits are overridden a *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4596,6 +5208,10 @@ Maximum number of Kafka client connections per IP address, per broker. If `null` *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4630,6 +5246,10 @@ Flag to require authorization for Kafka connections. If `null`, the property is *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4653,6 +5273,10 @@ Enable the Kafka partition reassignment API. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4678,6 +5302,10 @@ Kafka group recovery timeout. 
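The per-broker and per-IP connection limits described above are usually set together. This sketch assumes the property names `kafka_connections_max` and `kafka_connections_max_per_ip`, which do not appear verbatim in this hunk; the limits shown are illustrative.

[,bash]
----
# Assumed property names; limits are illustrative.
rpk cluster config set kafka_connections_max 15000
rpk cluster config set kafka_connections_max_per_ip 1000
----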
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4690,7 +5318,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `30'000ms` +*Default:* `30000ms` endif::[] *Nullable:* No @@ -4703,6 +5331,10 @@ Limit fetch responses to this many bytes, even if the total of partition bytes l *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -4726,6 +5358,10 @@ The size of the batch used to estimate memory consumption for fetch requests, in *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4749,6 +5385,10 @@ The share of Kafka subsystem memory that can be used for fetch read buffers, as *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4772,6 +5412,10 @@ Principal mapping rules for mTLS authentication on the Kafka API. If `null`, the *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4795,6 +5439,10 @@ A list of topics that are protected from deletion and configuration changes by K *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4823,6 +5471,10 @@ A list of topics that are protected from being produced to by Kafka clients. Set *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -4846,6 +5498,10 @@ Controls the level of validation performed on batches produced to Redpanda. When *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -4867,7 +5523,9 @@ endif::[] Smoothing factor for Kafka queue depth control depth tracking. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -4892,7 +5550,9 @@ Update frequency for Kafka queue depth control. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -4917,7 +5577,9 @@ endif::[] Enable kafka queue depth control. 
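As a hedged example of the protected-topics lists described earlier in this section, the following assumes the property name `kafka_nodelete_topics` and an illustrative topic list; `rpk` generally accepts a YAML/JSON-style list for array-valued properties, though exact quoting depends on your shell.

[,bash]
----
# Assumed property name; the topic list is illustrative.
rpk cluster config set kafka_nodelete_topics '["__consumer_offsets","_schemas"]'
----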
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -4940,7 +5602,9 @@ endif::[] Queue depth when idleness is detected in Kafka queue depth control. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -4963,7 +5627,9 @@ endif::[] Smoothing parameter for Kafka queue depth control latency tracking. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -4986,7 +5652,9 @@ endif::[] Maximum queue depth used in Kafka queue depth control. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -5011,7 +5679,9 @@ Maximum latency threshold for Kafka queue depth control depth tracking. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -5036,7 +5706,9 @@ endif::[] Minimum queue depth used in Kafka queue depth control. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -5059,7 +5731,9 @@ endif::[] Number of windows used in Kafka queue depth control latency tracking. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -5084,7 +5758,9 @@ Window size for Kafka queue depth control latency tracking. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -5113,7 +5789,9 @@ No description available. *Unit:* bytes per second -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -5126,7 +5804,9 @@ No description available. No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -5141,7 +5821,9 @@ No description available. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -5156,7 +5838,9 @@ No description available. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -5171,6 +5855,10 @@ Maximum size of a single request processed using the Kafka API. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5196,7 +5884,9 @@ Maximum size of the user-space receive buffer. 
If `null`, this limit is not appl *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -5221,7 +5911,9 @@ Size of the Kafka server TCP receive buffer. If `null`, the property is disabled *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -5244,7 +5936,9 @@ Size of the Kafka server TCP transmit buffer. If `null`, the property is disable *Unit:* bytes -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Type:* integer @@ -5269,6 +5963,10 @@ The maximum time between Kafka client reauthentications. If a client has not rea *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5294,6 +5992,10 @@ Per-shard capacity of the cache for validating schema IDs. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5319,6 +6021,10 @@ TCP keepalive probe interval in seconds for Kafka connections. This describes th *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5344,6 +6050,10 @@ TCP keepalive unacknowledged probes until the connection is considered dead for *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5369,6 +6079,10 @@ TCP keepalive idle timeout in seconds for Kafka connections. This describes the *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5407,6 +6121,10 @@ A connection is assigned the first matching group and is then excluded from thro *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5434,6 +6152,10 @@ List of Kafka API keys that are subject to cluster-wide and node-wide throughput *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5459,6 +6181,10 @@ The maximum rate of all ingress Kafka API traffic for a node. Includes all Kafka *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5490,6 +6216,10 @@ The maximum rate of all egress Kafka traffic for a node. 
Includes all Kafka API *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5521,6 +6251,10 @@ This threshold is evaluated with each request for data. When the number of token *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5552,7 +6286,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -5565,6 +6301,10 @@ Maximum number of Kafka user topics that can be created. If `null`, then no limi *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5590,6 +6330,10 @@ Key-value store flush interval (in milliseconds). *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5613,7 +6357,9 @@ endif::[] Key-value maximum segment size (in bytes). -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -5638,6 +6384,10 @@ Leadership rebalancing idle timeout. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5663,7 +6413,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -5678,6 +6430,10 @@ The leader balancer maintains a list of muted groups and reevaluates muted statu *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5703,6 +6459,10 @@ The duration after which a broker that hasn't sent a heartbeat is considered mut *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5728,6 +6488,10 @@ Per shard limit for in-progress leadership transfers. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5751,6 +6515,10 @@ Group offset retention is enabled by default starting in Redpanda version 23.1. 
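For the node-wide ingress and egress throughput limits described earlier in this section, a hedged sketch follows. The property names `kafka_throughput_limit_node_in_bps` and `kafka_throughput_limit_node_out_bps` are assumptions inferred from the descriptions, and the rates (about 125 MB/s each way) are illustrative.

[,bash]
----
# Assumed property names; values are in bytes per second and illustrative.
rpk cluster config set kafka_throughput_limit_node_in_bps 125000000
rpk cluster config set kafka_throughput_limit_node_out_bps 125000000
----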
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5774,6 +6542,10 @@ Flag to enable a Redpanda cluster operator to use unsafe control characters with *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5799,6 +6571,10 @@ Period at which to log a warning about using unsafe strings containing control c *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5826,6 +6602,10 @@ The topic property xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5853,7 +6633,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -5868,6 +6650,10 @@ How often to trigger background compaction. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -5893,6 +6679,10 @@ The maximum range of segments that can be processed in a single round of adjacen *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5918,6 +6708,10 @@ The maximum number of segments that can be combined into a single segment during *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5943,6 +6737,10 @@ Pause use of sliding window compaction. Toggle to `true` _only_ when you want to *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5966,6 +6764,10 @@ Use sliding window compaction. *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -5993,6 +6795,10 @@ The topic property xref:./topic-properties.adoc#compressiontype[`compression.typ *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6020,6 +6826,10 @@ Disables the housekeeping loop for local storage. 
This property is used to simpl *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6045,6 +6855,10 @@ Threshold in milliseconds for alerting on messages with a timestamp after the br *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6072,6 +6886,10 @@ Threshold in milliseconds for alerting on messages with a timestamp before the b *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6099,6 +6917,10 @@ The topic property xref:./topic-properties.adoc#messagetimestamptype[`message.ti *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6128,6 +6950,10 @@ The amount of time to keep a log file before deleting it (in milliseconds). If s *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6158,6 +6984,10 @@ Default lifetime of log segments. If `null`, the property is disabled, and no de *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6170,7 +7000,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `weeks` +*Default:* `2 weeks` endif::[] *Nullable:* Yes @@ -6184,6 +7014,10 @@ Upper bound on topic `segment.ms`: higher values will be clamped to this value. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6209,6 +7043,10 @@ Lower bound on topic `segment.ms`: lower values will be clamped to this value. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6234,6 +7072,10 @@ Default log segment size in bytes for topics which do not set `segment.bytes`. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6261,6 +7103,10 @@ Random variation to the segment size limit used for each partition. 
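For example, the cluster-wide default segment size used by topics that do not set `segment.bytes` can typically be changed with `rpk`; the 128 MiB value below is only a sample:

[,bash]
----
# Set the default log segment size to 128 MiB for topics without segment.bytes.
rpk cluster config set log_segment_size 134217728
----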
*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6286,6 +7132,10 @@ Upper bound on topic `segment.bytes`: higher values will be clamped to this limi *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6311,6 +7161,10 @@ Lower bound on topic `segment.bytes`: lower values will be clamped to this limit *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6336,6 +7190,10 @@ Disable reusable preallocated buffers for LZ4 decompression. *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6359,6 +7217,10 @@ Maximum compacted segment size after consolidation. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6384,6 +7246,10 @@ For a compacted topic, the maximum time a message remains ineligible for compact *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6409,10 +7275,14 @@ endif::[] === max_concurrent_producer_ids -Maximum number of active producer sessions. When the threshold is passed, Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, its message batches are rejected, and an out of order sequence error is emitted. Consumers don't affect this setting. +Maximum number of active producer sessions per shard. Each shard tracks producer IDs using an LRU (Least Recently Used) eviction policy. When the configured limit is exceeded, the least recently used producer IDs are evicted from the cache. IMPORTANT: The default value is unlimited, which can lead to unbounded memory growth and out-of-memory (OOM) crashes in production environments with heavy producer usage, especially when using transactions or idempotent producers. It is strongly recommended to set a reasonable limit in production deployments. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6430,6 +7300,11 @@ endif::[] *Nullable:* No +*Related topics:* + +* xref:develop:transactions.adoc#tune-producer-id-limits[Tune producer ID limits] +* xref:reference:properties/cluster-properties.adoc#transactional_id_expiration_ms[transactional_id_expiration_ms] + --- === max_in_flight_pandaproxy_requests_per_shard @@ -6438,6 +7313,10 @@ Maximum number of in-flight HTTP requests to HTTP Proxy permitted per shard. 
An *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6461,6 +7340,10 @@ Maximum number of in-flight HTTP requests to Schema Registry permitted per shard *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6486,6 +7369,10 @@ Fail-safe maximum throttle delay on Kafka requests. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6498,7 +7385,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `30'000ms` +*Default:* `30000ms` endif::[] *Nullable:* No @@ -6513,6 +7400,10 @@ For details, see xref:develop:transactions#transaction-usage-tips[Transaction us *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6542,7 +7433,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -6555,7 +7448,9 @@ Time between members backend reconciliation loop retries. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -6582,6 +7477,10 @@ If `true`, the Redpanda process will terminate immediately when an allocation ca *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6605,6 +7504,10 @@ When `true`, memory allocations are sampled and tracked. A sampled live set of a *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6628,7 +7531,9 @@ Interval for metadata dissemination batching. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -6642,7 +7547,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `3'000ms` +*Default:* `3000ms` endif::[] *Nullable:* No @@ -6653,7 +7558,9 @@ endif::[] Number of attempts to look up a topic's metadata-like shard before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for topics like tx, consumer offsets, etc) is missing. These topics are usually created on demand when users try to use the cluster for the first time and it may take some time for the creation to happen and the metadata to propagate to all the brokers (particularly the broker handling the request). 
In the meantime Redpanda waits and retries. This configuration controls the number retries. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -6680,7 +7587,9 @@ Delay before retrying a topic lookup in a shard or other meta tables. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -6694,7 +7603,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `0'500ms` +*Default:* `0500ms` endif::[] *Nullable:* No @@ -6707,7 +7616,9 @@ Maximum time to wait in metadata request for cluster health to be refreshed. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -6734,6 +7645,10 @@ Cluster metrics reporter report interval. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6759,6 +7674,10 @@ Cluster metrics reporter tick interval. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6784,6 +7703,10 @@ URL of the cluster metrics reporter. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6807,6 +7730,10 @@ The minimum ratio between the number of bytes in dirty segments and the total nu *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6832,6 +7759,10 @@ The minimum amount of time (in ms) that a log segment must remain unaltered befo *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6861,7 +7792,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -6874,6 +7807,10 @@ Minimum allowable replication factor for topics in this cluster. The set value m *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -6899,6 +7836,10 @@ How long after the last heartbeat request a node will wait before considering it *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6924,7 +7865,9 @@ Timeout for executing node management operations. 
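A brief sketch of inspecting a timeout such as this one, assuming the property is exposed under the name `node_management_operation_timeout_ms`:

[,bash]
----
# Assumed property name for illustration: read the current timeout in milliseconds.
rpk cluster config get node_management_operation_timeout_ms
----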
*Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -6951,6 +7894,10 @@ Time interval between two node status messages. Node status messages establish l *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -6978,6 +7925,10 @@ Maximum backoff (in milliseconds) to reconnect to an unresponsive peer during no *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7001,10 +7952,12 @@ endif::[] The amount of time (in seconds) to allow for when validating the expiry claim in the token. -*Unit*: seconds - *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7017,7 +7970,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `seconds` +*Default:* `null` endif::[] *Nullable:* No @@ -7035,6 +7988,10 @@ The URL pointing to the well-known discovery endpoint for the OIDC provider. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7059,6 +8016,10 @@ The frequency of refreshing the JSON Web Keys (JWKS) used to validate access tok *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7089,6 +8050,10 @@ Rule for mapping JWT payload claim to a Redpanda user principal. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7125,6 +8090,10 @@ A string representing the intended recipient of the token. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7149,6 +8118,10 @@ Number of partitions that can be reassigned at once. 
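For instance, raising the number of concurrent partition moves trades faster rebalancing for more recovery traffic; the value below is only an example:

[,bash]
----
# Allow up to 100 partition reassignments to run at once during rebalancing.
rpk cluster config set partition_autobalancing_concurrent_moves 100
----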
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7174,6 +8147,10 @@ When the disk usage of a node exceeds this threshold, it triggers Redpanda to mo *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7203,6 +8180,10 @@ Minimum size of partition that is going to be prioritized when rebalancing a clu *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7236,6 +8217,10 @@ endif::[] *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7267,6 +8252,10 @@ Total size of partitions that autobalancer is going to move in one batch (deprec *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `deprecated` // end::self-managed-only[] @@ -7291,6 +8280,10 @@ When a node is unavailable for at least this timeout duration, it triggers Redpa *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7322,6 +8315,10 @@ Partition autobalancer tick interval. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7347,6 +8344,10 @@ If the number of scheduled tick moves drops by this ratio, a new tick is schedul *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7370,6 +8371,10 @@ If `true`, Redpanda prioritizes balancing a topic’s partition replica count ev *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -7393,6 +8398,10 @@ A threshold value to detect partitions which might have been stuck while shuttin *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7420,6 +8429,10 @@ See https://docs.seastar.io/master/[Seastar documentation^] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7447,6 +8460,10 @@ Quota manager GC frequency in milliseconds. 
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7472,6 +8489,10 @@ Enables an additional step in leader election where a candidate is allowed to wa *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7495,6 +8516,10 @@ Enables Raft optimization of heartbeats. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7519,7 +8544,9 @@ Interval of checking partition against the `raft_replica_max_pending_flush_bytes *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `deprecated` @@ -7543,7 +8570,9 @@ endif::[] The number of failed heartbeats after which an unresponsive TCP connection is forcibly closed. To disable forced disconnection, set to 0. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -7570,6 +8599,10 @@ Number of milliseconds for Raft leader heartbeats. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7597,6 +8630,10 @@ Raft heartbeat RPC (remote procedure call) timeout. Raft uses a heartbeat mechan *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7624,6 +8661,10 @@ Raft I/O timeout. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7636,7 +8677,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `10'000ms` +*Default:* `10000ms` endif::[] *Nullable:* No @@ -7649,6 +8690,10 @@ Raft learner recovery rate limit. Throttles the rate of data communicated to nod *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7672,6 +8717,10 @@ The total size of append entry requests that may be cached per shard, using the *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7695,7 +8744,9 @@ endif::[] No description available. 
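As an illustration of the recovery throttling described above, the learner recovery rate (bytes per second, applied cluster-wide) can be tuned at runtime; 100 MiB/s is an arbitrary example value:

[,bash]
----
# Throttle data sent to brokers that are catching up to roughly 100 MiB/s.
rpk cluster config set raft_learner_recovery_rate 104857600
----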
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -7708,6 +8759,10 @@ The maximum number of append entry requests that may be sent from Raft groups on *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7731,6 +8786,10 @@ Maximum memory that can be used for reads in Raft recovery process by default 15 *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7754,6 +8813,10 @@ Number of partitions that may simultaneously recover data to a particular shard. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7777,6 +8840,10 @@ Specifies the default size of a read issued during Raft follower recovery. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7802,6 +8869,10 @@ Disables cross shard sharing used to throttle recovery traffic. Should only be u *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7827,6 +8898,10 @@ Maximum delay between two subsequent flushes. After this delay, the log is autom *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7854,6 +8929,10 @@ Maximum number of bytes that are not flushed per partition. If the configured th *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7875,7 +8954,9 @@ endif::[] Maximum size of requests cached for replication. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -7900,7 +8981,9 @@ Maximum number of Cross-core(Inter-shard communication) requests pending in Raft See https://docs.seastar.io/master/[Seastar documentation^] -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -7929,6 +9012,10 @@ Timeout for Raft's timeout_now RPC. This RPC is used to force a follower to disp *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7956,6 +9043,10 @@ Follower recovery timeout waiting period when transferring leadership. 
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -7981,7 +9072,9 @@ Duration after which inactive readers are evicted from cache. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8008,6 +9101,10 @@ Maximum desired number of readers cached per NTP. This a soft limit, meaning tha *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -8029,7 +9126,9 @@ endif::[] Minimum amount of free memory maintained by the batch cache background reclaimer. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8052,7 +9151,9 @@ endif::[] Starting from the last point in time when memory was reclaimed from the batch cache, this is the duration during which the amount of memory to reclaim grows at a significant rate, based on heuristics about the amount of available memory. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8066,7 +9167,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `3'000ms` +*Default:* `3000ms` endif::[] *Nullable:* No @@ -8077,7 +9178,9 @@ endif::[] Maximum batch cache reclaim size. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8100,7 +9203,9 @@ endif::[] Minimum batch cache reclaim size. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8123,7 +9228,9 @@ endif::[] If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (<>) is set to be reclaimed. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8137,7 +9244,7 @@ ifdef::env-cloud[] *Default:* Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -*Default:* `10'000ms` +*Default:* `10000ms` endif::[] *Nullable:* No @@ -8150,7 +9257,9 @@ Timeout for append entry requests issued while updating a stale follower. 
*Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8177,6 +9286,10 @@ Flag for specifying whether or not to release cache when a full segment is rolle *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -8200,7 +9313,9 @@ Timeout for append entry requests issued while replicating entries. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -8231,6 +9346,10 @@ The topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes` *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -8258,6 +9377,10 @@ Flag to allow Tiered Storage topics to expand to consumable retention policy lim *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -8281,6 +9404,10 @@ Trim log data when a cloud topic reaches its local retention limit. When this op *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -8306,6 +9433,10 @@ This property can be overridden on a per-topic basis by setting `retention.local *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -8337,6 +9468,10 @@ NOTE: Redpanda Data recommends setting only one of <> to form the complete credentials required for authentication. To authenticate using IAM roles, see <>. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* No +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -31,7 +33,9 @@ Optional API endpoint. The only instance in which you must set this value is whe - GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>. - Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* No +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -54,7 +58,9 @@ endif::[] TLS port override. 
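Putting the preceding object storage properties together, a minimal static-credentials setup might look like the following; the bucket, region, and key values are placeholders, and note that credential properties are not restored by Whole Cluster Restore:

[,bash]
----
# Placeholder values; prefer IAM roles over static keys where possible.
rpk cluster config set cloud_storage_enabled true
rpk cluster config set cloud_storage_bucket my-redpanda-bucket
rpk cluster config set cloud_storage_region us-east-1
rpk cluster config set cloud_storage_access_key REDACTED_ACCESS_KEY
rpk cluster config set cloud_storage_secret_key REDACTED_SECRET_KEY
----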
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -81,6 +87,10 @@ When set to `true`, Redpanda automatically retrieves cluster metadata from a spe *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -106,6 +116,10 @@ If not set, this is automatically generated using `dfs.core.windows.net` and <>. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -879,6 +1003,10 @@ Disables the concurrency control mechanism in Tiered Storage. This safety featur *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -902,6 +1030,10 @@ Use legacy upload mode and do not start archiver_manager. *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -925,6 +1057,10 @@ Disable chunk reads and switch back to legacy mode where full segments are downl *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -948,7 +1084,9 @@ endif::[] No description available. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -961,6 +1099,10 @@ Begins the read replica sync loop in topic partitions with Tiered Storage enable *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -984,6 +1126,10 @@ If `true`, Redpanda disables remote labels and falls back on the hash-based obje *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1005,7 +1151,9 @@ endif::[] Disable TLS for all object storage connections. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -1030,6 +1178,10 @@ Disable all upload consistency checks to allow Redpanda to upload logs with gaps *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1053,6 +1205,10 @@ Begins the upload loop in topic partitions with Tiered Storage enabled. 
The prop *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1077,6 +1233,10 @@ When set to `true`, Redpanda can re-upload data for compacted topics to object s *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1100,6 +1260,10 @@ Controls the eviction of locally stored log segments when Tiered Storage uploads *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1124,6 +1288,10 @@ When set to `true`, new topics are by default configured to allow reading data d *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1148,6 +1316,10 @@ When set to `true`, new topics are by default configured to upload data to objec *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1171,6 +1343,10 @@ Enable routine checks (scrubbing) of object storage partitions. The scrubber val *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1194,6 +1370,10 @@ Enables adjacent segment merging. The segments are reuploaded if there is an opp *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1221,6 +1401,10 @@ Controls the upload of log segments to Tiered Storage. If set to `false`, this p *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1248,6 +1432,10 @@ endif::[] *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -1273,6 +1461,10 @@ Interval, in milliseconds, between a final scrub and the next scrub. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1300,6 +1492,10 @@ Timeout for running the cloud storage garbage collection, in milliseconds. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1327,6 +1523,10 @@ Time limit on waiting for uploads to complete before a leadership transfer. 
If *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1358,6 +1558,10 @@ Interval, in milliseconds, between object storage housekeeping tasks. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1385,6 +1589,10 @@ A segment is divided into chunks. Chunk hydration means downloading the chunk (w *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1412,6 +1620,10 @@ Negative doesn't make sense, but it may not be checked-for/enforced. Large is su *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1437,6 +1649,10 @@ The object storage request rate threshold for idle state detection. If the avera *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1462,6 +1678,10 @@ The timeout, in milliseconds, used to detect the idle state of the object storag *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1489,6 +1709,10 @@ Initial backoff time for exponential backoff algorithm (ms). 
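For example, the initial backoff for object storage requests could be raised when a provider is rate limiting; the value shown is illustrative:

[,bash]
----
# Start exponential backoff for object storage requests at 200 ms.
rpk cluster config set cloud_storage_initial_backoff_ms 200
----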
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1514,6 +1738,10 @@ Scrubber uses the latest cloud storage inventory report, if available, to check *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1537,6 +1765,10 @@ The name of the scheduled inventory job created by Redpanda to generate bucket o *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1560,6 +1792,10 @@ Maximum bytes of hashes held in memory before writing data to disk during invent *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1587,6 +1823,10 @@ Time interval between checks for a new inventory report in the cloud storage buc *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1612,6 +1852,10 @@ The prefix to the path in the cloud storage bucket or container where inventory *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1635,6 +1879,10 @@ If enabled, Redpanda will not attempt to create the scheduled report configurati *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1658,6 +1906,10 @@ Amount of memory that can be used to handle Tiered Storage metadata. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1683,6 +1935,10 @@ Minimum interval, in seconds, between partition manifest uploads. Actual time be *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1710,6 +1966,10 @@ Manifest upload timeout, in milliseconds. 
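A sketch of loosening the manifest upload timeout for a slow or distant object store, assuming the property name shown here and an example value of 30 seconds:

[,bash]
----
# Allow up to 30 seconds for partition manifest uploads before timing out.
rpk cluster config set cloud_storage_manifest_upload_timeout_ms 30000
----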
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1737,6 +1997,10 @@ The time interval that determines how long the materialized manifest can stay in *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1756,12 +2020,43 @@ endif::[] --- +=== cloud_storage_materialized_manifest_ttl_ms + +The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention. + +*Unit:* milliseconds + +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + +// tag::self-managed-only[] +*Visibility:* `user` +// end::self-managed-only[] + +*Type:* string + +ifdef::env-cloud[] +*Default:* Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +*Default:* `null` +endif::[] + +*Nullable:* No + +--- + === cloud_storage_max_concurrent_hydrations_per_shard Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1788,7 +2083,9 @@ This setting reduces resource utilization by closing inactive connections. Adjus *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -1813,7 +2110,9 @@ endif::[] Maximum simultaneous object storage connections per shard, applicable to upload and download activities. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -1839,7 +2138,9 @@ endif::[] Maximum concurrent readers of remote data per CPU core. If unset, value of `topic_partitions_per_shard` multiplied by 2 is used. 
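As an example of balancing Tiered Storage transfer concurrency against other workloads, the per-shard connection limit can be lowered; 10 is just a sample value:

[,bash]
----
# Cap simultaneous object storage connections per shard (uploads and downloads).
rpk cluster config set cloud_storage_max_connections 10
----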
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `deprecated` @@ -1866,6 +2167,10 @@ Maximum partition readers per shard (deprecated) *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `deprecated` // end::self-managed-only[] @@ -1890,6 +2195,10 @@ Maximum concurrent I/O cursors of materialized remote segments per CPU core. If *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1919,6 +2228,10 @@ The per-partition limit for the number of segments pending deletion from the clo *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1943,6 +2256,10 @@ This setting limits the Tiered Storage subsystem's throughput per shard, facilit *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1968,6 +2285,10 @@ Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -1993,6 +2314,10 @@ The minimum number of chunks per segment for trimming to be enabled. If the numb *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2020,6 +2345,10 @@ Time interval between two partial scrubs of the same partition. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2047,6 +2376,10 @@ Timeout to check if new data is available for partitions in object storage for r *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2074,7 +2407,9 @@ No description available. *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] *Nullable:* No @@ -2087,6 +2422,10 @@ Retention in bytes for topics created during automated recovery. 
*Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2110,6 +2449,10 @@ Number of metadata segments to validate, from newest to oldest, when <} due to validation error *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2181,7 +2528,9 @@ endif::[] Cloud provider region that houses the bucket or container used for storage. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* No +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -2206,7 +2555,9 @@ Timeout for IAM role related operations (ms). *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2235,6 +2586,10 @@ Jitter applied to the object storage scrubbing interval. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2258,7 +2613,9 @@ endif::[] Cloud provider secret key. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* No +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -2285,6 +2642,10 @@ Time that a segment can be kept locally without uploading it to the object stora *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2310,6 +2671,10 @@ Smallest acceptable segment size in the object storage. Default: `cloud_storage_ *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2333,6 +2698,10 @@ Desired segment size in the object storage. The default is set in the topic-leve *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2358,6 +2727,10 @@ Log segment upload timeout, in milliseconds. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2383,6 +2756,10 @@ Maximum number of segments in the spillover manifest that can be offloaded to th *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2406,6 +2783,10 @@ The size of the manifest which can be offloaded to the cloud. 
If the size of the *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2431,6 +2812,10 @@ Maximum throughput used by Tiered Storage per broker expressed as a percentage o *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2456,6 +2841,10 @@ Grace period during which the purger refuses to purge the topic. *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2479,7 +2868,9 @@ endif::[] Path to certificate that should be used to validate server certificate during TLS handshake. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* No +endif::[] // tag::self-managed-only[] *Visibility:* `user` @@ -2502,7 +2893,9 @@ endif::[] Derivative coefficient for upload PID controller. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2525,7 +2918,9 @@ endif::[] Maximum number of I/O and CPU shares that archival upload can use. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2550,7 +2945,9 @@ endif::[] Minimum number of I/O and CPU shares that archival upload can use. -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2575,7 +2972,9 @@ endif::[] Proportional coefficient for upload PID controller. 
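For instance, the Tiered Storage throughput ceiling described above can be lowered to leave more disk bandwidth for the Kafka path; 25 percent is only an example:

[,bash]
----
# Limit Tiered Storage to 25% of measured disk throughput on each broker.
rpk cluster config set cloud_storage_throughput_limit_percent 25
----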
-*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2602,7 +3001,9 @@ This is an internal-only configuration and should be enabled only after consulti *Unit:* milliseconds -*Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] // tag::self-managed-only[] *Visibility:* `tunable` @@ -2631,6 +3032,10 @@ Initial backoff interval when there is nothing to upload for a partition, in mil *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2658,6 +3063,10 @@ Maximum backoff interval when there is nothing to upload for a partition, in mil *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `tunable` // end::self-managed-only[] @@ -2690,6 +3099,10 @@ Set this property explicitly to override automatic configuration, ensure consist *Requires restart:* Yes +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] @@ -2713,6 +3126,10 @@ Whether to include Tiered Storage as a special remote:// directory in `DescribeL *Requires restart:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + // tag::self-managed-only[] *Visibility:* `user` // end::self-managed-only[] diff --git a/modules/reference/partials/properties/topic-properties.adoc b/modules/reference/partials/properties/topic-properties.adoc index a7d5c04712..794c64a412 100644 --- a/modules/reference/partials/properties/topic-properties.adoc +++ b/modules/reference/partials/properties/topic-properties.adoc @@ -13,6 +13,10 @@ When `cleanup.policy` is set, it overrides the cluster property xref:cluster-pro *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] @@ -20,39 +24,6 @@ When `cleanup.policy` is set, it overrides the cluster property xref:cluster-pro --- // end::category-retention-compaction[] -=== cloud_storage_inventory_hash_path_directory - -Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory. - -*Type:* string - -*Default:* `null` - -*Nullable:* No - - -.Example -[,yaml] ----- -redpanda: - cloud_storage_inventory_hash_store: ----- - - ---- - -=== cloud_storage_materialized_manifest_ttl_ms - -The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention. 
- -*Type:* string - -*Default:* `null` - -*Nullable:* No - ---- - // tag::category-retention-compaction[] === compaction.strategy @@ -64,6 +35,10 @@ Specifies the strategy used to determine which records to remove during log comp *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#compaction_strategy[`compaction_strategy`] @@ -90,6 +65,10 @@ When set, this property overrides the cluster property xref:./cluster-properties *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] @@ -109,6 +88,10 @@ Enable validation of the schema ID for keys on a record. This is a compatibility *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-schema-registry[] @@ -121,30 +104,42 @@ The subject name strategy for keys when `confluent.key.schema.validation` is ena *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-schema-registry[] // tag::category-schema-registry[] === confluent.value.schema.validation -Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy. +Enable validation of the schema ID for values on a record. This is a compatibility alias for <>. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy. *Type:* string *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-schema-registry[] // tag::category-schema-registry[] === confluent.value.subject.name.strategy -The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. This determines how the topic and schema are mapped to a subject name in the Schema Registry. +The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for <>. This determines how the topic and schema are mapped to a subject name in the Schema Registry. *Type:* string *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-schema-registry[] @@ -163,6 +158,10 @@ If both `delete.retention.ms` and the cluster property config_ref:tombstone_rete *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`] @@ -184,6 +183,10 @@ The maximum bytes not fsynced per partition. 
If this configured threshold is rea *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`] @@ -204,6 +207,10 @@ The maximum delay (in ms) between two subsequent fsyncs. After this delay, the l *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#flush_ms[`flush_ms`] @@ -224,6 +231,10 @@ A size-based initial retention limit for Tiered Storage that determines how much *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`] @@ -245,6 +256,10 @@ A time-based initial retention limit for Tiered Storage that determines how much *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`] @@ -266,6 +281,10 @@ The maximum amount of time (in ms) that a log segment can remain unaltered befor *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] @@ -290,6 +309,10 @@ If `max.message.bytes` is set to a positive value, it overrides the cluster prop *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] @@ -314,6 +337,10 @@ When `message.timestamp.type` is set, it overrides the cluster property xref:./c *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] @@ -335,6 +362,10 @@ The minimum ratio between the number of bytes in dirty segments and the total nu *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`] @@ -355,6 +386,10 @@ The minimum amount of time (in ms) that a log segment must remain unaltered befo *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] @@ -373,6 +408,10 @@ No description available. *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-tiered-storage[] @@ -381,10 +420,16 @@ No description available. Whether the corresponding Iceberg table is deleted upon deleting the topic. 
-*Type:* string +*Type:* boolean + +*Accepted values:* [`true`, `false`] *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-iceberg-integration[] @@ -397,6 +442,10 @@ Whether to write invalid records to a dead-letter queue (DLQ). *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors] @@ -413,6 +462,10 @@ Enable the Iceberg integration for the topic. You can choose one of four modes. *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:manage:iceberg/choose-iceberg-mode.adoc#override-value-schema-latest-default[Choose an Iceberg Mode] @@ -429,6 +482,10 @@ The link:https://iceberg.apache.org/docs/nightly/partitioning/[partitioning^] sp *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:manage:iceberg/about-iceberg-topics.adoc#use-custom-partitioning[Use custom partitioning] @@ -447,6 +504,10 @@ Controls how often the data in the Iceberg table is refreshed with new data from *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-iceberg-integration[] @@ -459,6 +520,10 @@ No description available. *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-schema-registry[] @@ -471,6 +536,10 @@ No description available. *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::category-schema-registry[] @@ -487,6 +556,10 @@ If the cluster configuration property config_ref:enable_rack_awareness,true,prop *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:develop:produce-data/leader-pinning.adoc[Leader pinning] @@ -504,6 +577,10 @@ No description available. *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::exclude-from-docs[] // end::category-other[] @@ -521,6 +598,10 @@ NOTE: `redpanda.remote.delete` doesn't apply to Remote Read Replica topics: a Re *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:manage:tiered-storage.adoc[Tiered Storage] @@ -539,6 +620,10 @@ A flag for enabling Redpanda to fetch data for a topic from object storage to lo *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:manage:tiered-storage.adoc[Tiered Storage] @@ -553,12 +638,14 @@ The name of the object storage bucket for a Remote Read Replica topic. CAUTION: Setting `redpanda.remote.readreplica` together with either `redpanda.remote.read` or `redpanda.remote.write` results in an error. 
-*Type:* boolean - -*Accepted values:* [`true`, `false`] +*Type:* string *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:manage:remote-read-replicas.adoc[Remote Read Replicas] @@ -579,6 +666,10 @@ TIP: You can only configure `redpanda.remote.recovery` when you create a topic. *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:manage:tiered-storage.adoc[Tiered Storage] @@ -597,9 +688,12 @@ A flag for enabling Redpanda to upload data for a topic from local storage to ob *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* -* xref:manage:tiered-storage.adoc[Tiered Storage] * xref:manage:tiered-storage.adoc[Tiered Storage] --- @@ -608,24 +702,44 @@ A flag for enabling Redpanda to upload data for a topic from local storage to ob // tag::category-schema-registry[] === redpanda.value.schema.id.validation -No description available. +Enable validation of the schema ID for values on a record. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy. -*Type:* string +*Type:* boolean + +*Default:* `false` *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + +*Related topics:* + +* xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation] + --- // end::category-schema-registry[] // tag::category-schema-registry[] === redpanda.value.subject.name.strategy -No description available. +The subject name strategy for values when `redpanda.value.schema.id.validation` is enabled. This determines how the topic and schema are mapped to a subject name in the Schema Registry. *Type:* string +*Default:* `TopicNameStrategy` *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + +*Related topics:* + +* xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation] + --- // end::category-schema-registry[] @@ -639,6 +753,10 @@ No description available.
*Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + --- // end::exclude-from-docs[] // end::category-other[] @@ -660,9 +778,12 @@ NOTE: Although `replication.factor` isn't returned or displayed by xref:referenc *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* -* xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] * xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] * xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`] * xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`] @@ -689,6 +810,10 @@ When both size-based (`retention.bytes`) and time-based (`retention.ms`) retenti *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] @@ -711,6 +836,10 @@ A size-based retention limit for Tiered Storage that configures the maximum size *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`] @@ -732,6 +861,10 @@ A time-based retention limit for Tiered Storage that sets the maximum duration t *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`] @@ -758,6 +891,10 @@ When both size-based (`retention.bytes`) and time-based (`retention.ms`) retenti *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] @@ -782,6 +919,10 @@ When `segment.bytes` is set to a positive value, it overrides the cluster proper *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] @@ -808,6 +949,10 @@ If set to a positive duration, `segment.ms` overrides the cluster property xref: *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* * xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`] @@ -834,10 +979,12 @@ When `write.caching` is set, it overrides the cluster property xref:cluster-prop *Nullable:* No +ifndef::env-cloud[] +*Restored during xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]:* Yes +endif::[] + *Related topics:* -* xref:cluster-properties.adoc#write_caching_default[`write_caching_default`] -* xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`] * xref:develop:config-topics.adoc#configure-write-caching[Write caching] * xref:manage:tiered-storage.adoc[Tiered Storage]
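For context, topic-level overrides such as `write.caching` are typically applied with `rpk topic alter-config`. The following is a minimal sketch only, assuming a hypothetical topic named `my-topic`; it is not part of the documented reference above.

[,bash]
----
# Hypothetical topic name. Setting write.caching at the topic level
# overrides the cluster property write_caching_default for this topic only.
rpk topic alter-config my-topic --set write.caching=true

# Removing the override falls back to the cluster-wide default.
rpk topic alter-config my-topic --delete write.caching
----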