|
111 | 111 | "category": "pandaproxy" |
112 | 112 | }, |
113 | 113 | "audit_enabled": { |
| 114 | + "description": "Enables or disables audit logging for the cluster.\n\nWhen set to `true`, Redpanda records an audit log of administrative and data access operations. When set to `false`, audit logging is disabled.", |
114 | 115 | "related_topics": [], |
115 | 116 | "config_scope": "cluster" |
116 | 117 | }, |
|
322 | 323 | "description": "Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting." |
323 | 324 | }, |
324 | 325 | "cloud_storage_housekeeping_interval_ms": { |
325 | | - "description": "Interval, in milliseconds, between object storage housekeeping tasks." |
| 326 | + "description": "Interval between object storage housekeeping tasks.\n\nThis property supports three states:\n\n* Positive value: Sets the interval in milliseconds between housekeeping operations. Smaller values provide more frequent cleanup but increase CPU and I/O usage.\n* 0: Disables automatic housekeeping operations. Manual cleanup may be required.\n* Negative value: Uses the default interval for housekeeping tasks." |
326 | 327 | }, |
327 | 328 | "cloud_storage_hydrated_chunks_per_segment_ratio": { |
328 | 329 | "description": "The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed.\n\nA segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy which simply removes all unused chunks." |
|
670 | 671 | "config_scope": "cluster" |
671 | 672 | }, |
672 | 673 | "delete.retention.ms": { |
673 | | - "description": "The retention time for tombstone records in a compacted topic. Redpanda removes tombstone records after the retention limit is exceeded.\n\nIf you have enabled Tiered Storage and set <<redpandaremoteread,`redpanda.remote.read`>> or <<redpandaremotewrite,`redpanda.remote.write`>> for the topic, you cannot enable tombstone removal.\n\nIf both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic.", |
| 674 | + "description": "The retention time for tombstone records in a compacted topic. Redpanda removes tombstone records after the retention limit is exceeded.\n\nThis property supports three states: set to a specific millisecond value, unset (inherits the cluster default), or disabled by setting to a negative value (no per-topic limit applied).\n\nIf you have enabled Tiered Storage and set <<redpandaremoteread,`redpanda.remote.read`>> or <<redpandaremotewrite,`redpanda.remote.write`>> for the topic, you cannot enable tombstone removal.\n\nIf both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic.", |
674 | 675 | "related_topics": [ |
675 | 676 | "xref:reference:properties/cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`]", |
676 | 677 | "xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]" |
|
952 | 953 | "config_scope": "cluster" |
953 | 954 | }, |
954 | 955 | "initial.retention.local.target.bytes": { |
955 | | - "description": "A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.", |
| 956 | + "description": "A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.\n\nThis property supports three states: set to a specific byte value, unset (inherits the cluster default), or disabled by setting to a negative value (all locally retained data is transferred).", |
956 | 957 | "related_topics": [ |
957 | 958 | "xref:reference:properties/cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`]", |
958 | 959 | "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" |
959 | 960 | ], |
960 | 961 | "config_scope": "topic" |
961 | 962 | }, |
962 | 963 | "initial.retention.local.target.ms": { |
963 | | - "description": "A time-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.", |
| 964 | + "description": "A time-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. If `null` (default), all locally retained data is transferred.\n\nThis property supports three states: set to a specific millisecond value, unset (inherits the cluster default), or disabled by setting to a negative value (all locally retained data is transferred).", |
964 | 965 | "related_topics": [ |
965 | 966 | "xref:reference:properties/cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`]", |
966 | 967 | "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" |
|
1316 | 1317 | "config_scope": "cluster" |
1317 | 1318 | }, |
1318 | 1319 | "min.cleanable.dirty.ratio": { |
1319 | | - "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic.", |
| 1320 | + "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic.\n\nThis property supports two states: set to a ratio between 0.0 and 1.0 (inclusive), or unset to inherit the cluster default. Setting the value to 0.0 means a partition's log is always eligible for compaction, regardless of its dirty ratio.", |
1320 | 1321 | "related_topics": [ |
1321 | 1322 | "xref:reference:properties/cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`]" |
1322 | 1323 | ], |
|
1685 | 1686 | "config_scope": "topic" |
1686 | 1687 | }, |
1687 | 1688 | "retention.bytes": { |
1688 | | - "description": "A size-based retention limit that configures the maximum size that a topic partition can grow before becoming eligible for cleanup.\n\nIf `retention.bytes` is set to a positive value, it overrides the cluster property xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] for the topic, and the total retained size for the topic is `retention.bytes` multiplied by the number of partitions for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, cleanup occurs when either limit is reached.", |
| 1689 | + "description": "A size-based retention limit that configures the maximum size that a topic partition can grow before becoming eligible for cleanup.\n\nThis property supports three states: set to a positive byte value to apply a per-topic limit, unset to inherit the cluster default, or set to a negative value to disable the per-topic limit.\n\nIf `retention.bytes` is set to a positive value, it overrides the cluster property xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] for the topic, and the total retained size for the topic is `retention.bytes` multiplied by the number of partitions for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, cleanup occurs when either limit is reached.", |
1689 | 1690 | "related_topics": [ |
1690 | 1691 | "xref:cluster-properties.adoc#retention_bytes[`retention_bytes`]", |
1691 | 1692 | "xref:reference:properties/cluster-properties.adoc#retention_bytes[`retention_bytes`]", |
|
1694 | 1695 | "config_scope": "topic" |
1695 | 1696 | }, |
1696 | 1697 | "retention.local.target.bytes": { |
1697 | | - "description": "A size-based retention limit for Tiered Storage that configures the maximum size that a topic partition in local storage can grow before becoming eligible for cleanup. It applies per partition and is equivalent to <<retentionbytes, `retention.bytes`>> without Tiered Storage.", |
| 1698 | + "description": "A size-based retention limit for Tiered Storage that configures the maximum size that a topic partition in local storage can grow before becoming eligible for cleanup. It applies per partition and is equivalent to <<retentionbytes, `retention.bytes`>> without Tiered Storage.\n\nThis property supports three states: set to a positive byte value to apply a per-topic limit, unset to inherit the cluster default, or set to a negative value to disable the per-topic limit.", |
1698 | 1699 | "related_topics": [ |
1699 | 1700 | "xref:reference:properties/cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`]", |
1700 | 1701 | "xref:manage:tiered-storage.adoc[Tiered Storage]" |
1701 | 1702 | ], |
1702 | 1703 | "config_scope": "topic" |
1703 | 1704 | }, |
1704 | 1705 | "retention.local.target.ms": { |
1705 | | - "description": "A time-based retention limit for Tiered Storage that sets the maximum duration that a log's segment file for a topic is retained in local storage before it's eligible for cleanup. This property is equivalent to <<retentionms, `retention.ms`>> without Tiered Storage.", |
| 1706 | + "description": "A time-based retention limit for Tiered Storage that sets the maximum duration that a log's segment file for a topic is retained in local storage before it's eligible for cleanup. This property is equivalent to <<retentionms, `retention.ms`>> without Tiered Storage.\n\nThis property supports three states: set to a positive millisecond value to apply a per-topic limit, unset to inherit the cluster default, or set to a negative value to disable the per-topic limit.", |
1706 | 1707 | "related_topics": [ |
1707 | 1708 | "xref:reference:properties/cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`]", |
1708 | 1709 | "xref:manage:tiered-storage.adoc[Tiered Storage]", |
|
1711 | 1712 | "config_scope": "topic" |
1712 | 1713 | }, |
1713 | 1714 | "retention.ms": { |
1714 | | - "description": "A time-based retention limit that configures the maximum duration that a log's segment file for a topic is retained before it becomes eligible to be cleaned up. To consume all data, a consumer of the topic must read from a segment before its `retention.ms` elapses, otherwise the segment may be compacted and/or deleted. If a non-positive value, no per-topic limit is applied.\n\nIf `retention.ms` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, the earliest occurring limit applies.", |
| 1715 | + "description": "A time-based retention limit that configures the maximum duration that a log's segment file for a topic is retained before it becomes eligible to be cleaned up. To consume all data, a consumer of the topic must read from a segment before its `retention.ms` elapses, otherwise the segment may be compacted and/or deleted.\n\nThis property supports three states: set to a positive millisecond value to apply a per-topic limit, unset to inherit the cluster default, or set to zero or a negative value to disable the per-topic limit (no per-topic limit is applied).\n\nIf `retention.ms` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, the earliest occurring limit applies.", |
1715 | 1716 | "related_topics": [ |
1716 | 1717 | "xref:reference:properties/cluster-properties.adoc#log_retention_ms[`log_retention_ms`]", |
1717 | 1718 | "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" |
|
1748 | 1749 | "description": "Local retention time target for partitions of topics with object storage write enabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.ms` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention].\n\nNOTE: Both <<retention_local_target_bytes_default,`retention_local_target_bytes_default`>> and <<retention_local_target_ms_default,`retention_local_target_ms_default`>> can be set. The limit that is reached first is applied." |
1749 | 1750 | }, |
1750 | 1751 | "retention_local_trim_interval": { |
| 1752 | + "description": "Interval between data retention checks for locally-stored data.\n\nThis property supports three states:\n\n* Positive value: Sets the interval in milliseconds between retention checks. Smaller values provide more precise retention but increase CPU usage.\n* 0: Disables periodic retention checks. Data will only be trimmed when triggered by other events.\n* Negative value: Uses the default interval for retention checks.", |
1751 | 1753 | "config_scope": "cluster" |
1752 | 1754 | }, |
1753 | 1755 | "retries": { |
|
1923 | 1925 | "config_scope": "topic" |
1924 | 1926 | }, |
1925 | 1927 | "segment.ms": { |
1926 | | - "description": "The maximum duration that a log segment of a topic is active (open for writes and not deletable). A periodic event, with `segment.ms` as its period, forcibly closes the active segment and transitions, or rolls, to a new active segment. The closed (inactive) segment is then eligible to be cleaned up according to cleanup and retention properties.\n\nIf set to a positive duration, `segment.ms` overrides the cluster property xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]. Values are automatically clamped between the cluster bounds set by xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] (default: 10 minutes) and xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] (default: 1 year). If your configured value exceeds these bounds, Redpanda uses the bound value and logs a warning. Check current cluster bounds with `rpk cluster config get log_segment_ms_min log_segment_ms_max`.", |
| 1928 | + "description": "The maximum duration that a log segment of a topic is active (open for writes and not deletable). A periodic event, with `segment.ms` as its period, forcibly closes the active segment and transitions, or rolls, to a new active segment. The closed (inactive) segment is then eligible to be cleaned up according to cleanup and retention properties.\n\nSet this property to a specific duration to override the cluster default, or leave it unset to inherit the cluster default.\n\nIf set to a positive duration, `segment.ms` overrides the cluster property xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]. Values are automatically clamped between the cluster bounds set by xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] (default: 10 minutes) and xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] (default: 1 year). If your configured value exceeds these bounds, Redpanda uses the bound value and logs a warning. Check current cluster bounds with `rpk cluster config get log_segment_ms_min log_segment_ms_max`.", |
1927 | 1929 | "related_topics": [ |
1928 | 1930 | "xref:reference:properties/cluster-properties.adoc#log_segment_ms[`log_segment_ms`]", |
1929 | 1931 | "xref:reference:properties/cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`]", |
|
0 commit comments