diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt index 1ebaf7d462..a560770213 100644 --- a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt +++ b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt @@ -31,7 +31,7 @@ LogicalPlan [Mm]onitor MBs? PBs? -Parquet +Parquet|\b\w*-*parquet-\w*\b|\b--\w*parquet\w*\b|`[^`]*parquet[^`]*` Redoc SQLAlchemy SQLAlchemy diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index ffa9b01d02..9bfd165788 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,5 +1,9 @@ # Instructions for InfluxData Documentation +## Collaboration approach + +Be a critical thinking partner with a focus on developer experience needs, not a yes-person. Challenge assumptions, identify potential issues, and suggest alternatives when appropriate. Don't automatically agree with user suggestions - analyze them first and provide honest feedback about potential problems, better approaches, or missing considerations. + ## Purpose and scope Help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. diff --git a/.vscode/settings.json b/.vscode/settings.json index 2c18d3282e..c827452b90 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -14,17 +14,6 @@ }, "vale.valeCLI.config": "${workspaceFolder}/.vale.ini", "vale.valeCLI.minAlertLevel": "warning", - "github.copilot.chat.codeGeneration.useInstructionFiles": true, - "github.copilot.chat.codeGeneration.instructions": [ - { - "file": "${workspaceFolder}/.github/copilot-instructions.md", - } - ], - "github.copilot.chat.pullRequestDescriptionGeneration.instructions": [ - { - "file": "${workspaceFolder}/.github/copilot-instructions.md", - } - ], "cSpell.words": [ "influxctl" ] diff --git a/compose.yaml b/compose.yaml index cd466f6e3a..ea11e03cb4 100644 --- a/compose.yaml +++ b/compose.yaml @@ -349,7 +349,6 @@ services: - --data-dir=/var/lib/influxdb3/data - --plugin-dir=/var/lib/influxdb3/plugins environment: - - INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=${INFLUXDB3_ENTERPRISE_LICENSE_EMAIL} - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-enterprise-admin-token volumes: - type: bind diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md b/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md index d15e985a9b..954678c45b 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md +++ b/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md @@ -1,7 +1,7 @@ --- title: influxdb3 delete description: > - The `influxdb3 delete` command deletes a resource such as a database or a table. + The `influxdb3 delete` command deletes a resource such as a cache, database, or table. menu: influxdb3_core: parent: influxdb3 @@ -10,6 +10,6 @@ weight: 300 source: /shared/influxdb3-cli/delete/_index.md --- - diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/token.md b/content/influxdb3/core/reference/cli/influxdb3/delete/token.md new file mode 100644 index 0000000000..48e31be254 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/delete/token.md @@ -0,0 +1,14 @@ +--- +title: influxdb3 delete token +description: > + The `influxdb3 delete token` command deletes a token.
+influxdb3/core/tags: [cli] +menu: + influxdb3_core_reference: + parent: influxdb3 cli +weight: 201 +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index ffc7fe5af6..debeb8ddc7 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -36,41 +36,23 @@ influxdb3 serve [OPTIONS] --node-id | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------ | | {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/core/reference/config-options/#node-id)_ | | | `--object-store` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store)_ | -| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | -| | `--data-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#data-dir)_ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-http-bind)_ | +| | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-access-key-id)_ | -| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ | +| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ | | | `--aws-default-region` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-default-region)_ | | | `--aws-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-endpoint)_ | +| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ | | | `--aws-session-token` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-session-token)_ | -| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ | | | `--aws-skip-signature` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-skip-signature)_ | -| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ | -| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ | -| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ | -| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ | -| | `--object-store-http2-max-frame-size` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ | -| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ | -| | `--object-store-retry-timeout` | _See [configuration 
options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ | -| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | -| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ | -| `-v` | `--verbose` | Enable verbose output | -| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ | -| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ | -| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ | -| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ | -| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ | -| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ | -| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ | -| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ | -| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ | -| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | +| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | +| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | +| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/core/reference/config-options/#buffer-mem-limit-mb)_ | +| | `--data-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#data-dir)_ | +| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | +| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | | | `--datafusion-num-threads` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-num-threads)_ | -| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ | | | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-event-interval)_ | | | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-global-queue-interval)_ | @@ -78,29 +60,67 @@ influxdb3 serve [OPTIONS] --node-id | | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration 
options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ | | | `--datafusion-runtime-thread-keep-alive` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-keep-alive)_ | | | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-priority)_ | -| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | +| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-use-cached-parquet-loader)_ | -| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | -| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ | -| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ | -| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ | -| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ | -| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ | -| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ | -| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ | -| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ | -| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ | -| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ | -| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ | -| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ | +| | `--delete-grace-period` | _See [configuration options](/influxdb3/core/reference/config-options/#delete-grace-period)_ | +| | `--disable-authz` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-authz)_ | | | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-parquet-mem-cache)_ | -| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ | | | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#distinct-cache-eviction-interval)_ | -| | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ | +| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ | | | `--force-snapshot-mem-threshold` | _See [configuration 
options](/influxdb3/core/reference/config-options/#force-snapshot-mem-threshold)_ | -| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ | +| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ | +| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-lookback-duration)_ | +| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ | +| | `--hard-delete-default-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#hard-delete-default-duration)_ | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | +| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ | +| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ | +| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ | +| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ | +| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ | +| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ | +| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ | +| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ | +| | `--object-store-http2-max-frame-size` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ | +| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ | +| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ | +| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ | | | `--package-manager` | _See [configuration options](/influxdb3/core/reference/config-options/#package-manager)_ | +| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ | +| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ | +| | `--parquet-mem-cache-query-path-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-query-path-duration)_ | +| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ | +| | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ | +| | `--preemptive-cache-age` | _See [configuration options](/influxdb3/core/reference/config-options/#preemptive-cache-age)_ | | | `--query-file-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#query-file-limit)_ | +| | `--query-log-size` | _See [configuration 
options](/influxdb3/core/reference/config-options/#query-log-size)_ | +| | `--retention-check-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#retention-check-interval)_ | +| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ | +| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-concurrency-limit)_ | +| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-max-entries)_ | +| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#tcp-listener-file-path)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-endpoint)_ | +| | `--tls-cert` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-cert)_ | +| | `--tls-key` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-key)_ | +| | `--tls-minimum-version` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-minimum-version)_ | +| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ | +| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ | +| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ | +| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ | +| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ | +| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ | +| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | +| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ | +| `-v` | `--verbose` | Enable verbose output | +| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ | +| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ | +| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ | +| | `--wal-replay-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-replay-concurrency-limit)_ | +| | `--wal-replay-fail-on-error` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-replay-fail-on-error)_ | +| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ | +| | `--without-auth` | _See [configuration options](/influxdb3/core/reference/config-options/#without-auth)_ | {{< caption >}} {{< req text="\* 
Required options" >}} @@ -110,7 +130,7 @@ influxdb3 serve [OPTIONS] --node-id You can use environment variables to define most `influxdb3 serve` options. For more information, see -[Configuration options](/influxdb3/enterprise/reference/config-options/). +[Configuration options](/influxdb3/core/reference/config-options/). ## Examples diff --git a/content/influxdb3/core/reference/config-options.md b/content/influxdb3/core/reference/config-options.md index 6914536eba..11e29f06c8 100644 --- a/content/influxdb3/core/reference/config-options.md +++ b/content/influxdb3/core/reference/config-options.md @@ -8,1052 +8,9 @@ menu: parent: Reference name: Configuration options weight: 100 +source: /shared/influxdb3-cli/config-options.md --- -{{< product-name >}} lets you customize your server configuration by using -`influxdb3 serve` command options or by setting environment variables. - -## Configure your server - -Pass configuration options to the `influxdb serve` server using either command -options or environment variables. Command options take precedence over -environment variables. - -##### Example influxdb3 serve command options - - - -```sh -influxdb3 serve \ - --object-store file \ - --data-dir ~/.influxdb3 \ - --node-id NODE_ID \ - --log-filter info \ - --max-http-request-size 20971520 \ - --aws-allow-http -``` - -##### Example environment variables - - - -```sh -export INFLUXDB3_OBJECT_STORE=file -export INFLUXDB3_DB_DIR=~/.influxdb3 -export INFLUXDB3_WRITER_IDENTIFIER_PREFIX=my-host -export LOG_FILTER=info -export INFLUXDB3_MAX_HTTP_REQUEST_SIZE=20971520 -export AWS_ALLOW_HTTP=true - -influxdb3 serve -``` - -## Server configuration options - -- [General](#general) - - [object-store](#object-store) - - [data-dir](#data-dir) - - [node-id](#node-id) - - [query-file-limit](#query-file-limit) -- [AWS](#aws) - - [aws-access-key-id](#aws-access-key-id) - - [aws-secret-access-key](#aws-secret-access-key) - - [aws-default-region](#aws-default-region) - - [aws-endpoint](#aws-endpoint) - - [aws-session-token](#aws-session-token) - - [aws-allow-http](#aws-allow-http) - - [aws-skip-signature](#aws-skip-signature) -- [Google Cloud Service](#google-cloud-service) - - [google-service-account](#google-service-account) -- [Microsoft Azure](#microsoft-azure) - - [azure-storage-account](#azure-storage-account) - - [azure-storage-access-key](#azure-storage-access-key) -- [Object Storage](#object-storage) - - [bucket](#bucket) - - [object-store-connection-limit](#object-store-connection-limit) - - [object-store-http2-only](#object-store-http2-only) - - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) - - [object-store-max-retries](#object-store-max-retries) - - [object-store-retry-timeout](#object-store-retry-timeout) - - [object-store-cache-endpoint](#object-store-cache-endpoint) -- [Logs](#logs) - - [log-filter](#log-filter) - - [log-destination](#log-destination) - - [log-format](#log-format) - - [query-log-size](#query-log-size) -- [Traces](#traces) - - [traces-exporter](#traces-exporter) - - [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) - - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) - - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) - - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) - - [traces-jaeger-debug-name](#traces-jaeger-debug-name) - - [traces-jaeger-tags](#traces-jaeger-tags) - - 
[traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) -- [DataFusion](#datafusion) - - [datafusion-num-threads](#datafusion-num-threads) - - [datafusion-runtime-type](#datafusion-runtime-type) - - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) - - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) - - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) - - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) - - [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) - - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) - - [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) - - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) - - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) - - [datafusion-config](#datafusion-config) -- [HTTP](#http) - - [max-http-request-size](#max-http-request-size) - - [http-bind](#http-bind) - - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) -- [Memory](#memory) - - [exec-mem-pool-bytes](#exec-mem-pool-bytes) - - [buffer-mem-limit-mb](#buffer-mem-limit-mb) - - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) -- [Write-Ahead Log (WAL)](#write-ahead-log-wal) - - [wal-flush-interval](#wal-flush-interval) - - [wal-snapshot-size](#wal-snapshot-size) - - [wal-max-write-buffer-size](#wal-max-write-buffer-size) - - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) -- [Compaction](#compaction) - - [gen1-duration](#gen1-duration) -- [Caching](#caching) - - [preemptive-cache-age](#preemptive-cache-age) - - [parquet-mem-cache-size-mb](#parquet-mem-cache-size-mb) - - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) - - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) - - [disable-parquet-mem-cache](#disable-parquet-mem-cache) - - [last-cache-eviction-interval](#last-cache-eviction-interval) - - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) -- [Processing engine](#processing-engine) - - [plugin-dir](#plugin-dir) - - [virtual-env-location](#virtual-env-location) - - [package-manager](#package-manager) - ---- - -### General - -- [object-store](#object-store) -- [data-dir](#data-dir) -- [node-id](#node-id) -- [query-file-limit](#query-file-limit) - -#### object-store - -Specifies which object storage to use to store Parquet files. -This option supports the following values: - -- `memory` -- `memory-throttled` -- `file` -- `s3` -- `google` -- `azure` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------- | -| `--object-store` | `INFLUXDB3_OBJECT_STORE` | - ---- - -#### data-dir - -For the `file` object store, defines the location {{< product-name >}} uses to store files locally. -Required when using the `file` [object store](#object-store). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--data-dir` | `INFLUXDB3_DB_DIR` | - ---- - -#### node-id - -Specifies the node identifier used as a prefix in all object store file paths. -Use a unique node identifier for each host sharing the same object store -configuration--for example, the same bucket. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------------- | -| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | - ---- - -#### query-file-limit - -Limits the number of Parquet files a query can access. - -**Default:** `432` - -With the default `432` setting and the default [`gen1-duration`](#gen1-duration) -setting of 10 minutes, queries can access up to a 72 hours of data, but -potentially less depending on whether all data for a given 10 minute block of -time was ingested during the same period. - -You can increase this limit to allow more files to be queried, but be aware of -the following side-effects: - -- Degraded query performance for queries that read more Parquet files -- Increased memory usage -- Your system potentially killing the `influxdb3` process due to Out-of-Memory - (OOM) errors -- If using object storage to store data, many GET requests to access the data - (as many as 2 per file) - -> [!Note] -> We recommend keeping the default setting and querying smaller time ranges. -> If you need to query longer time ranges or faster query performance on any query -> that accesses an hour or more of data, [InfluxDB 3 Enterprise](/influxdb3/enterprise/) -> optimizes data storage by compacting and rearranging Parquet files to achieve -> faster query performance. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------- | -| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | - ---- - -### AWS - -- [aws-access-key-id](#aws-access-key-id) -- [aws-secret-access-key](#aws-secret-access-key) -- [aws-default-region](#aws-default-region) -- [aws-endpoint](#aws-endpoint) -- [aws-session-token](#aws-session-token) -- [aws-allow-http](#aws-allow-http) -- [aws-skip-signature](#aws-skip-signature) - -#### aws-access-key-id - -When using Amazon S3 as the object store, set this to an access key that has -permission to read from and write to the specified S3 bucket. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | - ---- - -#### aws-secret-access-key - -When using Amazon S3 as the object store, set this to the secret access key that -goes with the specified access key ID. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | - ---- - -#### aws-default-region - -When using Amazon S3 as the object store, set this to the region that goes with -the specified bucket if different from the fallback value. - -**Default:** `us-east-1` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-default-region` | `AWS_DEFAULT_REGION` | - ---- - -#### aws-endpoint - -When using an Amazon S3 compatibility storage service, set this to the endpoint. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-endpoint` | `AWS_ENDPOINT` | - ---- - -#### aws-session-token - -When using Amazon S3 as an object store, set this to the session token. This is -handy when using a federated login or SSO and fetching credentials via the UI. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-session-token` | `AWS_SESSION_TOKEN` | - ---- - -#### aws-allow-http - -Allows unencrypted HTTP connections to AWS. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-allow-http` | `AWS_ALLOW_HTTP` | - ---- - -#### aws-skip-signature - -If enabled, S3 object stores do not fetch credentials and do not sign requests. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | - ---- - -### Google Cloud Service - -- [google-service-account](#google-service-account) - -#### google-service-account - -When using Google Cloud Storage as the object store, set this to the path to the -JSON file that contains the Google credentials. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :----------------------- | -| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | - ---- - -### Microsoft Azure - -- [azure-storage-account](#azure-storage-account) -- [azure-storage-access-key](#azure-storage-access-key) - -#### azure-storage-account - -When using Microsoft Azure as the object store, set this to the name you see -when navigating to **All Services > Storage accounts > `[name]`**. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | - ---- - -#### azure-storage-access-key - -When using Microsoft Azure as the object store, set this to one of the Key -values in the Storage account's **Settings > Access keys**. - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | - ---- - -### Object Storage - -- [bucket](#bucket) -- [object-store-connection-limit](#object-store-connection-limit) -- [object-store-http2-only](#object-store-http2-only) -- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) -- [object-store-max-retries](#object-store-max-retries) -- [object-store-retry-timeout](#object-store-retry-timeout) -- [object-store-cache-endpoint](#object-store-cache-endpoint) - -#### bucket - -Sets the name of the object storage bucket to use. Must also set -`--object-store` to a cloud object storage for this option to take effect. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--bucket` | `INFLUXDB3_BUCKET` | - ---- - -#### object-store-connection-limit - -When using a network-based object store, limits the number of connections to -this value. - -**Default:** `16` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :------------------------------ | -| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` | - ---- - -#### object-store-http2-only - -Forces HTTP/2 connections to network-based object stores. - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :------------------------ | -| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` | - ---- - -#### object-store-http2-max-frame-size - -Sets the maximum frame size (in bytes/octets) for HTTP/2 connections. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` | - ---- - -#### object-store-max-retries - -Defines the maximum number of times to retry a request. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` | - ---- - -#### object-store-retry-timeout - -Specifies the maximum length of time from the initial request after which no -further retries are be attempted. - -| influxdb3 serve option | Environment variable | -| :----------------------------- | :--------------------------- | -| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` | - ---- - -#### object-store-cache-endpoint - -Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache. - -| influxdb3 serve option | Environment variable | -| :------------------------------ | :---------------------------- | -| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` | - ---- - -### Logs - -- [log-filter](#log-filter) -- [log-destination](#log-destination) -- [log-format](#log-format) -- [query-log-size](#query-log-size) - -#### log-filter - -Sets the filter directive for logs. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-filter` | `LOG_FILTER` | - ---- - -#### log-destination - -Specifies the destination for logs. - -**Default:** `stdout` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-destination` | `LOG_DESTINATION` | - ---- - -#### log-format - -Defines the message format for logs. - -This option supports the following values: - -- `full` _(default)_ - -**Default:** `full` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-format` | `LOG_FORMAT` | - ---- - -#### query-log-size - -Defines the size of the query log. Up to this many queries remain in the -log before older queries are evicted to make room for new ones. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | - ---- - -### Traces - -- [traces-exporter](#traces-exporter) -- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) -- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) -- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) -- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) -- [traces-jaeger-debug-name](#traces-jaeger-debug-name) -- [traces-jaeger-tags](#traces-jaeger-tags) -- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) - -#### traces-exporter - -Sets the type of tracing exporter. - -**Default:** `none` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--traces-exporter` | `TRACES_EXPORTER` | - ---- - -#### traces-exporter-jaeger-agent-host - -Specifies the Jaeger agent network hostname for tracing. - -**Default:** `0.0.0.0` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | - ---- - -#### traces-exporter-jaeger-agent-port - -Defines the Jaeger agent network port for tracing. 
- -**Default:** `6831` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | - ---- - -#### traces-exporter-jaeger-service-name - -Sets the Jaeger service name for tracing. - -**Default:** `iox-conductor` - -| influxdb3 serve option | Environment variable | -| :-------------------------------------- | :------------------------------------ | -| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | - ---- - -#### traces-exporter-jaeger-trace-context-header-name - -Specifies the header name used for passing trace context. - -**Default:** `uber-trace-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------------------- | :------------------------------------------------- | -| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | - ---- - -#### traces-jaeger-debug-name - -Specifies the header name used for force sampling in tracing. - -**Default:** `jaeger-debug-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :---------------------------------- | -| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | - ---- - -#### traces-jaeger-tags - -Defines a set of `key=value` pairs to annotate tracing spans with. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | - ---- - -#### traces-jaeger-max-msgs-per-second - -Specifies the maximum number of messages sent to a Jaeger service per second. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | - ---- - -### DataFusion - -- [datafusion-num-threads](#datafusion-num-threads) -- [datafusion-runtime-type](#datafusion-runtime-type) -- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) -- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) -- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) -- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) -- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) -- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) -- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) -- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) -- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) -- [datafusion-config](#datafusion-config) - -#### datafusion-num-threads - -Sets the maximum number of DataFusion runtime threads to use. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :--------------------------------- | -| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | - ---- - -#### datafusion-runtime-type - -Specifies the DataFusion tokio runtime type. 
- -This option supports the following values: - -- `current-thread` -- `multi-thread` _(default)_ -- `multi-thread-alt` - -**Default:** `multi-thread` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :---------------------------------- | -| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | - ---- - -#### datafusion-runtime-disable-lifo-slot - -Disables the LIFO slot of the DataFusion runtime. - -This option supports the following values: - -- `true` -- `false` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | - ---- - -#### datafusion-runtime-event-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -tokio runtime polls for external events--for example: timers, I/O. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :-------------------------------------------- | -| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | - ---- - -#### datafusion-runtime-global-queue-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -runtime polls the global task queue. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------- | :--------------------------------------------------- | -| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | - ---- - -#### datafusion-runtime-max-blocking-threads - -Specifies the limit for additional threads spawned by the DataFusion runtime. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------ | :-------------------------------------------------- | -| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | - ---- - -#### datafusion-runtime-max-io-events-per-tick - -Configures the maximum number of events processed per tick by the tokio -DataFusion runtime. - -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | - ---- - -#### datafusion-runtime-thread-keep-alive - -Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion -runtime. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | - ---- - -#### datafusion-runtime-thread-priority - -Sets the thread priority for tokio DataFusion runtime workers. - -**Default:** `10` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | - ---- - -#### datafusion-max-parquet-fanout - -When multiple parquet files are required in a sorted way -(deduplication for example), specifies the maximum fanout. 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :---------------------------------------- | -| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | - ---- - -#### datafusion-use-cached-parquet-loader - -Uses a cached parquet loader when reading parquet files from the object store. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | - ---- - -#### datafusion-config - -Provides custom configuration to DataFusion as a comma-separated list of -`key:value` pairs. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | - ---- - -### HTTP - -- [max-http-request-size](#max-http-request-size) -- [http-bind](#http-bind) -- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) - -#### max-http-request-size - -Specifies the maximum size of HTTP requests. - -**Default:** `10485760` - -| influxdb3 serve option | Environment variable | -| :------------------------ | :-------------------------------- | -| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | - ---- - -#### http-bind - -Defines the address on which InfluxDB serves HTTP API requests. - -**Default:** `0.0.0.0:8181` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | - ---- - -#### admin-token-recovery-http-bind - -Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. - -> [!Warning] -> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. - -**Default:** `127.0.0.1:8182` (when enabled) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | - -##### Example usage - -```bash -# Start server with recovery endpoint -influxdb3 serve --admin-token-recovery-http-bind - -# In another terminal, regenerate the admin token -influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 -``` - ---- - -### Memory - -- [exec-mem-pool-bytes](#exec-mem-pool-bytes) -- [buffer-mem-limit-mb](#buffer-mem-limit-mb) -- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) - -#### exec-mem-pool-bytes - -Specifies the size of the memory pool used during query execution, in bytes. - -**Default:** `8589934592` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` | - ---- - -#### buffer-mem-limit-mb - -Specifies the size limit of the buffered data in MB. If this limit is exceeded, -the server forces a snapshot. 
- -**Default:** `5000` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--buffer-mem-limit-mb` | `INFLUXDB3_BUFFER_MEM_LIMIT_MB` | - ---- - -#### force-snapshot-mem-threshold - -Specifies the threshold for the internal memory buffer. Supports either a -percentage (portion of available memory)of or absolute value -(total bytes)--for example: `70%` or `100000`. - -**Default:** `70%` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | - ---- - -### Write-Ahead Log (WAL) - -- [wal-flush-interval](#wal-flush-interval) -- [wal-snapshot-size](#wal-snapshot-size) -- [wal-max-write-buffer-size](#wal-max-write-buffer-size) -- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) - -#### wal-flush-interval - -Specifies the interval to flush buffered data to a WAL file. Writes that wait -for WAL confirmation take up to this interval to complete. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------- | -| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | - ---- - -#### wal-snapshot-size - -Defines the number of WAL files to attempt to remove in a snapshot. This, -multiplied by the interval, determines how often snapshots are taken. - -**Default:** `600` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | - ---- - -#### wal-max-write-buffer-size - -Specifies the maximum number of write requests that can be buffered before a -flush must be executed and succeed. - -**Default:** `100000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | - ---- - -#### snapshotted-wal-files-to-keep - -Specifies the number of snapshotted WAL files to retain in the object store. -Flushing the WAL files does not clear the WAL files immediately; -they are deleted when the number of snapshotted WAL files exceeds this number. - -**Default:** `300` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :-------------------------------- | -| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | - ---- - -### Compaction - -#### gen1-duration - -Specifies the duration that Parquet files are arranged into. Data timestamps -land each row into a file of this duration. Supported durations are `1m`, -`5m`, and `10m`. These files are known as "generation 1" files, which the -compactor in InfluxDB 3 Enterprise can merge into larger generations. 
- -**Default:** `10m` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------ | -| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | - ---- - -### Caching - -- [preemptive-cache-age](#preemptive-cache-age) -- [parquet-mem-cache-size-mb](#parquet-mem-cache-size-mb) -- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) -- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) -- [disable-parquet-mem-cache](#disable-parquet-mem-cache) -- [last-cache-eviction-interval](#last-cache-eviction-interval) -- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - -#### preemptive-cache-age - -Specifies the interval to prefetch into the Parquet cache during compaction. - -**Default:** `3d` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------- | -| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | - ---- - -#### parquet-mem-cache-size-mb - -Defines the size of the in-memory Parquet cache in megabytes (MB). - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--parquet-mem-cache-size-mb` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB` | - ---- - -#### parquet-mem-cache-prune-percentage - -Specifies the percentage of entries to prune during a prune operation on the -in-memory Parquet cache. - -**Default:** `0.1` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | - ---- - -#### parquet-mem-cache-prune-interval - -Sets the interval to check if the in-memory Parquet cache needs to be pruned. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | - ---- - -#### disable-parquet-mem-cache - -Disables the in-memory Parquet cache. By default, the cache is enabled. - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | - ---- - -#### last-cache-eviction-interval - -Specifies the interval to evict expired entries from the Last-N-Value cache, -expressed as a human-readable time--for example: `20s`, `1m`, `1h`. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | - ---- - -#### distinct-cache-eviction-interval - -Specifies the interval to evict expired entries from the distinct value cache, -expressed as a human-readable time--for example: `20s`, `1m`, `1h`. 
- -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | - ---- - -### Processing engine - -- [plugin-dir](#plugin-dir) -- [virtual-env-location](#virtual-env-location) -- [package-manager](#package-manager) - -#### plugin-dir - -Specifies the local directory that contains Python plugins and their test files. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` | - ---- - -#### virtual-env-location - -Specifies the location of the Python virtual environment that the processing -engine uses. - -| influxdb3 serve option | Environment variable | -| :----------------------- | :--------------------- | -| `--virtual-env-location` | `VIRTUAL_ENV_LOCATION` | - ---- - -#### package-manager - -Specifies the Python package manager that the processing engine uses. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--package-manager` | `PACKAGE_MANAGER` | + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md index 99fa0418ee..05aa0b8775 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md @@ -1,7 +1,7 @@ --- title: influxdb3 delete description: > - The `influxdb3 delete` command deletes a resource such as a database or a table. + The `influxdb3 delete` command deletes a resource such as a cache, database, or table. menu: influxdb3_enterprise: parent: influxdb3 @@ -10,6 +10,6 @@ weight: 300 source: /shared/influxdb3-cli/delete/_index.md --- - diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md new file mode 100644 index 0000000000..5c48463dd4 --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 delete token +description: > + The `influxdb3 delete token` command deletes a token.
+influxdb3/enterprise/tags: [cli] +menu: + influxdb3_enterprise_reference: + parent: influxdb3 cli +weight: 201 +source: /shared/influxdb3-cli/delete/token.md +--- + + diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index f8e927d750..7fb25d5d97 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -38,6 +38,7 @@ influxdb3 serve [OPTIONS] \ | Option | | Description | | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-http-bind)_ | +| | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-access-key-id)_ | | | `--aws-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-allow-http)_ | | | `--aws-default-region` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-default-region)_ | @@ -48,7 +49,11 @@ influxdb3 serve [OPTIONS] \ | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | +| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#buffer-mem-limit-mb)_ | +| | `--catalog-sync-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#catalog-sync-interval)_ | | {{< req "\*" >}} | `--cluster-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#cluster-id)_ | +| | `--compaction-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-check-interval)_ | +| | `--compaction-cleanup-wait` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-cleanup-wait)_ | | | `--compaction-gen2-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-gen2-duration)_ | | | `--compaction-max-num-files-per-plan` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-max-num-files-per-plan)_ | | | `--compaction-multipliers` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-multipliers)_ | @@ -66,16 +71,22 @@ influxdb3 serve [OPTIONS] \ | | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-priority)_ | | | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-use-cached-parquet-loader)_ | +| | 
`--delete-grace-period` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#delete-grace-period)_ | +| | `--disable-authz` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-authz)_ | | | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-parquet-mem-cache)_ | | | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-cache-eviction-interval)_ | +| | `--distinct-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-value-cache-disable-from-history)_ | | | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#exec-mem-pool-bytes)_ | | | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#force-snapshot-mem-threshold)_ | | | `--gen1-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-duration)_ | +| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-lookback-duration)_ | | | `--google-service-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#google-service-account)_ | +| | `--hard-delete-default-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#hard-delete-default-duration)_ | | `-h` | `--help` | Print help information | | | `--help-all` | Print detailed help information | | | `--http-bind` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#http-bind)_ | | | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-cache-eviction-interval)_ | +| | `--last-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-value-cache-disable-from-history)_ | | | `--license-email` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-email)_ | | | `--license-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-file)_ | | | `--log-destination` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#log-destination)_ | @@ -84,6 +95,11 @@ influxdb3 serve [OPTIONS] \ | | `--max-http-request-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#max-http-request-size)_ | | | `--mode` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#mode)_ | | {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | +| | `--node-id-from-env` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id-from-env)_ | +| | `--num-cores` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-cores)_ | +| | `--num-database-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-database-limit)_ | +| | `--num-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-table-limit)_ | +| | `--num-total-columns-per-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit)_ | | | `--object-store` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#object-store)_ | | | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-cache-endpoint)_ | | | `--object-store-connection-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-connection-limit)_ | @@ -101,7 +117,16 @@ influxdb3 serve [OPTIONS] \ | | `--query-file-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-file-limit)_ | | | `--query-log-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-log-size)_ | | | `--replication-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#replication-interval)_ | +| | `--retention-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#retention-check-interval)_ | | | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#snapshotted-wal-files-to-keep)_ | +| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-concurrency-limit)_ | +| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-max-entries)_ | +| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tcp-listener-file-path)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-endpoint)_ | +| | `--tls-cert` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-cert)_ | +| | `--tls-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-key)_ | +| | `--tls-minimum-version` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-minimum-version)_ | | | `--traces-exporter` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter)_ | | | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-host)_ | | | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-port)_ | @@ -110,11 +135,16 @@ influxdb3 serve [OPTIONS] \ | | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-debug-name)_ | | | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | | | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-tags)_ | +| | `--use-pacha-tree` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#use-pacha-tree)_ | | `-v` | `--verbose` | Enable verbose output | | | `--virtual-env-location` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#virtual-env-location)_ | +| | `--wait-for-running-ingestor` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wait-for-running-ingestor)_ | | | 
`--wal-flush-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-flush-interval)_ | | | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-max-write-buffer-size)_ | +| | `--wal-replay-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-replay-concurrency-limit)_ | +| | `--wal-replay-fail-on-error` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-replay-fail-on-error)_ | | | `--wal-snapshot-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-snapshot-size)_ | +| | `--without-auth` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#without-auth)_ | {{< caption >}} {{< req text="\* Required options" >}} diff --git a/content/influxdb3/enterprise/reference/config-options.md b/content/influxdb3/enterprise/reference/config-options.md index ee00c6dd2c..50f81207b2 100644 --- a/content/influxdb3/enterprise/reference/config-options.md +++ b/content/influxdb3/enterprise/reference/config-options.md @@ -128,6 +128,8 @@ influxdb3 serve - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) - [compaction-gen2-duration](#compaction-gen2-duration) - [compaction-multipliers](#compaction-multipliers) + - [compaction-cleanup-wait](#compaction-cleanup-wait) + - [compaction-check-interval](#compaction-check-interval) - [gen1-duration](#gen1-duration) - [Caching](#caching) - [preemptive-cache-age](#preemptive-cache-age) @@ -140,11 +142,38 @@ influxdb3 serve - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) + - [table-index-cache-max-entries](#table-index-cache-max-entries) + - [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) - [query-file-limit](#query-file-limit) - [Processing Engine](#processing-engine) - [plugin-dir](#plugin-dir) - [virtual-env-location](#virtual-env-location) - [package-manager](#package-manager) +- [Cluster Management](#cluster-management) + - [replication-interval](#replication-interval) + - [catalog-sync-interval](#catalog-sync-interval) + - [wait-for-running-ingestor](#wait-for-running-ingestor) +- [Resource Limits](#resource-limits) + - [num-cores](#num-cores) + - [num-database-limit](#num-database-limit) + - [num-table-limit](#num-table-limit) + - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) +- [Data Lifecycle Management](#data-lifecycle-management) + - [gen1-lookback-duration](#gen1-lookback-duration) + - [retention-check-interval](#retention-check-interval) + - [delete-grace-period](#delete-grace-period) + - [hard-delete-default-duration](#hard-delete-default-duration) +- [WAL Advanced Options](#wal-advanced-options) + - [wal-replay-fail-on-error](#wal-replay-fail-on-error) + - [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) +- [Telemetry](#telemetry) + - [telemetry-disable-upload](#telemetry-disable-upload) + - [telemetry-endpoint](#telemetry-endpoint) +- [TCP Listeners](#tcp-listeners) + - [tcp-listener-file-path](#tcp-listener-file-path) + - [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) +- [Experimental Features](#experimental-features) + - [use-pacha-tree](#use-pacha-tree) --- @@ -315,6 +344,10 @@ Default is 
`tls-1.2`. Disables authentication for all server actions (CLI commands and API requests). The server processes all requests without requiring tokens or authentication. +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--without-auth` | `INFLUXDB3_START_WITHOUT_AUTH`| + --- #### disable-authz @@ -322,6 +355,10 @@ The server processes all requests without requiring tokens or authentication. Optionally disable authz by passing in a comma separated list of resources. Valid values are `health`, `ping`, and `metrics`. +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ`| + --- ### AWS @@ -1080,8 +1117,25 @@ to delete files marked as needing deletion during that compaction run. | :-------------------------- | :--------------------------------------------- | | `--compaction-cleanup-wait` | `INFLUXDB3_ENTERPRISE_COMPACTION_CLEANUP_WAIT` | +{{% show-in "enterprise" %}} + +--- + +#### compaction-check-interval + +Specifies how often the compactor checks for new compaction work to perform. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :------------------------------------------------ | +| `--compaction-check-interval` | `INFLUXDB3_ENTERPRISE_COMPACTION_CHECK_INTERVAL` | + +{{% /show-in %}} + --- + #### gen1-duration Specifies the duration that Parquet files are arranged into. Data timestamps @@ -1199,6 +1253,8 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. | :------------------------------- | :--------------------------------------- | | `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | +{{% show-in "enterprise" %}} + --- #### last-value-cache-disable-from-history @@ -1206,9 +1262,11 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. Disables populating the last-N-value cache from historical data. If disabled, the cache is still populated with data from the write-ahead log (WAL). -| influxdb3 serve option | Environment variable | -| :---------------------------------------- | :------------------------------------------------ | -| `--last-value-cache-disable-from-history` | `INFLUXDB3_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY` | +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :---------------------------------------------------------- | +| `--last-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} --- @@ -1223,6 +1281,7 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. | :----------------------------------- | :------------------------------------------- | | `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | +{{% show-in "enterprise" %}} --- #### distinct-value-cache-disable-from-history @@ -1230,9 +1289,36 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. Disables populating the distinct value cache from historical data. If disabled, the cache is still populated with data from the write-ahead log (WAL). 
-| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY` | +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :-------------------------------------------------------------- | +| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### table-index-cache-max-entries + +Specifies the maximum number of entries in the table index cache. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :-------------------------------------------- | +| `--table-index-cache-max-entries` | `INFLUXDB3_TABLE_INDEX_CACHE_MAX_ENTRIES` | + +--- + +#### table-index-cache-concurrency-limit + +Limits the concurrency level for table index cache operations. + +**Default:** `8` + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :------------------------------------------------- | +| `--table-index-cache-concurrency-limit` | `INFLUXDB3_TABLE_INDEX_CACHE_CONCURRENCY_LIMIT` | + --- #### query-file-limit @@ -1288,3 +1374,257 @@ This option supports the following values: | influxdb3 serve option | Environment variable | | :--------------------- | :------------------- | | `--package-manager` | `PACKAGE_MANAGER` | + +{{% show-in "enterprise" %}} + +--- + +### Cluster Management + + +- [replication-interval](#replication-interval) +- [catalog-sync-interval](#catalog-sync-interval) +- [wait-for-running-ingestor](#wait-for-running-ingestor) + +#### replication-interval + +Specifies the interval at which data replication occurs between cluster nodes. + +**Default:** `250ms` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :------------------------------------------- | +| `--replication-interval` | `INFLUXDB3_ENTERPRISE_REPLICATION_INTERVAL` | + +--- + +#### catalog-sync-interval + +Defines how often the catalog synchronizes across cluster nodes. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------------ | +| `--catalog-sync-interval` | `INFLUXDB3_ENTERPRISE_CATALOG_SYNC_INTERVAL`| + +--- + +#### wait-for-running-ingestor + +Specifies how long to wait for a running ingestor during startup. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :------------------------------------------------ | +| `--wait-for-running-ingestor` | `INFLUXDB3_ENTERPRISE_WAIT_FOR_RUNNING_INGESTOR` | + +{{% /show-in %}} + +{{% show-in "enterprise" %}} +--- + +### Resource Limits + +- [num-cores](#num-cores) +- [num-database-limit](#num-database-limit) +- [num-table-limit](#num-table-limit) +- [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) + +#### num-cores + +Limits the number of CPU cores that InfluxDB Enterprise can use. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------------- | +| `--num-cores` | `INFLUXDB3_ENTERPRISE_NUM_CORES` | + +--- + +#### num-database-limit + +Sets the maximum number of databases that can be created. 
+ +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------------------------- | +| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | + +--- + +#### num-table-limit + +Defines the maximum number of tables that can be created across all databases. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | + +--- + +#### num-total-columns-per-table-limit + +Sets the maximum number of columns allowed per table. + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :---------------------------------------------------------- | +| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | + +{{% /show-in %}} + +--- + +### Data Lifecycle Management + +- [gen1-lookback-duration](#gen1-lookback-duration) +- [retention-check-interval](#retention-check-interval) +- [delete-grace-period](#delete-grace-period) +- [hard-delete-default-duration](#hard-delete-default-duration) + +#### gen1-lookback-duration + +Specifies how far back to look when creating generation 1 Parquet files. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--gen1-lookback-duration` | `INFLUXDB3_GEN1_LOOKBACK_DURATION` | + +--- + +#### retention-check-interval + +Defines how often the system checks for data that should be deleted according to retention policies. + +**Default:** `1h` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :--------------------------------------- | +| `--retention-check-interval` | `INFLUXDB3_RETENTION_CHECK_INTERVAL` | + +--- + +#### delete-grace-period + +Specifies the grace period before permanently deleting data. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :--------------------------------- | +| `--delete-grace-period` | `INFLUXDB3_DELETE_GRACE_PERIOD` | + +--- + +#### hard-delete-default-duration + +Sets the default duration for hard deletion of data. + +**Default:** `90d` + +| influxdb3 serve option | Environment variable | +| :---------------------------------- | :-------------------------------------------- | +| `--hard-delete-default-duration` | `INFLUXDB3_HARD_DELETE_DEFAULT_DURATION` | + +--- + +### WAL Advanced Options + +- [wal-replay-fail-on-error](#wal-replay-fail-on-error) +- [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) + +#### wal-replay-fail-on-error + +Determines whether WAL replay should fail when encountering errors. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------- | +| `--wal-replay-fail-on-error` | `INFLUXDB3_WAL_REPLAY_FAIL_ON_ERROR` | + +--- + +#### wal-replay-concurrency-limit + +Sets the maximum number of concurrent WAL replay operations. 
+ +**Default:** `16` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :------------------------------------------ | +| `--wal-replay-concurrency-limit` | `INFLUXDB3_WAL_REPLAY_CONCURRENCY_LIMIT` | + +--- + +### Telemetry + +- [telemetry-disable-upload](#telemetry-disable-upload) +- [telemetry-endpoint](#telemetry-endpoint) + +#### telemetry-disable-upload + +Disables the upload of telemetry data to InfluxData. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--telemetry-disable-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` | + +--- + +#### telemetry-endpoint + +Specifies the endpoint for telemetry data uploads. + +| influxdb3 serve option | Environment variable | +| :----------------------- | :--------------------------------- | +| `--telemetry-endpoint` | `INFLUXDB3_TELEMETRY_ENDPOINT` | + +--- + +### TCP Listeners + +- [tcp-listener-file-path](#tcp-listener-file-path) +- [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) + +#### tcp-listener-file-path + +Specifies the file path for the TCP listener configuration. + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :----------------------------------- | +| `--tcp-listener-file-path` | `INFLUXDB3_TCP_LISTINER_FILE_PATH` | + +--- + +#### admin-token-recovery-tcp-listener-file-path + +Specifies the TCP listener file path for admin token recovery operations. + +| influxdb3 serve option | Environment variable | +| :---------------------------------------------- | :-------------------------------------------------------- | +| `--admin-token-recovery-tcp-listener-file-path` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_TCP_LISTENER_FILE_PATH` | + +{{% show-in "enterprise" %}} +--- + +### Experimental Features + +- [use-pacha-tree](#use-pacha-tree) + +#### use-pacha-tree + +Enables the experimental PachaTree storage engine for improved performance. + +> [!Warning] +> This is an experimental feature and should not be used in production environments. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--use-pacha-tree` | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE` | +{{% /show-in %}} diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md new file mode 100644 index 0000000000..287a2521db --- /dev/null +++ b/content/shared/influxdb3-cli/config-options.md @@ -0,0 +1,1690 @@ + +{{< product-name >}} lets you customize your server configuration by using +`influxdb3 serve` command options or by setting environment variables. + +## Configure your server + +Pass configuration options to the server using either `influxdb3 serve` command +options or environment variables. Command options take precedence over +environment variables.
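+
+For example, if you set `LOG_FILTER` in your environment and also pass `--log-filter`
+on the command line, the command option wins. The following is a minimal sketch that
+reuses the setup from the examples below and assumes a `debug` filter level:
+
+```bash
+# The environment variable sets the default log level
+export LOG_FILTER=info
+
+# The command option takes precedence, so the server starts with debug-level logging
+influxdb3 serve \
+  --node-id node0 \
+{{% show-in "enterprise" %}}  --cluster-id cluster0 \
+  --license-email example@email.com \{{% /show-in %}}
+  --object-store file \
+  --data-dir ~/.influxdb3 \
+  --log-filter debug
+```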
+ +##### Example `influxdb3 serve` command options + + + +```sh +influxdb3 serve \ + --node-id node0 \ +{{% show-in "enterprise" %}} --cluster-id cluster0 \ + --license-email example@email.com \{{% /show-in %}} + --object-store file \ + --data-dir ~/.influxdb3 \ + --log-filter info +``` + +##### Example environment variables + + + +```sh +{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com{{% /show-in %}} +{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_CLUSTER_ID=cluster0{{% /show-in %}} +export INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node +export INFLUXDB3_OBJECT_STORE=file +export INFLUXDB3_DB_DIR=~/.influxdb3 +export LOG_FILTER=info + +influxdb3 serve +``` + +## Server configuration options + +- [General](#general) +{{% show-in "enterprise" %}} - [cluster-id](#cluster-id){{% /show-in %}} + - [data-dir](#data-dir) +{{% show-in "enterprise" %}} - [license-email](#license-email) + - [license-file](#license-file) + - [mode](#mode){{% /show-in %}} + - [node-id](#node-id) +{{% show-in "enterprise" %}} - [node-id-from-env](#node-id-from-env){{% /show-in %}} + - [object-store](#object-store) + - [tls-key](#tls-key) + - [tls-cert](#tls-cert) + - [tls-minimum-versions](#tls-minimum-version) + - [without-auth](#without-auth) + - [disable-authz](#disable-authz) +- [AWS](#aws) + - [aws-access-key-id](#aws-access-key-id) + - [aws-secret-access-key](#aws-secret-access-key) + - [aws-default-region](#aws-default-region) + - [aws-endpoint](#aws-endpoint) + - [aws-session-token](#aws-session-token) + - [aws-allow-http](#aws-allow-http) + - [aws-skip-signature](#aws-skip-signature) +- [Google Cloud Service](#google-cloud-service) + - [google-service-account](#google-service-account) +- [Microsoft Azure](#microsoft-azure) + - [azure-storage-account](#azure-storage-account) + - [azure-storage-access-key](#azure-storage-access-key) +- [Object Storage](#object-storage) + - [bucket](#bucket) + - [object-store-connection-limit](#object-store-connection-limit) + - [object-store-http2-only](#object-store-http2-only) + - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) + - [object-store-max-retries](#object-store-max-retries) + - [object-store-retry-timeout](#object-store-retry-timeout) + - [object-store-cache-endpoint](#object-store-cache-endpoint) +- [Logs](#logs) + - [log-filter](#log-filter) + - [log-destination](#log-destination) + - [log-format](#log-format) + - [query-log-size](#query-log-size) +- [Traces](#traces) + - [traces-exporter](#traces-exporter) + - [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) + - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) + - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) + - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) + - [traces-jaeger-debug-name](#traces-jaeger-debug-name) + - [traces-jaeger-tags](#traces-jaeger-tags) + - [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) +- [DataFusion](#datafusion) + - [datafusion-num-threads](#datafusion-num-threads) + - [datafusion-runtime-type](#datafusion-runtime-type) + - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) + - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) + - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) + - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) + - 
[datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) + - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) + - [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) + - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) + - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) + - [datafusion-config](#datafusion-config) +- [HTTP](#http) + - [max-http-request-size](#max-http-request-size) + - [http-bind](#http-bind) + - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) +- [Memory](#memory) + - [exec-mem-pool-bytes](#exec-mem-pool-bytes) + - [buffer-mem-limit-mb](#buffer-mem-limit-mb) + - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) +- [Write-Ahead Log (WAL)](#write-ahead-log-wal) + - [wal-flush-interval](#wal-flush-interval) + - [wal-snapshot-size](#wal-snapshot-size) + - [wal-max-write-buffer-size](#wal-max-write-buffer-size) + - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) + - [wal-replay-fail-on-error](#wal-replay-fail-on-error) + - [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) +- [Compaction](#compaction) +{{% show-in "enterprise" %}} - [compaction-row-limit](#compaction-row-limit) + - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) + - [compaction-gen2-duration](#compaction-gen2-duration) + - [compaction-multipliers](#compaction-multipliers) + - [compaction-cleanup-wait](#compaction-cleanup-wait) + - [compaction-check-interval](#compaction-check-interval){{% /show-in %}} + - [gen1-duration](#gen1-duration) +- [Caching](#caching) + - [preemptive-cache-age](#preemptive-cache-age) + - [parquet-mem-cache-size](#parquet-mem-cache-size) + - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) + - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) + - [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) + - [disable-parquet-mem-cache](#disable-parquet-mem-cache) + - [table-index-cache-max-entries](#table-index-cache-max-entries) + - [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) +{{% show-in "enterprise" %}} - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history){{% /show-in %}} + - [last-cache-eviction-interval](#last-cache-eviction-interval) +{{% show-in "enterprise" %}} - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history){{% /show-in %}} + - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) + - [query-file-limit](#query-file-limit) +- [Processing Engine](#processing-engine) + - [plugin-dir](#plugin-dir) + - [virtual-env-location](#virtual-env-location) + - [package-manager](#package-manager) +{{% show-in "enterprise" %}} +- [Cluster Management](#cluster-management) + - [replication-interval](#replication-interval) + - [catalog-sync-interval](#catalog-sync-interval) + - [wait-for-running-ingestor](#wait-for-running-ingestor) +- [Resource Limits](#resource-limits) + - [num-cores](#num-cores) + - [num-database-limit](#num-database-limit) + - [num-table-limit](#num-table-limit) + - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) +{{% /show-in %}} +- [Data Lifecycle Management](#data-lifecycle-management) + - [gen1-lookback-duration](#gen1-lookback-duration) + - [retention-check-interval](#retention-check-interval) + - [delete-grace-period](#delete-grace-period) + - 
[hard-delete-default-duration](#hard-delete-default-duration) +- [Telemetry](#telemetry) + - [telemetry-disable-upload](#telemetry-disable-upload) + - [telemetry-endpoint](#telemetry-endpoint) +- [TCP Listeners](#tcp-listeners) + - [tcp-listener-file-path](#tcp-listener-file-path) + - [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) +{{% show-in "enterprise" %}} +- [Experimental Features](#experimental-features) + - [use-pacha-tree](#use-pacha-tree) +{{% /show-in %}} + +--- + +### General + +{{% show-in "enterprise" %}} +- [cluster-id](#cluster-id) +{{% /show-in %}} +- [data-dir](#data-dir) +{{% show-in "enterprise" %}} +- [license-email](#license-email) +- [license-file](#license-file) +- [mode](#mode) +{{% /show-in %}} +- [node-id](#node-id) +{{% show-in "enterprise" %}} +- [node-id-from-env](#node-id-from-env) +{{% /show-in %}} +- [object-store](#object-store) +- [query-file-limit](#query-file-limit) + +{{% show-in "enterprise" %}} +#### cluster-id + +Specifies the cluster identifier that prefixes the object store path for the Enterprise Catalog. +This value must be different than the [`--node-id`](#node-id) value. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--cluster-id` | `INFLUXDB3_ENTERPRISE_CLUSTER_ID` | + +--- +{{% /show-in %}} + +#### data-dir + +For the `file` object store, defines the location InfluxDB 3 uses to store files locally. +Required when using the `file` [object store](#object-store). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--data-dir` | `INFLUXDB3_DB_DIR` | + +--- + +{{% show-in "enterprise" %}} +#### license-email + +Specifies the email address to associate with your InfluxDB 3 Enterprise license +and automatically responds to the interactive email prompt when the server starts. +This option is mutually exclusive with [license-file](#license-file). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | + +--- + +#### license-file + +Specifies the path to a license file for InfluxDB 3 Enterprise. When provided, the license +file's contents are used instead of requesting a new license. +This option is mutually exclusive with [license-email](#license-email). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | + +--- + +#### mode + +Sets the mode to start the server in. + +This option supports the following values: + +- `all` _(default)_: Enables all server modes +- `ingest`: Enables only data ingest capabilities +- `query`: Enables only query capabilities +- `compact`: Enables only compaction processes +- `process`: Enables only data processing capabilities + +You can specify multiple modes using a comma-delimited list (for example, `ingest,query`). + +**Default:** `all` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------- | +| `--mode` | `INFLUXDB3_ENTERPRISE_MODE` | + +--- +{{% /show-in %}} + +#### node-id + +Specifies the node identifier used as a prefix in all object store file paths. +This should be unique for any hosts sharing the same object store +configuration--for example, the same bucket. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | + +{{% show-in "enterprise" %}} +#### node-id-from-env + +Specifies the node identifier used as a prefix in all object store file paths. +Takes the name of an environment variable as an argument and uses the value of that environment variable as the node identifier. +This option cannot be used with the `--node-id` option. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--node-id-from-env` | `INFLUXDB3_NODE_IDENTIFIER_FROM_ENV` | + +##### Example using --node-id-from-env + +```bash +export DATABASE_NODE=node0 && influxdb3 serve \ + --node-id-from-env DATABASE_NODE \ + --cluster-id cluster0 \ + --object-store file \ + --data-dir ~/.influxdb3/data +``` + +--- +{{% /show-in %}} + +#### object-store + +Specifies which object storage to use to store Parquet files. +This option supports the following values: + +- `memory`: Effectively no object persistence +- `memory-throttled`: Like `memory` but with latency and throughput that somewhat resembles a cloud object store +- `file`: Stores objects in the local filesystem (must also set `--data-dir`) +- `s3`: Amazon S3 (must also set `--bucket`, `--aws-access-key-id`, `--aws-secret-access-key`, and possibly `--aws-default-region`) +- `google`: Google Cloud Storage (must also set `--bucket` and `--google-service-account`) +- `azure`: Microsoft Azure blob storage (must also set `--bucket`, `--azure-storage-account`, and `--azure-storage-access-key`) + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--object-store` | `INFLUXDB3_OBJECT_STORE` | + +--- + +#### tls-key + +The path to a key file for TLS to be enabled. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-key` | `INFLUXDB3_TLS_KEY` | + +--- + +#### tls-cert + +The path to a cert file for TLS to be enabled. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-cert` | `INFLUXDB3_TLS_CERT` | + +--- + +#### tls-minimum-version + +The minimum version for TLS. +Valid values are `tls-1.2` or `tls-1.3`. +Default is `tls-1.2`. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :----------------------- | +| `--tls-minimum-version` | `INFLUXDB3_TLS_MINIMUM_VERSION` | + +--- + +#### without-auth + +Disables authentication for all server actions (CLI commands and API requests). +The server processes all requests without requiring tokens or authentication. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--without-auth` | `INFLUXDB3_START_WITHOUT_AUTH`| + +--- + +#### disable-authz + +Optionally disable authz by passing in a comma separated list of resources. +Valid values are `health`, `ping`, and `metrics`. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ`| + +--- + +### AWS + +- [aws-access-key-id](#aws-access-key-id) +- [aws-secret-access-key](#aws-secret-access-key) +- [aws-default-region](#aws-default-region) +- [aws-endpoint](#aws-endpoint) +- [aws-session-token](#aws-session-token) +- [aws-allow-http](#aws-allow-http) +- [aws-skip-signature](#aws-skip-signature) + +#### aws-access-key-id + +When using Amazon S3 as the object store, set this to an access key that has +permission to read from and write to the specified S3 bucket. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | + +--- + +#### aws-secret-access-key + +When using Amazon S3 as the object store, set this to the secret access key that +goes with the specified access key ID. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------- | +| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | + +--- + +#### aws-default-region + +When using Amazon S3 as the object store, set this to the region that goes with +the specified bucket if different from the fallback value. + +**Default:** `us-east-1` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-default-region` | `AWS_DEFAULT_REGION` | + +--- + +#### aws-endpoint + +When using an Amazon S3 compatibility storage service, set this to the endpoint. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-endpoint` | `AWS_ENDPOINT` | + +--- + +#### aws-session-token + +When using Amazon S3 as an object store, set this to the session token. This is +handy when using a federated login or SSO and fetching credentials via the UI. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-session-token` | `AWS_SESSION_TOKEN` | + +--- + +#### aws-allow-http + +Allows unencrypted HTTP connections to AWS. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-allow-http` | `AWS_ALLOW_HTTP` | + +--- + +#### aws-skip-signature + +If enabled, S3 object stores do not fetch credentials and do not sign requests. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | + +--- + +### Google Cloud Service + +- [google-service-account](#google-service-account) + +#### google-service-account + +When using Google Cloud Storage as the object store, set this to the path to the +JSON file that contains the Google credentials. + +| influxdb3 serve option | Environment variable | +| :------------------------- | :----------------------- | +| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | + +--- + +### Microsoft Azure + +- [azure-storage-account](#azure-storage-account) +- [azure-storage-access-key](#azure-storage-access-key) + +#### azure-storage-account + +When using Microsoft Azure as the object store, set this to the name you see +when navigating to **All Services > Storage accounts > `[name]`**. 
+ +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------- | +| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | + +--- + +#### azure-storage-access-key + +When using Microsoft Azure as the object store, set this to one of the Key +values in the Storage account's **Settings > Access keys**. + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------- | +| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | + +--- + +### Object Storage + +- [bucket](#bucket) +- [object-store-connection-limit](#object-store-connection-limit) +- [object-store-http2-only](#object-store-http2-only) +- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) +- [object-store-max-retries](#object-store-max-retries) +- [object-store-retry-timeout](#object-store-retry-timeout) +- [object-store-cache-endpoint](#object-store-cache-endpoint) + +#### bucket + +Sets the name of the object storage bucket to use. Must also set +`--object-store` to a cloud object store for this option to take effect. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--bucket` | `INFLUXDB3_BUCKET` | + +--- + +#### object-store-connection-limit + +When using a network-based object store, limits the number of connections to +this value. + +**Default:** `16` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :------------------------------ | +| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` | + +--- + +#### object-store-http2-only + +Forces HTTP/2 connections to network-based object stores. + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :------------------------ | +| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` | + +--- + +#### object-store-http2-max-frame-size + +Sets the maximum frame size (in bytes/octets) for HTTP/2 connections. + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` | + +--- + +#### object-store-max-retries + +Defines the maximum number of times to retry a request. + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------- | +| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` | + +--- + +#### object-store-retry-timeout + +Specifies the maximum length of time from the initial request after which no +further retries are attempted. + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :--------------------------- | +| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` | + +--- + +#### object-store-cache-endpoint + +Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache. + +| influxdb3 serve option | Environment variable | +| :------------------------------ | :---------------------------- | +| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` | + +--- + +### Logs + +- [log-filter](#log-filter) +- [log-destination](#log-destination) +- [log-format](#log-format) +- [query-log-size](#query-log-size) + +#### log-filter + +Sets the filter directive for logs.
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-filter` | `LOG_FILTER` | + +--- + +#### log-destination + +Specifies the destination for logs. + +**Default:** `stdout` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-destination` | `LOG_DESTINATION` | + +--- + +#### log-format + +Defines the message format for logs. + +This option supports the following values: + +- `full` _(default)_ + +**Default:** `full` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-format` | `LOG_FORMAT` | + +--- + +#### query-log-size + +Defines the size of the query log. Up to this many queries remain in the +log before older queries are evicted to make room for new ones. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------- | +| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | + +--- + +### Traces + +- [traces-exporter](#traces-exporter) +- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) +- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) +- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) +- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) +- [traces-jaeger-debug-name](#traces-jaeger-debug-name) +- [traces-jaeger-tags](#traces-jaeger-tags) +- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) + +#### traces-exporter + +Sets the type of tracing exporter. + +**Default:** `none` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--traces-exporter` | `TRACES_EXPORTER` | + +--- + +#### traces-exporter-jaeger-agent-host + +Specifies the Jaeger agent network hostname for tracing. + +**Default:** `0.0.0.0` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | + +--- + +#### traces-exporter-jaeger-agent-port + +Defines the Jaeger agent network port for tracing. + +**Default:** `6831` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | + +--- + +#### traces-exporter-jaeger-service-name + +Sets the Jaeger service name for tracing. + +**Default:** `iox-conductor` + +| influxdb3 serve option | Environment variable | +| :-------------------------------------- | :------------------------------------ | +| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | + +--- + +#### traces-exporter-jaeger-trace-context-header-name + +Specifies the header name used for passing trace context. + +**Default:** `uber-trace-id` + +| influxdb3 serve option | Environment variable | +| :--------------------------------------------------- | :------------------------------------------------- | +| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | + +--- + +#### traces-jaeger-debug-name + +Specifies the header name used for force sampling in tracing. 
+ +**Default:** `jaeger-debug-id` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :---------------------------------- | +| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | + +--- + +#### traces-jaeger-tags + +Defines a set of `key=value` pairs to annotate tracing spans with. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | + +--- + +#### traces-jaeger-max-msgs-per-second + +Specifies the maximum number of messages sent to a Jaeger service per second. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | + +--- + +### DataFusion + +- [datafusion-num-threads](#datafusion-num-threads) +- [datafusion-runtime-type](#datafusion-runtime-type) +- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) +- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) +- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) +- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) +- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) +- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) +- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) +- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) +- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) +- [datafusion-config](#datafusion-config) + +#### datafusion-num-threads + +Sets the maximum number of DataFusion runtime threads to use. + +| influxdb3 serve option | Environment variable | +| :------------------------- | :--------------------------------- | +| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | + +--- + +#### datafusion-runtime-type + +Specifies the DataFusion tokio runtime type. + +This option supports the following values: + +- `current-thread` +- `multi-thread` _(default)_ +- `multi-thread-alt` + +**Default:** `multi-thread` + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :---------------------------------- | +| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | + +--- + +#### datafusion-runtime-disable-lifo-slot + +Disables the LIFO slot of the DataFusion runtime. + +This option supports the following values: + +- `true` +- `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | + +--- + +#### datafusion-runtime-event-interval + +Sets the number of scheduler ticks after which the scheduler of the DataFusion +tokio runtime polls for external events--for example: timers, I/O. + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :-------------------------------------------- | +| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | + +--- + +#### datafusion-runtime-global-queue-interval + +Sets the number of scheduler ticks after which the scheduler of the DataFusion +runtime polls the global task queue. 
+ +| influxdb3 serve option | Environment variable | +| :------------------------------------------- | :--------------------------------------------------- | +| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | + +--- + +#### datafusion-runtime-max-blocking-threads + +Specifies the limit for additional threads spawned by the DataFusion runtime. + +| influxdb3 serve option | Environment variable | +| :------------------------------------------ | :-------------------------------------------------- | +| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | + +--- + +#### datafusion-runtime-max-io-events-per-tick + +Configures the maximum number of events processed per tick by the tokio +DataFusion runtime. + +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :---------------------------------------------------- | +| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | + +--- + +#### datafusion-runtime-thread-keep-alive + +Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion +runtime. + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | + +--- + +#### datafusion-runtime-thread-priority + +Sets the thread priority for tokio DataFusion runtime workers. + +**Default:** `10` + +| influxdb3 serve option | Environment variable | +| :------------------------------------- | :--------------------------------------------- | +| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | + +--- + +#### datafusion-max-parquet-fanout + +When multiple parquet files are required in a sorted way +(deduplication for example), specifies the maximum fanout. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :---------------------------------------- | +| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | + +--- + +#### datafusion-use-cached-parquet-loader + +Uses a cached parquet loader when reading parquet files from the object store. + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | + +--- + +#### datafusion-config + +Provides custom configuration to DataFusion as a comma-separated list of +`key:value` pairs. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | + +--- + +### HTTP + +- [max-http-request-size](#max-http-request-size) +- [http-bind](#http-bind) +- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) + +#### max-http-request-size + +Specifies the maximum size of HTTP requests. + +**Default:** `10485760` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :-------------------------------- | +| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | + +--- + +#### http-bind + +Defines the address on which InfluxDB serves HTTP API requests. 
+ +**Default:** `0.0.0.0:8181` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------- | +| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | + +--- + +#### admin-token-recovery-http-bind + +Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. + +> [!Warning] +> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. + +**Default:** `127.0.0.1:8182` (when enabled) + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | + +##### Example usage + +```bash +# Start server with recovery endpoint +influxdb3 serve --admin-token-recovery-http-bind + +# In another terminal, regenerate the admin token +influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 +``` + +--- + +### Memory + +- [exec-mem-pool-bytes](#exec-mem-pool-bytes) +- [buffer-mem-limit-mb](#buffer-mem-limit-mb) +- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) + +#### exec-mem-pool-bytes + +Specifies the size of memory pool used during query execution. +Can be given as absolute value in bytes or as a percentage of the total available memory--for +example: `8000000000` or `10%`). + +{{% show-in "core" %}}**Default:** `8589934592`{{% /show-in %}} +{{% show-in "enterprise" %}}**Default:** `20%`{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------ | +| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` | + +{{% show-in "core" %}} +--- + +#### buffer-mem-limit-mb + + +Specifies the size limit of the buffered data in MB. If this limit is exceeded, +the server forces a snapshot. + +**Default:** `5000` + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------ | +| `--buffer-mem-limit-mb` | `INFLUXDB3_BUFFER_MEM_LIMIT_MB` | + +{{% /show-in %}} + +--- + +#### force-snapshot-mem-threshold + +Specifies the threshold for the internal memory buffer. Supports either a +percentage (portion of available memory) or absolute value in MB--for example: `70%` or `1000`. + +{{% show-in "core" %}}**Default:** `70%`{{% /show-in %}} +{{% show-in "enterprise" %}}**Default:** `50%`{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :--------------------------------------- | +| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | + +--- + +### Write-Ahead Log (WAL) + +- [wal-flush-interval](#wal-flush-interval) +- [wal-snapshot-size](#wal-snapshot-size) +- [wal-max-write-buffer-size](#wal-max-write-buffer-size) +- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) +- [wal-replay-fail-on-error](#wal-replay-fail-on-error) +- [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) + +#### wal-flush-interval + +Specifies the interval to flush buffered data to a WAL file. Writes that wait +for WAL confirmation take up to this interval to complete. 
+ +**Default:** `1s` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------- | +| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | + +--- + +#### wal-snapshot-size + +Defines the number of WAL files to attempt to remove in a snapshot. This, +multiplied by the interval, determines how often snapshots are taken. + +**Default:** `600` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | + +--- + +#### wal-max-write-buffer-size + +Specifies the maximum number of write requests that can be buffered before a +flush must be executed and succeed. + +**Default:** `100000` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | + +--- + +#### snapshotted-wal-files-to-keep + +Specifies the number of snapshotted WAL files to retain in the object store. +Flushing the WAL files does not clear the WAL files immediately; +they are deleted when the number of snapshotted WAL files exceeds this number. + +**Default:** `300` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :-------------------------------- | +| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | + +--- + +#### wal-replay-fail-on-error + +Determines whether WAL replay should fail when encountering errors. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------- | +| `--wal-replay-fail-on-error` | `INFLUXDB3_WAL_REPLAY_FAIL_ON_ERROR` | + +--- + +#### wal-replay-concurrency-limit + +Sets the maximum number of concurrent WAL replay operations. + +**Default:** `16` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :------------------------------------------ | +| `--wal-replay-concurrency-limit` | `INFLUXDB3_WAL_REPLAY_CONCURRENCY_LIMIT` | + +--- + +### Compaction + +{{% show-in "enterprise" %}} +- [compaction-row-limit](#compaction-row-limit) +- [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) +- [compaction-gen2-duration](#compaction-gen2-duration) +- [compaction-multipliers](#compaction-multipliers) +- [compaction-cleanup-wait](#compaction-cleanup-wait) +- [compaction-check-interval](#compaction-check-interval) +{{% /show-in %}} +- [gen1-duration](#gen1-duration) + +{{% show-in "enterprise" %}} +#### compaction-row-limit + +Specifies the soft limit for the number of rows per file that the compactor +writes. The compactor may write more rows than this limit. + +**Default:** `1000000` + +| influxdb3 serve option | Environment variable | +| :----------------------- | :------------------------------------------ | +| `--compaction-row-limit` | `INFLUXDB3_ENTERPRISE_COMPACTION_ROW_LIMIT` | + +--- + +#### compaction-max-num-files-per-plan + +Sets the maximum number of files included in any compaction plan. + +**Default:** `500` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :------------------------------------------------------- | +| `--compaction-max-num-files-per-plan` | `INFLUXDB3_ENTERPRISE_COMPACTION_MAX_NUM_FILES_PER_PLAN` | + +--- + +#### compaction-gen2-duration + +Specifies the duration of the first level of compaction (gen2). 
Later levels of +compaction are multiples of this duration. This value should be equal to or +greater than the gen1 duration. + +**Default:** `20m` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :---------------------------------------------- | +| `--compaction-gen2-duration` | `INFLUXDB3_ENTERPRISE_COMPACTION_GEN2_DURATION` | + +--- + +#### compaction-multipliers + +Specifies a comma-separated list of multiples defining the duration of each +level of compaction. The number of elements in the list determines the number of +compaction levels. The first element specifies the duration of the first level +(gen3); subsequent levels are multiples of the previous level. + +**Default:** `3,4,6,5` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :-------------------------------------------- | +| `--compaction-multipliers` | `INFLUXDB3_ENTERPRISE_COMPACTION_MULTIPLIERS` | + +--- + +#### compaction-cleanup-wait + +Specifies the amount of time that the compactor waits after finishing a compaction run +to delete files marked as needing deletion during that compaction run. + +**Default:** `10m` + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :--------------------------------------------- | +| `--compaction-cleanup-wait` | `INFLUXDB3_ENTERPRISE_COMPACTION_CLEANUP_WAIT` | + +--- + +#### compaction-check-interval + +Specifies how often the compactor checks for new compaction work to perform. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :------------------------------------------------ | +| `--compaction-check-interval` | `INFLUXDB3_ENTERPRISE_COMPACTION_CHECK_INTERVAL` | + +--- +{{% /show-in %}} + +#### gen1-duration + +Specifies the duration that Parquet files are arranged into. Data timestamps +land each row into a file of this duration. Supported durations are `1m`, +`5m`, and `10m`. These files are known as "generation 1" files{{% show-in "enterprise" %}}, which the +compactor can merge into larger generations{{% /show-in %}}{{% show-in "core" %}} that the +compactor in InfluxDB 3 Enterprise can merge into larger generations{{% /show-in %}}. + +**Default:** `10m` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------ | +| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | + +--- + +### Caching + +- [preemptive-cache-age](#preemptive-cache-age) +- [parquet-mem-cache-size](#parquet-mem-cache-size) +- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) +- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) +- [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) +- [disable-parquet-mem-cache](#disable-parquet-mem-cache) +- [table-index-cache-max-entries](#table-index-cache-max-entries) +- [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) +{{% show-in "enterprise" %}} +- [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) +{{% /show-in %}} +- [last-cache-eviction-interval](#last-cache-eviction-interval) +{{% show-in "enterprise" %}} +- [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) +{{% /show-in %}} +- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) + +#### preemptive-cache-age + +Specifies the interval to prefetch into the Parquet cache during compaction. 
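+
+For example, a minimal sketch that prefetches only the most recent day of files
+into the cache (the other options are placeholder values):
+
+```bash
+influxdb3 serve \
+  --node-id node0 \
+  --object-store file \
+  --data-dir ~/.influxdb3/data \
+  --preemptive-cache-age 1d
+```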
+ +**Default:** `3d` + +| influxdb3 serve option | Environment variable | +| :----------------------- | :------------------------------- | +| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | + +--- + +#### parquet-mem-cache-size + +Specifies the size of the in-memory Parquet cache{{% show-in "core" %}} in megabytes (MB){{% /show-in %}}{{% show-in "enterprise" %}} in megabytes or percentage of total available memory{{% /show-in %}}. + +{{% show-in "core" %}}**Default:** `1000`{{% /show-in %}} +{{% show-in "enterprise" %}}**Default:** `20%`{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :---------------------------------- | +{{% show-in "core" %}}| `--parquet-mem-cache-size-mb` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB` |{{% /show-in %}} +{{% show-in "enterprise" %}}| `--parquet-mem-cache-size` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE` |{{% /show-in %}} + +#### parquet-mem-cache-prune-percentage + +Specifies the percentage of entries to prune during a prune operation on the +in-memory Parquet cache. + +**Default:** `0.1` + +| influxdb3 serve option | Environment variable | +| :------------------------------------- | :--------------------------------------------- | +| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | + +--- + +#### parquet-mem-cache-prune-interval + +Sets the interval to check if the in-memory Parquet cache needs to be pruned. + +**Default:** `1s` + +| influxdb3 serve option | Environment variable | +| :----------------------------------- | :------------------------------------------- | +| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | + +--- + +#### parquet-mem-cache-query-path-duration + +{{% show-in "enterprise" %}} +A [duration](/influxdb3/enterprise/reference/glossary/#duration) that specifies +{{% /show-in %}}{{% show-in "core" %}} +Specifies +{{% /show-in %}} +the time window for caching recent Parquet files in memory. Default is `5h`. + +Only files containing data with a timestamp between `now` and `now - duration` +are cached when accessed during queries--for example, with the default `5h` setting: + +- Current time: `2024-06-10 15:00:00` +- Cache window: Last 5 hours (`2024-06-10 10:00:00` to now) + +If a query requests data from `2024-06-09` (old) and `2024-06-10 14:00` (recent): + +- **Cached**: Parquet files with data from `2024-06-10 14:00` (within 5-hour window) +- **Not cached**: Parquet files with data from `2024-06-09` (outside 5-hour window) + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--parquet-mem-cache-query-path-duration` | `INFLUXDB3_PARQUET_MEM_CACHE_QUERY_PATH_DURATION` | + +--- + +#### disable-parquet-mem-cache + +Disables the in-memory Parquet cache. By default, the cache is enabled. + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | + +--- + +#### table-index-cache-max-entries + +Specifies the maximum number of entries in the table index cache. 
+ +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :-------------------------------------------- | +| `--table-index-cache-max-entries` | `INFLUXDB3_TABLE_INDEX_CACHE_MAX_ENTRIES` | + +--- + +#### table-index-cache-concurrency-limit + +Limits the concurrency level for table index cache operations. + +**Default:** `8` + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :------------------------------------------------- | +| `--table-index-cache-concurrency-limit` | `INFLUXDB3_TABLE_INDEX_CACHE_CONCURRENCY_LIMIT` | + +{{% show-in "enterprise" %}} + +--- + +#### last-value-cache-disable-from-history + +Disables populating the last-N-value cache from historical data. +If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :---------------------------------------------------------- | +| `--last-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### last-cache-eviction-interval + +Specifies the interval to evict expired entries from the Last-N-Value cache, +expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :--------------------------------------- | +| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | + + +{{% show-in "enterprise" %}} +--- + +#### distinct-value-cache-disable-from-history + +Disables populating the distinct value cache from historical data. +If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :-------------------------------------------------------------- | +| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### distinct-cache-eviction-interval + +Specifies the interval to evict expired entries from the distinct value cache, +expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------------- | :------------------------------------------- | +| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | + +--- + +#### query-file-limit + +Limits the number of Parquet files a query can access. +If a query attempts to read more than this limit, {{< product-name >}} returns an error. + +{{% show-in "core" %}} +**Default:** `432` + +With the default `432` setting and the default [`gen1-duration`](#gen1-duration) +setting of 10 minutes, queries can access up to a 72 hours of data, but +potentially less depending on whether all data for a given 10 minute block of +time was ingested during the same period. 
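+
+The 72-hour figure comes from multiplying the two defaults:
+432 files × 10 minutes per gen1 file = 4,320 minutes, or 72 hours.
+As a minimal sketch, raising the limit at startup looks like the following
+(the limit value and other options are placeholder values; weigh the
+side-effects described next):
+
+```bash
+# Allow queries to read up to ~7 days of 10-minute gen1 files (1008 × 10m)
+influxdb3 serve \
+  --node-id node0 \
+  --object-store file \
+  --data-dir ~/.influxdb3/data \
+  --query-file-limit 1008
+```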
+ +You can increase this limit to allow more files to be queried, but be aware of +the following side-effects: + +- Degraded query performance for queries that read more Parquet files +- Increased memory usage +- Your system potentially killing the `influxdb3` process due to Out-of-Memory + (OOM) errors +- If using object storage to store data, many GET requests to access the data + (as many as 2 per file) + +> [!Note] +> We recommend keeping the default setting and querying smaller time ranges. +> If you need to query longer time ranges or faster query performance on any query +> that accesses an hour or more of data, [InfluxDB 3 Enterprise](/influxdb3/enterprise/) +> optimizes data storage by compacting and rearranging Parquet files to achieve +> faster query performance. +{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------- | +| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | + +--- + +### Processing Engine + +- [plugin-dir](#plugin-dir) +- [virtual-env-location](#virtual-env-location) +- [package-manager](#package-manager) + +#### plugin-dir + +Specifies the local directory that contains Python plugins and their test files. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` | + +--- + +#### virtual-env-location + +Specifies the location of the Python virtual environment that the processing +engine uses. + +| influxdb3 serve option | Environment variable | +| :----------------------- | :--------------------- | +| `--virtual-env-location` | `VIRTUAL_ENV` | + +--- + +#### package-manager + +Specifies the Python package manager that the processing engine uses. + +This option supports the following values: + +- `discover` _(default)_: Automatically discover available package manager +- `pip`: Use pip package manager +- `uv`: Use uv package manager + +**Default:** `discover` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--package-manager` | `PACKAGE_MANAGER` | + +{{% show-in "enterprise" %}} + +--- + +### Cluster Management + +- [replication-interval](#replication-interval) +- [catalog-sync-interval](#catalog-sync-interval) +- [wait-for-running-ingestor](#wait-for-running-ingestor) + +#### replication-interval + +Specifies the interval at which data replication occurs between cluster nodes. + +**Default:** `250ms` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :------------------------------------------- | +| `--replication-interval` | `INFLUXDB3_ENTERPRISE_REPLICATION_INTERVAL` | + +--- + +#### catalog-sync-interval + +Defines how often the catalog synchronizes across cluster nodes. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------------ | +| `--catalog-sync-interval` | `INFLUXDB3_ENTERPRISE_CATALOG_SYNC_INTERVAL`| + +--- + +#### wait-for-running-ingestor + +Specifies how long to wait for a running ingestor during startup. 
+ +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :------------------------------------------------ | +| `--wait-for-running-ingestor` | `INFLUXDB3_ENTERPRISE_WAIT_FOR_RUNNING_INGESTOR` | + +--- + +### Resource Limits + +- [num-cores](#num-cores) +- [num-database-limit](#num-database-limit) +- [num-table-limit](#num-table-limit) +- [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) + +#### num-cores + +Limits the number of CPU cores that InfluxDB Enterprise can use. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------------- | +| `--num-cores` | `INFLUXDB3_ENTERPRISE_NUM_CORES` | + +--- + +#### num-database-limit + +Sets the maximum number of databases that can be created. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------------------------- | +| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | + +--- + +#### num-table-limit + +Defines the maximum number of tables that can be created across all databases. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | + +--- + +#### num-total-columns-per-table-limit + +Sets the maximum number of columns allowed per table. + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :---------------------------------------------------------- | +| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | + +{{% /show-in %}} + +--- + +### Data Lifecycle Management + +- [gen1-lookback-duration](#gen1-lookback-duration) +- [retention-check-interval](#retention-check-interval) +- [delete-grace-period](#delete-grace-period) +- [hard-delete-default-duration](#hard-delete-default-duration) + +#### gen1-lookback-duration + +Specifies how far back to look when creating generation 1 Parquet files. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--gen1-lookback-duration` | `INFLUXDB3_GEN1_LOOKBACK_DURATION` | + +--- + +#### retention-check-interval + +Defines how often the system checks for data that should be deleted according to retention policies. + +**Default:** `1h` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :--------------------------------------- | +| `--retention-check-interval` | `INFLUXDB3_RETENTION_CHECK_INTERVAL` | + +--- + +#### delete-grace-period + +Specifies the grace period before permanently deleting data. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :--------------------------------- | +| `--delete-grace-period` | `INFLUXDB3_DELETE_GRACE_PERIOD` | + +--- + +#### hard-delete-default-duration + +Sets the default duration for hard deletion of data. + +**Default:** `90d` + +| influxdb3 serve option | Environment variable | +| :---------------------------------- | :-------------------------------------------- | +| `--hard-delete-default-duration` | `INFLUXDB3_HARD_DELETE_DEFAULT_DURATION` | + +--- + +### Telemetry + +- [telemetry-disable-upload](#telemetry-disable-upload) +- [telemetry-endpoint](#telemetry-endpoint) + +#### telemetry-disable-upload + +Disables the upload of telemetry data to InfluxData. 
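+
+A minimal sketch of opting out via the environment variable (this assumes the
+variable accepts a boolean value; the other options are placeholder values):
+
+```bash
+INFLUXDB3_TELEMETRY_DISABLE_UPLOAD=true influxdb3 serve \
+  --node-id node0 \
+  --object-store file \
+  --data-dir ~/.influxdb3/data
+```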
+ +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--telemetry-disable-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` | + +--- + +#### telemetry-endpoint + +Specifies the endpoint for telemetry data uploads. + +| influxdb3 serve option | Environment variable | +| :----------------------- | :--------------------------------- | +| `--telemetry-endpoint` | `INFLUXDB3_TELEMETRY_ENDPOINT` | + +--- + +### TCP Listeners + +- [tcp-listener-file-path](#tcp-listener-file-path) +- [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) + +#### tcp-listener-file-path + +Specifies the file path for the TCP listener configuration. + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :----------------------------------- | +| `--tcp-listener-file-path` | `INFLUXDB3_TCP_LISTINER_FILE_PATH` | + +--- + +#### admin-token-recovery-tcp-listener-file-path + +Specifies the TCP listener file path for admin token recovery operations. + +| influxdb3 serve option | Environment variable | +| :---------------------------------------------- | :-------------------------------------------------------- | +| `--admin-token-recovery-tcp-listener-file-path` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_TCP_LISTENER_FILE_PATH` | + +{{% show-in "enterprise" %}} +--- + +### Experimental Features + +- [use-pacha-tree](#use-pacha-tree) + +#### use-pacha-tree + +Enables the experimental PachaTree storage engine for improved performance. + +> [!Warning] +> This is an experimental feature and should not be used in production environments. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--use-pacha-tree` | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE` | + +{{% /show-in %}} \ No newline at end of file diff --git a/content/shared/influxdb3-cli/delete/_index.md b/content/shared/influxdb3-cli/delete/_index.md index 81a47ffc67..5636183142 100644 --- a/content/shared/influxdb3-cli/delete/_index.md +++ b/content/shared/influxdb3-cli/delete/_index.md @@ -1,5 +1,5 @@ -The `influxdb3 delete` command deletes a resource such as a database or a table. +The `influxdb3 delete` command deletes a resource such as a cache, a database, or a table. 
 ## Usage
 
@@ -19,6 +19,7 @@ influxdb3 delete
 | [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache |
 | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache |
 | [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database |
+| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server |
 | [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine |
 | help | Print command help or the help of a subcommand |
 {{% /show-in %}}
@@ -30,6 +31,7 @@ influxdb3 delete
 | [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache |
 | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache |
 | [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database |
+| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server |
 | [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine |
 | help | Print command help or the help of a subcommand |
 {{% /show-in %}}
diff --git a/content/shared/influxdb3-cli/delete/token.md b/content/shared/influxdb3-cli/delete/token.md
new file mode 100644
index 0000000000..73cfd688a3
--- /dev/null
+++ b/content/shared/influxdb3-cli/delete/token.md
@@ -0,0 +1,32 @@
+
+The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server.
+
+## Usage
+
+```bash
+influxdb3 delete token [OPTIONS]
+```
+
+## Options
+
+| Option         | Description                                                                          | Default | Environment            |
+|----------------|--------------------------------------------------------------------------------------|---------|------------------------|
+| `--token`      | _({{< req >}})_ The token for authentication with the {{% product-name %}} server     |         | `INFLUXDB3_AUTH_TOKEN` |
+| `--token-name` | _({{< req >}})_ The name of the token to delete                                       |         |                        |
+| `--tls-ca`     | An optional custom CA certificate to use, useful for testing with self-signed certs   |         | `INFLUXDB3_TLS_CA`     |
+| `-h`, `--help` | Print help information                                                                 |         |                        |
+| `--help-all`   | Print detailed help information                                                        |         |                        |
+
+## Examples
+
+### Delete a token by name
+
+```bash
+influxdb3 delete token --token-name TOKEN_TO_DELETE --token AUTH_TOKEN
+```
+
+### Show help for the command
+
+```bash
+influxdb3 delete token --help
+```
\ No newline at end of file
diff --git a/helper-scripts/influxdb3-monolith/README.md b/helper-scripts/influxdb3-monolith/README.md
index 34ce14c4c9..67bb5bbc2a 100644
--- a/helper-scripts/influxdb3-monolith/README.md
+++ b/helper-scripts/influxdb3-monolith/README.md
@@ -41,40 +41,52 @@ Creates and configures authentication tokens for InfluxDB 3 containers.
 ./setup-auth-tokens.sh enterprise
 ```
 
-### 🔍 CLI Documentation Audit
+### 📘 CLI Documentation Audit
 
-#### `audit-cli-documentation.js`
-JavaScript ESM script that audits InfluxDB 3 CLI commands against existing documentation to identify missing or outdated content.
+#### `documentation-audit.js`
+JavaScript ESM script that parses InfluxDB 3 source code to extract CLI commands and audits them against existing documentation with enhanced accuracy and category filtering.
**Usage:** ```bash -node audit-cli-documentation.js [core|enterprise|both] [version|local] +node documentation-audit.js [core|enterprise|both] [version/branch/tag] [--categories=CATEGORY1,CATEGORY2,...] ``` **Features:** -- Compares actual CLI help output with documented commands -- Identifies missing documentation for new CLI options -- Finds documented options that no longer exist in the CLI -- Supports both released versions and local containers -- Generates detailed audit reports with recommendations -- Handles authentication automatically using Docker secrets +- **Source code parsing**: Parses Rust CLI implementation directly from source code +- **Accurate pattern matching**: Uses regex with word boundaries to reduce false positives +- **Category-based filtering**: Focus audits on specific documentation areas +- **Enterprise feature detection**: Identifies enterprise-specific commands and features +- **Complete option extraction**: Extracts descriptions, defaults, environment variables, and requirements +- **Template generation**: Creates documentation templates for missing commands + +**Available Categories:** +- `CLI_REFERENCE` - CLI reference documentation +- `API_REFERENCE` - API documentation +- `GETTING_STARTED` - Getting started and tutorials +- `ADMIN_GUIDES` - Administration and management +- `WRITE_DATA` - Write and ingest data documentation +- `QUERY_DATA` - Query and read data documentation +- `PROCESS_DATA` - Process and transform data documentation +- `GENERAL_REFERENCE` - General reference documentation **Examples:** ```bash -# Audit Core documentation against local container -node audit-cli-documentation.js core local +# Audit Core documentation with all categories +node documentation-audit.js core main -# Audit Enterprise documentation against specific version -node audit-cli-documentation.js enterprise v3.2.0 +# Audit Enterprise with only CLI and API reference categories +node documentation-audit.js enterprise main --categories=CLI_REFERENCE,API_REFERENCE -# Audit both products against local containers -node audit-cli-documentation.js both local +# Audit specific version with getting started docs only +node documentation-audit.js both v3.3.0 --categories=GETTING_STARTED + +# Audit with multiple specific categories +node documentation-audit.js core main --categories=CLI_REFERENCE,ADMIN_GUIDES,WRITE_DATA ``` **Output:** -- `../output/cli-audit/documentation-audit-{product}-{version}.md` - Detailed audit report -- `../output/cli-audit/parsed-cli-{product}-{version}.md` - Parsed CLI structure -- `../output/cli-audit/patches/{product}/` - Generated patches for missing documentation +- `../output/cli-audit/documentation-audit-{product}-{version}.md` - Enhanced audit report with source code insights +- `../output/cli-audit/patches/{product}/` - Generated documentation templates with complete option details ### 🛠️ CLI Documentation Updates @@ -131,12 +143,12 @@ docker compose down && docker compose up -d influxdb3-core influxdb3-enterprise ### 2. 
CLI Documentation Audit ```bash -# Start your containers -docker compose up -d influxdb3-core influxdb3-enterprise +# Audit CLI documentation from source code +node documentation-audit.js core main +node documentation-audit.js enterprise main -# Audit CLI documentation -node audit-cli-documentation.js core local -node audit-cli-documentation.js enterprise local +# Or audit with specific categories +node documentation-audit.js both main --categories=CLI_REFERENCE # Review the output ls ../output/cli-audit/ @@ -145,12 +157,12 @@ ls ../output/cli-audit/ ### 3. Development Workflow ```bash -# Audit documentation for both products -node audit-cli-documentation.js both local +# Audit using source code parsing with category filtering +node documentation-audit.js both main --categories=CLI_REFERENCE,API_REFERENCE # Check the audit results -cat ../output/cli-audit/documentation-audit-core-local.md -cat ../output/cli-audit/documentation-audit-enterprise-local.md +cat ../output/cli-audit/documentation-audit-core-main.md +cat ../output/cli-audit/documentation-audit-enterprise-main.md # Apply patches if needed (dry run first) node apply-cli-patches.js both --dry-run @@ -162,7 +174,7 @@ For release documentation, use the audit and patch workflow: ```bash # Audit against released version -node audit-cli-documentation.js enterprise v3.2.0 +node documentation-audit.js enterprise v3.2.0 # Review missing documentation cat ../output/cli-audit/documentation-audit-enterprise-v3.2.0.md @@ -176,12 +188,18 @@ git diff content/influxdb3/enterprise/reference/cli/ ## Container Integration -The scripts work with your Docker Compose setup: +The scripts work with your Docker Compose setup and automatically manage container lifecycle: **Expected container names:** - `influxdb3-core` (port 8282) - `influxdb3-enterprise` (port 8181) +**Container management:** +- Scripts automatically start containers if they're not running +- Uses `docker compose up -d {service}` to start services +- Waits for containers to be ready before proceeding +- Uses `docker compose exec -T` for non-interactive command execution + **Docker Compose secrets:** - `influxdb3-core-admin-token` - Admin token for Core (stored in `~/.env.influxdb3-core-admin-token`) - `influxdb3-enterprise-admin-token` - Admin token for Enterprise (stored in `~/.env.influxdb3-enterprise-admin-token`) @@ -193,7 +211,7 @@ The scripts work with your Docker Compose setup: 1. **Pre-release audit:** ```bash - node audit-cli-documentation.js core v3.2.0 + node documentation-audit.js core v3.2.0 ``` 2. **Review audit results and update documentation** @@ -202,9 +220,9 @@ The scripts work with your Docker Compose setup: ### 🔬 Development Testing -1. **Audit local development:** +1. **Audit development branch:** ```bash - node audit-cli-documentation.js enterprise local + node documentation-audit.js enterprise main ``` 2. **Verify new features are documented** @@ -215,7 +233,7 @@ The scripts work with your Docker Compose setup: 1. **Final audit before release:** ```bash - node audit-cli-documentation.js both local + node documentation-audit.js both v3.3.0 ``` 2. 
**Apply all pending patches** @@ -228,21 +246,20 @@ The scripts work with your Docker Compose setup: helper-scripts/ ├── output/ │ └── cli-audit/ -│ ├── documentation-audit-core-local.md # CLI documentation audit report -│ ├── documentation-audit-enterprise-v3.2.0.md # CLI documentation audit report -│ ├── parsed-cli-core-local.md # Parsed CLI structure -│ ├── parsed-cli-enterprise-v3.2.0.md # Parsed CLI structure +│ ├── documentation-audit-core-main.md # CLI documentation audit report +│ ├── documentation-audit-enterprise-v3.2.0.md # CLI documentation audit report +│ ├── influxdb-clone/ # Working copy of source code │ └── patches/ │ ├── core/ # Generated patches for Core -│ │ ├── influxdb3-cli-patch-001.md -│ │ └── influxdb3-cli-patch-002.md +│ │ ├── query.md +│ │ └── write.md │ └── enterprise/ # Generated patches for Enterprise -│ ├── influxdb3-cli-patch-001.md -│ └── influxdb3-cli-patch-002.md +│ ├── backup.md +│ └── restore.md └── influxdb3-monolith/ ├── README.md # This file ├── setup-auth-tokens.sh # Auth setup - ├── audit-cli-documentation.js # CLI documentation audit + ├── documentation-audit.js # CLI documentation audit (source code-based) └── apply-cli-patches.js # CLI documentation patches ``` @@ -252,11 +269,20 @@ helper-scripts/ **Container not running:** ```bash -# Check status +# Scripts now handle this automatically, but you can manually check: docker compose ps -# Start specific service -docker compose up -d influxdb3-core +# Or manually start +docker compose up -d influxdb3-core influxdb3-enterprise +``` + +**Source code access issues:** +```bash +# Check if the source repository exists +ls /Users/ja/Documents/github/influxdata/influxdb + +# Verify git access +cd /Users/ja/Documents/github/influxdata/influxdb && git status ``` **Authentication failures:** @@ -290,7 +316,7 @@ DEBUG=1 node audit-cli-documentation.js core local - name: Audit CLI Documentation run: | cd helper-scripts/influxdb3-monolith - node audit-cli-documentation.js core ${{ env.VERSION }} + node documentation-audit.js core ${{ env.VERSION }} - name: Upload CLI Audit Results uses: actions/upload-artifact@v3 @@ -306,7 +332,7 @@ DEBUG=1 node audit-cli-documentation.js core local name: CLI Documentation Audit command: | cd helper-scripts/influxdb3-monolith - node audit-cli-documentation.js enterprise v3.2.0 + node documentation-audit.js enterprise v3.2.0 --categories=CLI_REFERENCE - store_artifacts: path: helper-scripts/output/cli-audit/ diff --git a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js b/helper-scripts/influxdb3-monolith/audit-cli-documentation.js deleted file mode 100755 index 74e1af565f..0000000000 --- a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js +++ /dev/null @@ -1,974 +0,0 @@ -#!/usr/bin/env node - -/** - * Audit CLI documentation against current CLI help output - * Usage: node audit-cli-documentation.js [core|enterprise|both] [version] - * Example: node audit-cli-documentation.js core 3.2.0 - */ - -import { spawn } from 'child_process'; -import { promises as fs } from 'fs'; -import { homedir } from 'os'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; -import { - validateVersionInputs, - getRepositoryRoot, -} from '../common/validate-tags.js'; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = dirname(__filename); - -// Color codes -const Colors = { - RED: '\x1b[0;31m', - GREEN: '\x1b[0;32m', - YELLOW: '\x1b[1;33m', - BLUE: '\x1b[0;34m', - NC: '\x1b[0m', // No Color -}; - -class CLIDocAuditor { - 
constructor(product = 'both', version = 'local') { - this.product = product; - this.version = version; - this.outputDir = join(dirname(__dirname), 'output', 'cli-audit'); - - // Token paths - check environment variables first (Docker Compose), then fall back to local files - const coreTokenEnv = process.env.INFLUXDB3_CORE_TOKEN; - const enterpriseTokenEnv = process.env.INFLUXDB3_ENTERPRISE_TOKEN; - - if (coreTokenEnv && this.fileExists(coreTokenEnv)) { - // Running in Docker Compose with secrets - this.coreTokenFile = coreTokenEnv; - this.enterpriseTokenFile = enterpriseTokenEnv; - } else { - // Running locally - this.coreTokenFile = join(homedir(), '.env.influxdb3-core-admin-token'); - this.enterpriseTokenFile = join( - homedir(), - '.env.influxdb3-enterprise-admin-token' - ); - } - - // Commands to extract help for - this.mainCommands = [ - 'create', - 'delete', - 'disable', - 'enable', - 'query', - 'show', - 'test', - 'update', - 'write', - ]; - this.subcommands = [ - 'create database', - 'create token admin', - 'create token', - 'create trigger', - 'create last_cache', - 'create distinct_cache', - 'create table', - 'show databases', - 'show tokens', - 'show system', - 'delete database', - 'delete table', - 'delete trigger', - 'update database', - 'test wal_plugin', - 'test schedule_plugin', - ]; - - // Map for command tracking during option parsing - this.commandOptionsMap = {}; - } - - async fileExists(path) { - try { - await fs.access(path); - return true; - } catch { - return false; - } - } - - async ensureDir(dir) { - await fs.mkdir(dir, { recursive: true }); - } - - async loadTokens() { - let coreToken = null; - let enterpriseToken = null; - - try { - if (await this.fileExists(this.coreTokenFile)) { - const stat = await fs.stat(this.coreTokenFile); - if (stat.size > 0) { - coreToken = (await fs.readFile(this.coreTokenFile, 'utf8')).trim(); - } - } - } catch { - // Token file doesn't exist or can't be read - } - - try { - if (await this.fileExists(this.enterpriseTokenFile)) { - const stat = await fs.stat(this.enterpriseTokenFile); - if (stat.size > 0) { - enterpriseToken = ( - await fs.readFile(this.enterpriseTokenFile, 'utf8') - ).trim(); - } - } - } catch { - // Token file doesn't exist or can't be read - } - - return { coreToken, enterpriseToken }; - } - - runCommand(cmd, args = []) { - return new Promise((resolve) => { - const child = spawn(cmd, args, { encoding: 'utf8' }); - let stdout = ''; - let stderr = ''; - - child.stdout.on('data', (data) => { - stdout += data.toString(); - }); - - child.stderr.on('data', (data) => { - stderr += data.toString(); - }); - - child.on('close', (code) => { - resolve({ code, stdout, stderr }); - }); - - child.on('error', (err) => { - resolve({ code: 1, stdout: '', stderr: err.message }); - }); - }); - } - - async extractCurrentCLI(product, outputFile) { - process.stdout.write( - `Extracting current CLI help from influxdb3-${product}...` - ); - - await this.loadTokens(); - - if (this.version === 'local') { - const containerName = `influxdb3-${product}`; - - // Check if container is running - const { code, stdout } = await this.runCommand('docker', [ - 'ps', - '--format', - '{{.Names}}', - ]); - if (code !== 0 || !stdout.includes(containerName)) { - console.log(` ${Colors.RED}✗${Colors.NC}`); - console.log(`Error: Container ${containerName} is not running.`); - console.log(`Start it with: docker compose up -d influxdb3-${product}`); - return false; - } - - // Extract comprehensive help - let fileContent = ''; - - // Main help - const mainHelp = 
await this.runCommand('docker', [ - 'exec', - containerName, - 'influxdb3', - '--help', - ]); - fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr; - - // Extract all subcommand help - for (const cmd of this.mainCommands) { - fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; - const cmdHelp = await this.runCommand('docker', [ - 'exec', - containerName, - 'influxdb3', - cmd, - '--help', - ]); - fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr; - } - - // Extract detailed subcommand help - for (const subcmd of this.subcommands) { - fileContent += `\n\n===== influxdb3 ${subcmd} --help =====\n`; - const cmdParts = [ - 'exec', - containerName, - 'influxdb3', - ...subcmd.split(' '), - '--help', - ]; - const subcmdHelp = await this.runCommand('docker', cmdParts); - fileContent += - subcmdHelp.code === 0 ? subcmdHelp.stdout : subcmdHelp.stderr; - } - - await fs.writeFile(outputFile, fileContent); - console.log(` ${Colors.GREEN}✓${Colors.NC}`); - } else { - // Use specific version image - const image = `influxdb:${this.version}-${product}`; - - process.stdout.write(`Extracting CLI help from ${image}...`); - - // Pull image if needed - const pullResult = await this.runCommand('docker', ['pull', image]); - if (pullResult.code !== 0) { - console.log(` ${Colors.RED}✗${Colors.NC}`); - console.log(`Error: Failed to pull image ${image}`); - return false; - } - - // Extract help from specific version - let fileContent = ''; - - // Main help - const mainHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - '--help', - ]); - fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr; - - // Extract subcommand help - for (const cmd of this.mainCommands) { - fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; - const cmdHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - cmd, - '--help', - ]); - fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr; - } - - await fs.writeFile(outputFile, fileContent); - console.log(` ${Colors.GREEN}✓${Colors.NC}`); - } - - return true; - } - - async parseCLIHelp(helpFile, parsedFile) { - const content = await fs.readFile(helpFile, 'utf8'); - const lines = content.split('\n'); - - let output = '# CLI Commands and Options\n\n'; - let currentCommand = ''; - let inOptions = false; - - for (const line of lines) { - // Detect command headers - if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) { - currentCommand = line - .replace('===== ', '') - .replace(' --help =====', '') - .trim(); - output += `## ${currentCommand}\n\n`; - inOptions = false; - // Initialize options list for this command - this.commandOptionsMap[currentCommand] = []; - } - // Detect options sections - else if (line.trim() === 'Options:') { - output += '### Options:\n\n'; - inOptions = true; - } - // Parse option lines - else if (inOptions && /^\s*-/.test(line)) { - // Extract option and description - const optionMatch = line.match(/--[a-z][a-z0-9-]*/); - const shortMatch = line.match(/\s-[a-zA-Z],/); - - if (optionMatch) { - const option = optionMatch[0]; - const shortOption = shortMatch - ? 
shortMatch[0].replace(/[,\s]/g, '') - : null; - - // Extract description by removing option parts - let description = line.replace(/^\s*-[^\s]*\s*/, ''); - description = description.replace(/^\s*--[^\s]*\s*/, '').trim(); - - if (shortOption) { - output += `- \`${shortOption}, ${option}\`: ${description}\n`; - } else { - output += `- \`${option}\`: ${description}\n`; - } - - // Store option with its command context - if (currentCommand && option) { - this.commandOptionsMap[currentCommand].push(option); - } - } - } - // Reset options flag for new sections - else if (/^[A-Z][a-z]+:$/.test(line.trim())) { - inOptions = false; - } - } - - await fs.writeFile(parsedFile, output); - } - - findDocsPath(product) { - if (product === 'core') { - return 'content/influxdb3/core/reference/cli/influxdb3'; - } else if (product === 'enterprise') { - return 'content/influxdb3/enterprise/reference/cli/influxdb3'; - } - return ''; - } - - async extractCommandHelp(content, command) { - // Find the section for this specific command in the CLI help - const lines = content.split('\n'); - let inCommand = false; - let helpText = []; - const commandHeader = `===== influxdb3 ${command} --help =====`; - - for (let i = 0; i < lines.length; i++) { - if (lines[i] === commandHeader) { - inCommand = true; - continue; - } - if (inCommand && lines[i].startsWith('===== influxdb3')) { - break; - } - if (inCommand) { - helpText.push(lines[i]); - } - } - - return helpText.join('\n').trim(); - } - - async generateDocumentationTemplate(command, helpText) { - // Parse the help text to extract description and options - const lines = helpText.split('\n'); - let description = ''; - let usage = ''; - let options = []; - let inOptions = false; - - for (let i = 0; i < lines.length; i++) { - const line = lines[i]; - - if (i === 0 && !line.startsWith('Usage:') && line.trim()) { - description = line.trim(); - } - if (line.startsWith('Usage:')) { - usage = line.replace('Usage:', '').trim(); - } - if (line.trim() === 'Options:') { - inOptions = true; - continue; - } - if (inOptions && /^\s*-/.test(line)) { - const optionMatch = line.match(/--([a-z][a-z0-9-]*)/); - const shortMatch = line.match(/\s-([a-zA-Z]),/); - if (optionMatch) { - const optionName = optionMatch[1]; - const shortOption = shortMatch ? shortMatch[1] : null; - let optionDesc = line - .replace(/^\s*-[^\s]*\s*/, '') - .replace(/^\s*--[^\s]*\s*/, '') - .trim(); - - options.push({ - name: optionName, - short: shortOption, - description: optionDesc, - }); - } - } - } - - // Generate markdown template - let template = `--- -title: influxdb3 ${command} -description: > - The \`influxdb3 ${command}\` command ${description.toLowerCase()}. -influxdb3/core/tags: [cli] -menu: - influxdb3_core_reference: - parent: influxdb3 cli -weight: 201 ---- - -# influxdb3 ${command} - -${description} - -## Usage - -\`\`\`bash -${usage || `influxdb3 ${command} [OPTIONS]`} -\`\`\` - -`; - - if (options.length > 0) { - template += `## Options - -| Option | Description | -|--------|-------------| -`; - - for (const opt of options) { - const optionDisplay = opt.short - ? 
`\`-${opt.short}\`, \`--${opt.name}\`` - : `\`--${opt.name}\``; - template += `| ${optionDisplay} | ${opt.description} |\n`; - } - } - - template += ` -## Examples - -### Example 1: Basic usage - -{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}} -\`\`\`bash -influxdb3 ${command} --example PLACEHOLDER1 -\`\`\` -{{% /code-placeholders %}} - -Replace the following: - -- {{% code-placeholder-key %}}\`PLACEHOLDER1\`{{% /code-placeholder-key %}}: Description of placeholder -`; - - return template; - } - - async extractFrontmatter(content) { - const lines = content.split('\n'); - if (lines[0] !== '---') return { frontmatter: null, content }; - - const frontmatterLines = []; - let i = 1; - while (i < lines.length && lines[i] !== '---') { - frontmatterLines.push(lines[i]); - i++; - } - - if (i >= lines.length) return { frontmatter: null, content }; - - const frontmatterText = frontmatterLines.join('\n'); - const remainingContent = lines.slice(i + 1).join('\n'); - - return { frontmatter: frontmatterText, content: remainingContent }; - } - - async getActualContentPath(filePath) { - // Get the actual content path, resolving source fields - try { - const content = await fs.readFile(filePath, 'utf8'); - const { frontmatter } = await this.extractFrontmatter(content); - - if (frontmatter) { - const sourceMatch = frontmatter.match(/^source:\s*(.+)$/m); - if (sourceMatch) { - let sourcePath = sourceMatch[1].trim(); - // Handle relative paths from project root - if (sourcePath.startsWith('/shared/')) { - sourcePath = `content${sourcePath}`; - } - return sourcePath; - } - } - return null; // No source field found - } catch { - return null; - } - } - - async parseDocumentedOptions(filePath) { - // Parse a documentation file to extract all documented options - try { - const content = await fs.readFile(filePath, 'utf8'); - const options = []; - - // Look for options in various patterns: - // 1. Markdown tables with option columns - // 2. Option lists with backticks - // 3. 
Code examples with --option flags - - // Pattern 1: Markdown tables (| Option | Description |) - const tableMatches = content.match(/\|\s*`?--[a-z][a-z0-9-]*`?\s*\|/gi); - if (tableMatches) { - for (const match of tableMatches) { - const option = match.match(/--[a-z][a-z0-9-]*/i); - if (option) { - options.push(option[0]); - } - } - } - - // Pattern 2: Backtick-enclosed options in text - const backtickMatches = content.match(/`--[a-z][a-z0-9-]*`/gi); - if (backtickMatches) { - for (const match of backtickMatches) { - const option = match.replace(/`/g, ''); - options.push(option); - } - } - - // Pattern 3: Options in code blocks - const codeBlockMatches = content.match(/```[\s\S]*?```/g); - if (codeBlockMatches) { - for (const block of codeBlockMatches) { - const blockOptions = block.match(/--[a-z][a-z0-9-]*/gi); - if (blockOptions) { - options.push(...blockOptions); - } - } - } - - // Pattern 4: Environment variable mappings (INFLUXDB3_* to --option) - const envMatches = content.match( - /\|\s*`INFLUXDB3_[^`]*`\s*\|\s*`--[a-z][a-z0-9-]*`\s*\|/gi - ); - if (envMatches) { - for (const match of envMatches) { - const option = match.match(/--[a-z][a-z0-9-]*/); - if (option) { - options.push(option[0]); - } - } - } - - // Remove duplicates and return sorted - return [...new Set(options)].sort(); - } catch { - return []; - } - } - - async auditDocs(product, cliFile, auditFile) { - const docsPath = this.findDocsPath(product); - const sharedPath = 'content/shared/influxdb3-cli'; - const patchDir = join(this.outputDir, 'patches', product); - await this.ensureDir(patchDir); - - let output = `# CLI Documentation Audit - ${product}\n`; - output += `Generated: ${new Date().toISOString()}\n\n`; - - // GitHub base URL for edit links - const githubBase = 'https://github.com/influxdata/docs-v2/edit/master'; - const githubNewBase = 'https://github.com/influxdata/docs-v2/new/master'; - - // VSCode links for local editing - const vscodeBase = 'vscode://file'; - const projectRoot = join(__dirname, '..', '..'); - - // Check for missing documentation - output += '## Missing Documentation\n\n'; - - let missingCount = 0; - const missingDocs = []; - - // Map commands to expected documentation files - const commandToFile = { - 'create database': 'create/database.md', - 'create token': 'create/token/_index.md', - 'create token admin': 'create/token/admin.md', - 'create trigger': 'create/trigger.md', - 'create table': 'create/table.md', - 'create last_cache': 'create/last_cache.md', - 'create distinct_cache': 'create/distinct_cache.md', - 'show databases': 'show/databases.md', - 'show tokens': 'show/tokens.md', - 'delete database': 'delete/database.md', - 'delete table': 'delete/table.md', - query: 'query.md', - write: 'write.md', - }; - - // Extract commands from CLI help - const content = await fs.readFile(cliFile, 'utf8'); - const lines = content.split('\n'); - - for (const line of lines) { - if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) { - const command = line - .replace('===== influxdb3 ', '') - .replace(' --help =====', ''); - - if (commandToFile[command]) { - const expectedFile = commandToFile[command]; - const productFile = join(docsPath, expectedFile); - const sharedFile = join(sharedPath, expectedFile); - - const productExists = await this.fileExists(productFile); - const sharedExists = await this.fileExists(sharedFile); - - let needsContent = false; - let targetPath = null; - let stubPath = null; - - if (!productExists && !sharedExists) { - // Completely missing - needsContent = 
true; - targetPath = productFile; - } else if (productExists) { - // Check if it has a source field pointing to missing content - const actualPath = await this.getActualContentPath(productFile); - if (actualPath && !(await this.fileExists(actualPath))) { - needsContent = true; - targetPath = actualPath; - stubPath = productFile; - } - } else if (sharedExists) { - // Shared file exists, check if it has content - const actualPath = await this.getActualContentPath(sharedFile); - if (actualPath && !(await this.fileExists(actualPath))) { - needsContent = true; - targetPath = actualPath; - stubPath = sharedFile; - } - } - - if (needsContent && targetPath) { - const githubNewUrl = `${githubNewBase}/${targetPath}`; - const localPath = join(projectRoot, targetPath); - - output += `- **Missing**: Documentation for \`influxdb3 ${command}\`\n`; - if (stubPath) { - output += ` - Stub exists at: \`${stubPath}\`\n`; - output += ` - Content needed at: \`${targetPath}\`\n`; - } else { - output += ` - Expected: \`${targetPath}\` or \`${sharedFile}\`\n`; - } - output += ` - [Create on GitHub](${githubNewUrl})\n`; - output += ` - Local: \`${localPath}\`\n`; - - // Generate documentation template - const helpText = await this.extractCommandHelp(content, command); - const docTemplate = await this.generateDocumentationTemplate( - command, - helpText - ); - - // Save patch file - const patchFileName = `${command.replace(/ /g, '-')}.md`; - const patchFile = join(patchDir, patchFileName); - await fs.writeFile(patchFile, docTemplate); - - output += ` - **Template generated**: \`${patchFile}\`\n`; - - missingDocs.push({ command, file: targetPath, patchFile }); - missingCount++; - } - } - } - } - - if (missingCount === 0) { - output += 'No missing documentation files detected.\n'; - } else { - output += '\n### Quick Actions\n\n'; - output += - 'Copy and paste these commands to create missing documentation:\n\n'; - output += '```bash\n'; - for (const doc of missingDocs) { - const relativePatch = join( - 'helper-scripts/output/cli-audit/patches', - product, - `${doc.command.replace(/ /g, '-')}.md` - ); - output += `# Create ${doc.command} documentation\n`; - output += `mkdir -p $(dirname ${doc.file})\n`; - output += `cp ${relativePatch} ${doc.file}\n\n`; - } - output += '```\n'; - } - - output += '\n'; - - // Check for outdated options in existing docs - output += '## Existing Documentation Review\n\n'; - - // Parse CLI help first to populate commandOptionsMap - const parsedFile = join( - this.outputDir, - `parsed-cli-${product}-${this.version}.md` - ); - await this.parseCLIHelp(cliFile, parsedFile); - - // For each command, check if documentation exists and compare content - const existingDocs = []; - for (const [command, expectedFile] of Object.entries(commandToFile)) { - const productFile = join(docsPath, expectedFile); - const sharedFile = join(sharedPath, expectedFile); - - let docFile = null; - let actualContentFile = null; - - // Find the documentation file - if (await this.fileExists(productFile)) { - docFile = productFile; - // Check if it's a stub with source field - const actualPath = await this.getActualContentPath(productFile); - actualContentFile = actualPath - ? 
join(projectRoot, actualPath) - : join(projectRoot, productFile); - } else if (await this.fileExists(sharedFile)) { - docFile = sharedFile; - actualContentFile = join(projectRoot, sharedFile); - } - - if (docFile && (await this.fileExists(actualContentFile))) { - const githubEditUrl = `${githubBase}/${docFile}`; - const localPath = join(projectRoot, docFile); - const vscodeUrl = `${vscodeBase}/${localPath}`; - - // Get CLI options for this command - const cliOptions = this.commandOptionsMap[`influxdb3 ${command}`] || []; - - // Parse documentation content to find documented options - const documentedOptions = - await this.parseDocumentedOptions(actualContentFile); - - // Find missing options (in CLI but not in docs) - const missingOptions = cliOptions.filter( - (opt) => !documentedOptions.includes(opt) - ); - - // Find extra options (in docs but not in CLI) - const extraOptions = documentedOptions.filter( - (opt) => !cliOptions.includes(opt) - ); - - existingDocs.push({ - command, - file: docFile, - actualContentFile: actualContentFile.replace( - join(projectRoot, ''), - '' - ), - githubUrl: githubEditUrl, - localPath, - vscodeUrl, - cliOptions, - documentedOptions, - missingOptions, - extraOptions, - }); - } - } - - if (existingDocs.length > 0) { - output += 'Review these existing documentation files for accuracy:\n\n'; - - for (const doc of existingDocs) { - output += `### \`influxdb3 ${doc.command}\`\n`; - output += `- **File**: \`${doc.file}\`\n`; - if (doc.actualContentFile !== doc.file) { - output += `- **Content**: \`${doc.actualContentFile}\`\n`; - } - output += `- [Edit on GitHub](${doc.githubUrl})\n`; - output += `- [Open in VS Code](${doc.vscodeUrl})\n`; - output += `- **Local**: \`${doc.localPath}\`\n`; - - // Show option analysis - if (doc.missingOptions.length > 0) { - output += `- **⚠️ Missing from docs** (${doc.missingOptions.length} options):\n`; - for (const option of doc.missingOptions.sort()) { - output += ` - \`${option}\`\n`; - } - } - - if (doc.extraOptions.length > 0) { - output += `- **ℹ️ Documented but not in CLI** (${doc.extraOptions.length} options):\n`; - for (const option of doc.extraOptions.sort()) { - output += ` - \`${option}\`\n`; - } - } - - if (doc.missingOptions.length === 0 && doc.extraOptions.length === 0) { - output += `- **✅ Options match** (${doc.cliOptions.length} options)\n`; - } - - if (doc.cliOptions.length > 0) { - output += `- **All CLI Options** (${doc.cliOptions.length}):\n`; - const uniqueOptions = [...new Set(doc.cliOptions)].sort(); - for (const option of uniqueOptions) { - const status = doc.missingOptions.includes(option) ? '❌' : '✅'; - output += ` - ${status} \`${option}\`\n`; - } - } - output += '\n'; - } - } - - output += '\n## Summary\n'; - output += `- Missing documentation files: ${missingCount}\n`; - output += `- Existing documentation files: ${existingDocs.length}\n`; - output += `- Generated templates: ${missingCount}\n`; - output += '- Options are grouped by command for easier review\n\n'; - - output += '## Automation Suggestions\n\n'; - output += - '1. **Use generated templates**: Check the `patches` directory for pre-filled documentation templates\n'; - output += - '2. **Batch creation**: Use the shell commands above to quickly create all missing files\n'; - output += - '3. **CI Integration**: Add this audit to your CI pipeline to catch missing docs early\n'; - output += - '4. 
**Auto-PR**: Create a GitHub Action that runs this audit and opens PRs for missing docs\n\n'; - - await fs.writeFile(auditFile, output); - console.log(`📄 Audit complete: ${auditFile}`); - - if (missingCount > 0) { - console.log( - `📝 Generated ${missingCount} documentation templates in: ${patchDir}` - ); - } - } - - async run() { - console.log( - `${Colors.BLUE}🔍 InfluxDB 3 CLI Documentation Audit${Colors.NC}` - ); - console.log('======================================='); - console.log(`Product: ${this.product}`); - console.log(`Version: ${this.version}`); - console.log(); - - // Ensure output directory exists - await this.ensureDir(this.outputDir); - - if (this.product === 'core') { - const cliFile = join( - this.outputDir, - `current-cli-core-${this.version}.txt` - ); - const auditFile = join( - this.outputDir, - `documentation-audit-core-${this.version}.md` - ); - - if (await this.extractCurrentCLI('core', cliFile)) { - await this.auditDocs('core', cliFile, auditFile); - } - } else if (this.product === 'enterprise') { - const cliFile = join( - this.outputDir, - `current-cli-enterprise-${this.version}.txt` - ); - const auditFile = join( - this.outputDir, - `documentation-audit-enterprise-${this.version}.md` - ); - - if (await this.extractCurrentCLI('enterprise', cliFile)) { - await this.auditDocs('enterprise', cliFile, auditFile); - } - } else if (this.product === 'both') { - // Core - const cliFileCore = join( - this.outputDir, - `current-cli-core-${this.version}.txt` - ); - const auditFileCore = join( - this.outputDir, - `documentation-audit-core-${this.version}.md` - ); - - if (await this.extractCurrentCLI('core', cliFileCore)) { - await this.auditDocs('core', cliFileCore, auditFileCore); - } - - // Enterprise - const cliFileEnt = join( - this.outputDir, - `current-cli-enterprise-${this.version}.txt` - ); - const auditFileEnt = join( - this.outputDir, - `documentation-audit-enterprise-${this.version}.md` - ); - - if (await this.extractCurrentCLI('enterprise', cliFileEnt)) { - await this.auditDocs('enterprise', cliFileEnt, auditFileEnt); - } - } else { - console.error(`Error: Invalid product '${this.product}'`); - console.error( - 'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]' - ); - process.exit(1); - } - - console.log(); - console.log( - `${Colors.GREEN}✅ CLI documentation audit complete!${Colors.NC}` - ); - console.log(); - console.log('Next steps:'); - console.log(`1. Review the audit reports in: ${this.outputDir}`); - console.log('2. Update missing documentation files'); - console.log('3. Verify options match current CLI behavior'); - console.log('4. 
Update examples and usage patterns'); - } -} - -// Main execution -async function main() { - const args = process.argv.slice(2); - const product = args[0] || 'both'; - const version = args[1] || 'local'; - - // Validate product - if (!['core', 'enterprise', 'both'].includes(product)) { - console.error(`Error: Invalid product '${product}'`); - console.error( - 'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]' - ); - console.error('Example: node audit-cli-documentation.js core 3.2.0'); - process.exit(1); - } - - // Validate version tag - try { - const repoRoot = await getRepositoryRoot(); - await validateVersionInputs(version, null, repoRoot); - } catch (error) { - console.error(`Version validation failed: ${error.message}`); - process.exit(1); - } - - const auditor = new CLIDocAuditor(product, version); - await auditor.run(); -} - -// Run if called directly -if (import.meta.url === `file://${process.argv[1]}`) { - main().catch((err) => { - console.error('Error:', err); - process.exit(1); - }); -} - -export { CLIDocAuditor }; diff --git a/helper-scripts/influxdb3-monolith/documentation-audit.js b/helper-scripts/influxdb3-monolith/documentation-audit.js new file mode 100644 index 0000000000..7abcdc38bd --- /dev/null +++ b/helper-scripts/influxdb3-monolith/documentation-audit.js @@ -0,0 +1,1229 @@ +#!/usr/bin/env node + +/** + * CLI documentation auditor that parses commands directly from Rust source code + * + * Usage: node documentation-audit.js [core|enterprise|both] [version/branch/tag] + * Example: node documentation-audit.js core v3.3.0 + * Example: node documentation-audit.js enterprise main + */ + +import { promises as fs } from 'fs'; +import { join, dirname } from 'path'; +import { fileURLToPath } from 'url'; +import { spawn } from 'child_process'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// Color codes +const Colors = { + RED: '\x1b[0;31m', + GREEN: '\x1b[0;32m', + YELLOW: '\x1b[1;33m', + BLUE: '\x1b[0;34m', + NC: '\x1b[0m', // No Color +}; + +// Documentation categories with their associated path patterns +const DOC_CATEGORIES = { + CLI_REFERENCE: { + name: 'CLI Reference', + priority: 1, + patterns: [ + '/reference/cli/', + '/shared/influxdb3-cli/', + '/admin/cli/', + '/reference/influxdb3/cli/', + ] + }, + API_REFERENCE: { + name: 'API Reference', + priority: 2, + patterns: [ + '/reference/api/', + '/api/', + '/shared/influxdb3-api/', + ] + }, + GETTING_STARTED: { + name: 'Getting Started', + priority: 3, + patterns: [ + '/get-started/', + '/tutorial/', + '/quickstart/', + '/shared/influxdb3-get-started/', + ] + }, + ADMIN_GUIDES: { + name: 'Administration', + priority: 4, + patterns: [ + '/admin/', + '/manage/', + '/administration/', + '/shared/influxdb3-admin/', + ] + }, + WRITE_DATA: { + name: 'Write Data', + priority: 5, + patterns: [ + '/write-data/', + '/write/', + '/ingest/', + '/shared/influxdb3-write/', + ] + }, + QUERY_DATA: { + name: 'Query Data', + priority: 6, + patterns: [ + '/query-data/', + '/query/', + '/read/', + '/shared/influxdb3-query/', + ] + }, + PROCESS_DATA: { + name: 'Process Data', + priority: 7, + patterns: [ + '/process-data/', + '/process/', + '/transform/', + '/shared/influxdb3-process/', + ] + }, + GENERAL_REFERENCE: { + name: 'General Reference', + priority: 10, + patterns: [ + '/reference/', + '/shared/influxdb3-reference/', + ] + } +}; + +// Helper to get active categories (can be configured) +function getActiveCategories(categoryFilter = null) { + if 
(!categoryFilter) { + return Object.values(DOC_CATEGORIES); + } + + if (Array.isArray(categoryFilter)) { + return categoryFilter.map(cat => DOC_CATEGORIES[cat]).filter(Boolean); + } + + if (typeof categoryFilter === 'string') { + return DOC_CATEGORIES[categoryFilter] ? [DOC_CATEGORIES[categoryFilter]] : []; + } + + return Object.values(DOC_CATEGORIES); +} + +class CLIDocumentationAuditor { + constructor(product = 'both', version = 'main', categoryFilter = null) { + this.product = product; + this.version = version; + this.categoryFilter = categoryFilter; + this.outputDir = join(dirname(__dirname), 'output', 'cli-audit'); + + // Repository paths - Use separate clone to avoid disturbing user's work + this.sourceInfluxdbRepo = '/Users/ja/Documents/github/influxdata/influxdb'; + this.workingInfluxdbRepo = join(this.outputDir, 'influxdb-clone'); + this.docsRepo = '/Users/ja/Documents/github/influxdata/docs-v2'; + + // Parsed command data + this.commands = new Map(); // command -> { options: [], description: '', examples: [] } + this.commandOptionsMap = {}; // For backward compatibility + + // Enterprise feature detection patterns + this.enterprisePatterns = [ + /enterprise/i, + /license/i, + /cluster/i, + /replication/i, + /backup/i, + /restore/i + ]; + + // Active categories for search + this.activeCategories = getActiveCategories(this.categoryFilter); + } + + async ensureDir(dir) { + await fs.mkdir(dir, { recursive: true }); + } + + runCommand(cmd, args = [], cwd = null) { + return new Promise((resolve) => { + const options = { encoding: 'utf8' }; + if (cwd) options.cwd = cwd; + + const child = spawn(cmd, args, options); + let stdout = ''; + let stderr = ''; + + child.stdout.on('data', (data) => { + stdout += data.toString(); + }); + + child.stderr.on('data', (data) => { + stderr += data.toString(); + }); + + child.on('close', (code) => { + resolve({ code, stdout, stderr }); + }); + + child.on('error', (err) => { + resolve({ code: 1, stdout: '', stderr: err.message }); + }); + }); + } + + /** + * Ensure we have a working copy of the repository + * This avoids disturbing the user's working directory + */ + async ensureWorkingRepo() { + // Check if working repo exists + try { + await fs.access(this.workingInfluxdbRepo); + console.log('📁 Using existing working repository clone'); + return true; + } catch { + // Clone the repository + console.log('📥 Cloning repository for analysis...'); + const result = await this.runCommand('git', [ + 'clone', + this.sourceInfluxdbRepo, + this.workingInfluxdbRepo + ]); + + if (result.code !== 0) { + console.error(`❌ Failed to clone repository: ${result.stderr}`); + return false; + } + + console.log('✅ Repository cloned successfully'); + return true; + } + } + + /** + * Checkout specific version/branch/tag in the working repository + */ + async checkoutVersion(version) { + if (version === 'main' || version === 'local') { + console.log('📋 Using current state of repository'); + + // Pull latest changes if it's main + if (version === 'main') { + const result = await this.runCommand( + 'git', + ['pull', 'origin', 'main'], + this.workingInfluxdbRepo + ); + if (result.code !== 0) { + console.warn(`⚠️ Could not pull latest changes: ${result.stderr}`); + } + } + return true; + } + + console.log(`🔄 Checking out version: ${version}`); + const result = await this.runCommand( + 'git', + ['checkout', version], + this.workingInfluxdbRepo + ); + + if (result.code !== 0) { + console.error(`❌ Failed to checkout ${version}: ${result.stderr}`); + return false; + } + + return true; + 
} + + /** + * Detect if a command or feature is Enterprise-specific + * @param {string} content - Source code content + * @param {string} commandName - Command name + * @returns {boolean} - True if enterprise feature + */ + detectEnterpriseFeature(content, commandName) { + // Check for enterprise patterns in code content + const hasEnterprisePattern = this.enterprisePatterns.some(pattern => + pattern.test(content) || pattern.test(commandName) + ); + + // Check for feature flags or conditional compilation + const hasFeatureFlag = /cfg\(feature\s*=\s*["']enterprise["']\)/.test(content); + + // Check for license checks + const hasLicenseCheck = /license|License/.test(content); + + return hasEnterprisePattern || hasFeatureFlag || hasLicenseCheck; + } + + /** + * Parse Rust source file to extract command information + * Enhanced with Enterprise feature detection + */ + async parseRustCommand(filePath, commandName) { + try { + const content = await fs.readFile(filePath, 'utf8'); + const command = { + name: commandName, + options: [], + description: '', + examples: [], + usage: '', + subcommands: [], + isEnterpriseFeature: this.detectEnterpriseFeature(content, commandName), + alias: null + }; + + // Look for command-level visible_alias in clap attributes + const commandAliasRegex = /#\[(?:clap|command)\([^)]*visible_alias\s*=\s*"([^"]+)"[^)]*\)\]/; + const aliasMatch = content.match(commandAliasRegex); + if (aliasMatch) { + command.alias = aliasMatch[1]; + } + + // Extract struct documentation (above #[derive(Debug, Parser)]) + const structRegex = /#\[derive\(.*Parser.*\)\]\s*(?:\/\/\/.*\n)*\s*pub struct (\w+)/; + + // Find the relevant struct - could be Config or a specific subcommand config + let structMatch = content.match(structRegex); + + // If this is a subcommand, look for the specific config struct + if (commandName.includes(' ')) { + const parts = commandName.split(' '); + const subcommandName = parts[parts.length - 1]; + // Look for structs like PackageConfig, WalPluginConfig, etc. 
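+        // For example, for the subcommand "package" this matches a struct such as
+        // `pub struct PackageConfig` (or `Package...Config`) that derives
+        // `clap::Parser` or `clap::Args`, allowing `///` doc comments between the
+        // derive attribute and the struct definition.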
+ const subcommandStructRegex = new RegExp(`#\\[derive\\([^)]*(?:clap::Parser|clap::Args)[^)]*\\)\\]\\s*(?:\\/\\/\\/.*\\n)*\\s*pub struct (${subcommandName.charAt(0).toUpperCase() + subcommandName.slice(1)}\\w*Config)`, 'i'); + const subcommandMatch = content.match(subcommandStructRegex); + if (subcommandMatch) { + structMatch = subcommandMatch; + } + } + + if (structMatch) { + try { + // Look for doc comments before the struct + const beforeStruct = content.substring(0, structMatch.index); + const lines = beforeStruct.split('\n').reverse(); + + const docLines = []; + for (const line of lines) { + const docMatch = line.match(/^\s*\/\/\/\s*(.*)$/); + if (docMatch) { + docLines.unshift(docMatch[1]); + } else if (line.trim() === '' || line.includes('#[')) { + continue; // Skip empty lines and attributes + } else { + break; // Stop at non-doc content + } + } + + if (docLines.length > 0) { + command.description = docLines.join(' ').trim(); + } + } catch (error) { + console.warn(`Warning: Error parsing struct documentation in ${filePath}: ${error.message}`); + } + } + + // Parse fields with a more robust line-by-line approach + const lines = content.split('\n'); + let currentDoc = ''; + let inStruct = false; + + // Only proceed if we found a valid struct + if (structMatch && structMatch[1]) { + const structName = structMatch[1]; + + for (let i = 0; i < lines.length; i++) { + try { + const line = lines[i].trim(); + + // Check if we're entering the relevant struct + if (line.includes('pub struct') && line.includes(structName)) { + inStruct = true; + continue; + } + + // Stop when we exit the struct + if (inStruct && line === '}') { + break; + } + + if (!inStruct) continue; + + // Collect doc comments + if (line.startsWith('///')) { + const docText = line.replace(/^\/\/\/\s*/, ''); + currentDoc = currentDoc ? `${currentDoc} ${docText}` : docText; + continue; + } + + // Look for clap attributes - support both #[clap(...)] and #[arg(...)] + if (line.startsWith('#[clap(') || line.startsWith('#[arg(')) { + const clapLine = line; + let clapAttrs = ''; + + if (line.startsWith('#[clap(')) { + clapAttrs = clapLine.replace('#[clap(', '').replace(')]', ''); + } else { + clapAttrs = clapLine.replace('#[arg(', '').replace(')]', ''); + } + + // Handle multi-line clap attributes + let j = i + 1; + while (j < lines.length && !clapLine.includes(')]')) { + const nextLine = lines[j].trim(); + clapAttrs += ' ' + nextLine.replace(')]', ''); + if (nextLine.includes(')]')) break; + j++; + } + + // Get the field definition (next non-empty line) + let fieldLine = ''; + j = i + 1; + while (j < lines.length) { + const nextLine = lines[j].trim(); + if (nextLine && !nextLine.startsWith('#[') && !nextLine.startsWith('///')) { + fieldLine = nextLine; + break; + } + j++; + } + + if (fieldLine.includes(':')) { + let [fieldName, fieldType] = fieldLine.split(':').map(s => s.trim()); + const attrs = this.parseClapAttributes(clapAttrs); + + // Skip flattened configs + if (attrs.flatten) { + currentDoc = ''; + continue; + } + + // Clean field name - remove visibility modifiers like 'pub', 'pub(crate)', etc. 
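+                  // For example, `pub(crate) node_id: String` becomes `node_id`,
+                  // which is then exposed as the option name `node-id` below.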
+ fieldName = fieldName.replace(/^pub(?:\([^)]*\))?\s+/, ''); + + const optionName = attrs.long || fieldName.replace(/_/g, '-'); + + // Skip internal command routing options + if (optionName === 'cmd' || optionName === 'command' || optionName === 'subcommand') { + currentDoc = ''; + continue; + } + + const option = { + name: optionName, + short: attrs.short, + description: currentDoc || attrs.help || attrs.about || '', + defaultValue: attrs.default_value || attrs.default_value_t, + env: attrs.env, + fieldType: fieldType.replace(/,$/, '').trim(), + required: !fieldType.includes('Option<') && !attrs.default_value && !attrs.default_value_t && !attrs.required_unless_present + }; + + command.options.push(option); + } + + currentDoc = ''; + } else if (line && !line.startsWith('//') && !line.startsWith('#[')) { + // Reset doc collection if we hit a non-doc, non-attribute line + currentDoc = ''; + } + } catch (error) { + console.warn(`Warning: Error parsing line ${i + 1} in ${filePath}: ${error.message}`); + continue; + } + } + } + + // Look for usage examples in comments + const exampleRegex = /\/\/\s*[Ee]xample:?\s*\n?\s*([^\n]+)/g; + let exampleMatch; + + while ((exampleMatch = exampleRegex.exec(content)) !== null) { + const example = exampleMatch[1].trim(); + if (example && !command.examples.includes(example)) { + command.examples.push(example); + } + } + + // Parse subcommands from enum definitions (only for top-level commands) + if (!commandName.includes(' ')) { + const subcommands = this.parseSubcommands(content, commandName); + command.subcommands = subcommands; + } else { + command.subcommands = []; + } + + return command; + } catch (error) { + console.warn(`Failed to parse ${filePath}: ${error.message}`); + return null; + } + } + + /** + * Parse subcommands from enum definitions in Rust source + */ + parseSubcommands(content, parentCommand) { + const subcommands = []; + + // Look for subcommand enums (e.g., pub enum SubCommand, pub enum Command) + const enumRegex = /#\[derive\([^)]*clap::Subcommand[^)]*\)\]\s*pub enum (\w*(?:Sub)?Command)\s*\{([^}]+)\}/gs; + let enumMatch; + + while ((enumMatch = enumRegex.exec(content)) !== null) { + const enumContent = enumMatch[2]; + const lines = enumContent.split('\n'); + + for (let i = 0; i < lines.length; i++) { + const line = lines[i].trim(); + + // Look for enum variants (subcommands) + if (line.startsWith('///')) { + // Collect doc comment + let description = line.replace(/^\/\/\/\s*/, ''); + let j = i + 1; + + // Look for more doc comments + while (j < lines.length && lines[j].trim().startsWith('///')) { + description += ' ' + lines[j].trim().replace(/^\/\/\/\s*/, ''); + j++; + } + + // Look for the next non-comment line which should be the variant + while (j < lines.length) { + const variantLine = lines[j].trim(); + if (variantLine && !variantLine.startsWith('///') && !variantLine.startsWith('#[')) { + // Parse variant name - handle both simple and with config + let variantMatch = variantLine.match(/^(\w+)(?:\([^)]+\))?[,]?$/); + if (variantMatch) { + let subcommandName = variantMatch[1]; + + // Check for clap name attribute in preceding lines + for (let k = i + 1; k < j; k++) { + const clapMatch = lines[k].match(/#\[clap\([^)]*name\s*=\s*"([^"]+)"[^)]*\)\]/); + if (clapMatch) { + subcommandName = clapMatch[1]; + break; + } + } + + subcommands.push({ + name: subcommandName, + description: description.trim(), + fullName: `${parentCommand} ${subcommandName}` + }); + } + break; + } + j++; + } + + i = j - 1; // Skip processed lines + } + } + } + 
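+    // Each discovered entry has the shape { name, description, fullName }; for a
+    // parent command like "install", fullName would be "install package".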
+ return subcommands; + } + + /** + * Parse clap attribute string into structured object + * More robust parsing that handles various formats + */ + parseClapAttributes(clapAttrs) { + const attrs = {}; + + // Clean up the input + const cleanAttrs = clapAttrs.replace(/\s+/g, ' ').trim(); + + // Handle different clap patterns with improved regex + const patterns = { + long: /long\s*=\s*"([^"]+)"/, + short: /short\s*=\s*['"]([^'"]+)['"]/, + help: /help\s*=\s*"([^"]+)"/, + about: /about\s*=\s*"([^"]+)"/, + default_value: /default_value\s*=\s*"([^"]+)"/, + default_value_t: /default_value_t\s*=\s*([^,)\s]+)/, + env: /env\s*=\s*"([^"]+)"/, + value_enum: /value_enum/, + flatten: /flatten/, + visible_alias: /visible_alias\s*=\s*"([^"]+)"/, + required_unless_present: /required_unless_present\s*=\s*"([^"]+)"/ + }; + + for (const [key, pattern] of Object.entries(patterns)) { + if (typeof pattern === 'object' && pattern.test) { + const match = cleanAttrs.match(pattern); + if (match) { + attrs[key] = match[1]; + } + } else { + attrs[key] = pattern.test(cleanAttrs); + } + } + + return attrs; + } + + /** + * Discover all commands by walking the commands directory + */ + async discoverCommands() { + const commandsDir = join(this.workingInfluxdbRepo, 'influxdb3/src/commands'); + + // Parse root commands + const rootCommands = await this.parseCommandDirectory(commandsDir, []); + + // Store commands + for (const command of rootCommands) { + const fullName = `influxdb3 ${command.name}`; + this.commands.set(fullName, command); + + // For backward compatibility + this.commandOptionsMap[fullName] = command.options.map(opt => `--${opt.name}`); + + // Add subcommands as separate entries if they exist + for (const subcommand of command.subcommands) { + // Try to find the subcommand's config file to get its options + const subcommandPath = this.findSubcommandFile(command.name, subcommand.name); + if (subcommandPath) { + const subcommandConfig = await this.parseRustCommand(subcommandPath, subcommand.fullName); + if (subcommandConfig) { + this.commands.set(`influxdb3 ${subcommand.fullName}`, subcommandConfig); + this.commandOptionsMap[`influxdb3 ${subcommand.fullName}`] = subcommandConfig.options.map(opt => `--${opt.name}`); + } + } + } + } + + console.log(`✅ Discovered ${this.commands.size} commands from source code`); + } + + /** + * Find the file containing subcommand configuration + */ + findSubcommandFile(parentCommand, subcommandName) { + // For commands like 'install package', look for struct PackageConfig in install.rs + const commandFile = join(this.workingInfluxdbRepo, 'influxdb3/src/commands', `${parentCommand}.rs`); + return commandFile; // The subcommand config is typically in the same file + } + + /** + * Recursively parse command directory structure + */ + async parseCommandDirectory(dir, parentCommands) { + const commands = []; + + try { + const entries = await fs.readdir(dir, { withFileTypes: true }); + + for (const entry of entries) { + if (entry.isFile() && entry.name.endsWith('.rs') && entry.name !== 'mod.rs' && entry.name !== 'common.rs' && entry.name !== 'helpers.rs') { + const commandName = entry.name.replace('.rs', ''); + const fullPath = join(dir, entry.name); + const fullCommandName = [...parentCommands, commandName].join(' '); + + const command = await this.parseRustCommand(fullPath, fullCommandName); + if (command) { + commands.push(command); + } + } else if (entry.isDirectory() && parentCommands.length === 0) { + // Only parse subdirectories if we're at the top level commands directory + // 
and skip known non-command directories + const skipDirectories = ['plugin_test']; // Known legacy/internal directories + + if (!skipDirectories.includes(entry.name)) { + const subDir = join(dir, entry.name); + const subCommands = await this.parseCommandDirectory(subDir, [...parentCommands, entry.name]); + commands.push(...subCommands); + } + } + } + } catch (error) { + console.warn(`Failed to read directory ${dir}: ${error.message}`); + } + + return commands; + } + + /** + * Find documented options using reference pattern filtering + */ + async grepForDocumentedOptions(command, options, repoRoot) { + const searchDirs = [ + join(repoRoot, 'content/influxdb3/core'), + join(repoRoot, 'content/influxdb3/enterprise'), + join(repoRoot, 'content/shared') + ]; + + const results = { + documentedOptions: [], + optionLocations: {}, + commandFiles: [], + excludedFiles: [] + }; + + // Helper function to check file relevance and assign priority + const getFileRelevance = (filePath) => { + // Check each active category + for (const category of this.activeCategories) { + for (const pattern of category.patterns) { + if (filePath.includes(pattern)) { + return { + relevant: true, + priority: category.priority, + reason: `${category.name}: ${pattern}`, + category: category.name + }; + } + } + } + + // Check for command mentions in path + const commandName = command.replace('influxdb3 ', ''); + const hasDirectCommandMention = ( + filePath.includes(`/cli/influxdb3/${commandName.replace(' ', '/')}`) || + filePath.includes(`/${commandName}/`) || + filePath.includes(`-${commandName}.md`) || + filePath.includes(`${commandName}.md`) + ); + + if (hasDirectCommandMention) { + return { + relevant: true, + priority: 15, // Lower priority than categories + reason: 'Direct command path match', + category: 'Path Match' + }; + } + + return { + relevant: false, + priority: 99, + reason: 'Not in active categories', + category: 'None' + }; + }; + + // Recursive file search + const searchFiles = async (dir) => { + try { + const entries = await fs.readdir(dir, { withFileTypes: true }); + + for (const entry of entries) { + const fullPath = join(dir, entry.name); + + if (entry.isDirectory()) { + await searchFiles(fullPath); + } else if (entry.name.endsWith('.md')) { + const relativePath = fullPath.replace(repoRoot + '/', ''); + const relevance = getFileRelevance(fullPath); + + if (!relevance.relevant) { + results.excludedFiles.push({ + file: relativePath, + reason: relevance.reason + }); + continue; + } + + try { + const content = await fs.readFile(fullPath, 'utf-8'); + + // Check for command mentions - be more specific + const commandName = command.replace('influxdb3 ', ''); + + // Create regex patterns for more accurate matching + const commandRegex = new RegExp(`influxdb3\\s+${commandName.replace(/\s+/g, '\\s+')}(?:\\s|$|\\[)`, 'i'); + const codeBlockRegex = /```(?:sh|bash|shell)\s*\n[\s\S]*?influxdb3[\s\S]*?```/g; + + // Check if this file contains relevant CLI command mentions + const hasCommandMention = ( + // Direct command mention with proper word boundaries + commandRegex.test(content) || + // Check within code blocks specifically + (codeBlockRegex.test(content) && content.match(codeBlockRegex).some(block => commandRegex.test(block))) || + // Path-based matching for CLI reference docs + relativePath.includes(`/cli/influxdb3/${commandName.replace(' ', '/')}`) + ); + + if (hasCommandMention) { + // Look for each option in this file + const lines = content.split('\n'); + lines.forEach((line, index) => { + for (const option 
of options) { + // Create regex for more precise option matching + // Match option with word boundaries, allowing for backticks, quotes, or table cells + const optionRegex = new RegExp(`(?:^|[\\s\`'"|])${option.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}(?:[\\s\`'"|,]|$)`); + + if (optionRegex.test(line)) { + // Additional validation: skip if it's in a comment or unrelated context + const trimmedLine = line.trim(); + const isComment = trimmedLine.startsWith('//') || trimmedLine.startsWith('#') || trimmedLine.startsWith('