diff --git a/README.md b/README.md index ea29cb4bb..b95f73653 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,6 @@ KubeBlocks add-ons. | apecloud-mysql | apecloud-mysql-8.0.30
wescale-0.2.7 | ApeCloud MySQL is a database that is compatible with MySQL syntax and achieves high availability through the utilization of the RAFT consensus protocol. | xuriwuyun | | apecloud-postgresql | | ApeCloud PostgreSQL is a database that is compatible with PostgreSQL syntax and achieves high availability through the utilization of the RAFT consensus protocol. | ldming | | clickhouse | clickhouse-22.3.18
clickhouse-22.3.20
clickhouse-22.8.21
clickhouse-24.8.3
clickhouse-25.4.4 | ClickHouse is an open-source column-oriented OLAP database management system. Use it to boost your database performance while providing linear scalability and hardware efficiency. | sophon-zt | -| doris | doris-be-2.1.6
doris-fe-2.1.6 | Apache Doris is an MPP-based real-time data warehouse known for its high query speed. For queries on large datasets, it returns results in sub-seconds. It supports both high-concurrency point queries and high-throughput complex analysis. It can be used for report analysis, ad-hoc queries, unified data warehouse, and data lake query acceleration. Based on Apache Doris, users can build applications for user behavior analysis, A/B testing platform, log analysis, user profile analysis, and e-commerce order analysis. | 1aal | | elasticsearch | elasticsearch-6.8.23
elasticsearch-7.10.1
elasticsearch-7.7.1
elasticsearch-7.8.1
elasticsearch-8.1.3
elasticsearch-8.15.5
elasticsearch-8.8.2
kibana-6.8.23
kibana-7.10.1
kibana-7.7.1
kibana-7.8.1
kibana-8.1.3
kibana-8.15.5
kibana-8.8.2
kibana-8.9.1 | Elasticsearch is a distributed, RESTful search engine optimized for speed and relevance on production-scale workloads. | iziang vipshop | | etcd | etcd-3.5.15
etcd-3.5.6
etcd-3.6.1 | Etcd is a strongly consistent, distributed key-value store that provides a reliable way to store data that needs to be accessed by a distributed system or cluster of machines. | ApeCloud | | greptimedb | greptimedb-0.3.2 | An open-source, cloud-native, distributed time-series database with PromQL/SQL/Python supported. | GreptimeTeam sh2 | diff --git a/addons-cluster/doris/.helmignore b/addons-cluster/doris/.helmignore deleted file mode 100644 index 0e8a0eb36..000000000 --- a/addons-cluster/doris/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/addons-cluster/doris/Chart.yaml b/addons-cluster/doris/Chart.yaml deleted file mode 100644 index 43d04573c..000000000 --- a/addons-cluster/doris/Chart.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v2 -name: doris-cluster -description: A Helm chart for Doris Cluster - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. 
-# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.2 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "2.1.6" - -dependencies: -- name: kblib - version: 0.1.2 - repository: file://../kblib - alias: extra diff --git a/addons-cluster/doris/templates/_helpers.tpl b/addons-cluster/doris/templates/_helpers.tpl deleted file mode 100644 index 3004264ec..000000000 --- a/addons-cluster/doris/templates/_helpers.tpl +++ /dev/null @@ -1,56 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "doris-cluster.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "doris-cluster.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "doris-cluster.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "doris-cluster.labels" -}} -helm.sh/chart: {{ include "doris-cluster.chart" . }} -{{ include "doris-cluster.selectorLabels" . 
}} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "doris-cluster.selectorLabels" -}} -app.kubernetes.io/name: {{ include "doris-cluster.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - - -{{- define "doris.version" }} -{{- trimPrefix "doris-" .Values.version }} -{{- end }} diff --git a/addons-cluster/doris/templates/cluster.yaml b/addons-cluster/doris/templates/cluster.yaml deleted file mode 100644 index 760e1101d..000000000 --- a/addons-cluster/doris/templates/cluster.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: Cluster -metadata: - name: {{ include "kblib.clusterName" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "kblib.clusterLabels" . | nindent 4 }} -spec: - clusterDef: doris - terminationPolicy: {{ .Values.extra.terminationPolicy }} - topology: aggregated - componentSpecs: - - name: fe - {{- include "kblib.componentMonitor" . | indent 6 }} - serviceVersion: {{ include "doris.version" . }} - replicas: {{ .Values.fe.replicas | default 3 }} - {{- with .Values.fe.resources }} - resources: - {{- with .limits }} - limits: - cpu: {{ .cpu | quote }} - memory: {{ print .memory "Gi" | quote }} - {{- end }} - {{- with .requests }} - requests: - cpu: {{ .cpu | quote }} - memory: {{ print .memory "Gi" | quote }} - {{- end }} - {{- end }} - volumes: - - name: certificates - secret: - secretName: {{ include "kblib.clusterName" . }}-certificates - optional: true - volumeClaimTemplates: - {{- range $key, $value := .Values.fe.persistence.storages }} - - name: {{ $key }} - spec: - storageClassName: {{ $.Values.fe.persistence.storageClassName }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ print $value "Gi" | quote }} - {{- end }} - - name: be - serviceVersion: {{ include "doris.version" . 
}} - replicas: {{ .Values.be.replicas | default 3 }} - {{- with .Values.be.resources }} - resources: - {{- with .limits }} - limits: - cpu: {{ .cpu | quote }} - memory: {{ print .memory "Gi" | quote }} - {{- end }} - {{- with .requests }} - requests: - cpu: {{ .cpu | quote }} - memory: {{ print .memory "Gi" | quote }} - {{- end }} - {{- end }} - volumeClaimTemplates: - {{- range $key, $value := .Values.be.persistence.storages }} - - name: {{ $key }} - spec: - storageClassName: {{ $.Values.be.persistence.storageClassName }} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ print $value "Gi" | quote }} - {{- end }} diff --git a/addons-cluster/doris/templates/secret.yaml b/addons-cluster/doris/templates/secret.yaml deleted file mode 100644 index 326719195..000000000 --- a/addons-cluster/doris/templates/secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "kblib.clusterName" . }}-certificates - labels: - labels: {{ include "kblib.clusterLabels" . 
| nindent 4 }} -data: - # provide ca certificate private key to generate PKCS12 format certificate - ca-key.pem: {{ print "" | b64enc }} \ No newline at end of file diff --git a/addons-cluster/doris/values.schema.json b/addons-cluster/doris/values.schema.json deleted file mode 100644 index 121987016..000000000 --- a/addons-cluster/doris/values.schema.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "$schema": "http://json-schema.org/schema#", - "type": "object", - "properties": { - "version": { - "title": "Version", - "description": "The version of Doris.", - "type": "string", - "default": "2.1.6" - }, - "fe": { - "type": "object", - "properties": { - "replicas": { - "title": "FE Replicas", - "description": "The number of replicas for FE.", - "type": "integer", - "default": 3, - "minimum": 1 - }, - "resources": { - "type": "object", - "properties": { - "limits": { - "type": "object", - "properties": { - "cpu": { - "title": "FE CPU Limit", - "description": "FE CPU cores limit.", - "type": ["number", "string"], - "default": 1, - "minimum": 1 - }, - "memory": { - "title": "FE Memory Limit(Gi)", - "description": "FE Memory limit, the unit is Gi.", - "type": ["number", "string"], - "default": 2, - "minimum": 2 - } - } - }, - "requests": { - "type": "object", - "properties": { - "cpu": { - "title": "FE CPU Request", - "description": "FE CPU cores request.", - "type": ["number", "string"], - "default": 1, - "minimum": 1 - }, - "memory": { - "title": "FE Memory Request(Gi)", - "description": "FE Memory request, the unit is Gi.", - "type": ["number", "string"], - "default": 2, - "minimum": 2 - } - } - } - } - }, - "persistence": { - "type": "object", - "properties": { - "storageClassName": { - "title": "Storage Class Name", - "description": "Storage class name for persistent volumes.", - "type": ["string", "null"], - "default": "" - }, - "storages": { - "type": "object", - "properties": { - "metadata": { - "title": "FE Metadata Storage(Gi)", - "description": "FE metadata storage 
size, the unit is Gi.", - "type": ["number", "string"], - "default": 10, - "minimum": 1 - }, - "log": { - "title": "FE Log Storage(Gi)", - "description": "FE log storage size, the unit is Gi.", - "type": ["number", "string"], - "default": 10, - "minimum": 1 - } - } - } - } - } - } - }, - "be": { - "type": "object", - "properties": { - "replicas": { - "title": "BE Replicas", - "description": "The number of replicas for BE.", - "type": "integer", - "default": 3, - "minimum": 1 - }, - "resources": { - "type": "object", - "properties": { - "limits": { - "type": "object", - "properties": { - "cpu": { - "title": "BE CPU Limit", - "description": "BE CPU cores limit.", - "type": ["number", "string"], - "default": 1, - "minimum": 0.5 - }, - "memory": { - "title": "BE Memory Limit(Gi)", - "description": "BE Memory limit, the unit is Gi.", - "type": ["number", "string"], - "default": 2, - "minimum": 1 - } - } - }, - "requests": { - "type": "object", - "properties": { - "cpu": { - "title": "BE CPU Request", - "description": "BE CPU cores request.", - "type": ["number", "string"], - "default": 1, - "minimum": 0.5 - }, - "memory": { - "title": "BE Memory Request(Gi)", - "description": "BE Memory request, the unit is Gi.", - "type": ["number", "string"], - "default": 2, - "minimum": 1 - } - } - } - } - }, - "persistence": { - "type": "object", - "properties": { - "storageClassName": { - "title": "Storage Class Name", - "description": "Storage class name for persistent volumes.", - "type": ["string", "null"], - "default": "" - }, - "storages": { - "type": "object", - "properties": { - "data": { - "title": "BE Data Storage(Gi)", - "description": "BE data storage size, the unit is Gi.", - "type": ["number", "string"], - "default": 20, - "minimum": 1 - }, - "log": { - "title": "BE Log Storage(Gi)", - "description": "BE log storage size, the unit is Gi.", - "type": ["number", "string"], - "default": 10, - "minimum": 1 - } - } - } - } - } - } - }, - "nameOverride": { - "title": "Name 
Override", - "description": "Override the default chart name.", - "type": "string", - "default": "" - }, - "fullnameOverride": { - "title": "Fullname Override", - "description": "Override the default full chart name.", - "type": "string", - "default": "" - } - }, - "required": ["fe", "be"] -} \ No newline at end of file diff --git a/addons-cluster/doris/values.yaml b/addons-cluster/doris/values.yaml deleted file mode 100644 index 9f0745787..000000000 --- a/addons-cluster/doris/values.yaml +++ /dev/null @@ -1,35 +0,0 @@ -version: 2.1.6 - -fe: - replicas: 3 - resources: - limits: - cpu: 1 - memory: 2 - requests: - cpu: 1 - memory: 2 - persistence: - storageClassName: "" - storages: - metadata: 10 - log: 10 - -be: - replicas: 3 - resources: - limits: - cpu: 1 - memory: 2 - requests: - cpu: 1 - memory: 2 - persistence: - storageClassName: "" - storages: - data: 20 - log: 10 - -nameOverride: "" - -fullnameOverride: "" diff --git a/addons/doris/.helmignore b/addons/doris/.helmignore deleted file mode 100644 index 0e8a0eb36..000000000 --- a/addons/doris/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/addons/doris/Chart.yaml b/addons/doris/Chart.yaml deleted file mode 100644 index f75563873..000000000 --- a/addons/doris/Chart.yaml +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: v2 -name: doris -description: Apache Doris is an MPP-based real-time data warehouse known for its high query speed. For queries on large datasets, it returns results in sub-seconds. It supports both high-concurrency point queries and high-throughput complex analysis. 
It can be used for report analysis, ad-hoc queries, unified data warehouse, and data lake query acceleration. Based on Apache Doris, users can build applications for user behavior analysis, A/B testing platform, log analysis, user profile analysis, and e-commerce order analysis. - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.2 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: "2.1.6" - -dependencies: - - name: kblib - version: 0.1.0 - repository: file://../kblib - alias: extra - -maintainers: - - name: 1aal - url: https://github.com/apecloud/kubeblocks/ - -annotations: - addon.kubeblocks.io/kubeblocks-version: ">=1.0.0" - addon.kubeblocks.io/model: "RDBMS" - addon.kubeblocks.io/provider: "community" diff --git a/addons/doris/config/be-config-constraint.cue b/addons/doris/config/be-config-constraint.cue deleted file mode 100644 index 4583a3987..000000000 --- a/addons/doris/config/be-config-constraint.cue +++ /dev/null @@ -1,1484 +0,0 @@ -#BEParameter: { - // DYNAMIC parameters - - // Threshold to logging agent task trace, in seconds. - agent_task_trace_threshold_sec: int | *2 - - // The interval for cleaning the recycle bin is 24 hours. When the disk space is insufficient, the file retention period under trash may not comply with this parameter - trash_file_expire_time_sec: int | *86400 - - // The timeout period for connecting to ES via http. Unit is millisecond - es_http_timeout_ms: int | *5000 - - // The timeout when establishing connection with external table such as ODBC table - external_table_connect_timeout_sec: int | *30 - - // Interval between profile reports. Unit is second - status_report_interval: int | *5 - - // This configuration is used to control whether to serialize the protoBuf request and embed the Tuple/Block data into the controller attachment and send it through http brpc when the length of the Tuple/Block data is greater than 1.8G. To avoid errors when the length of the protoBuf request exceeds 2G: Bad request, error_text=[E1003]Fail to compress request. In the past version, after putting Tuple/Block data in the attachment, it was sent through the default baidu_std brpc, but when the attachment exceeds 2G, it will be truncated. There is no 2G limit for sending through http brpc. 
- transfer_large_data_by_brpc: bool | *true - - // Thrift default timeout time - thrift_rpc_timeout_ms: int | *60000 - - // Used to set retry interval for thrift client in be to avoid avalanche disaster in fe thrift server, the unit is ms - thrift_client_retry_interval_ms: int | *1000 - - // The default thrift client connection timeout time, the unit is second - thrift_connect_timeout_seconds: int | *3 - - // The maximum size of a (received) message of the thrift server, in bytes. If the size of the message sent by the client exceeds this limit, the Thrift server will reject the request and close the connection. As a result, the client will encounter the error: "connection has been closed by peer." In this case, you can try increasing this parameter. The default value is 104857600(100MB). - thrift_max_message_size: int | *104857600 - - // Txn submit rpc timeout, the unit is ms - txn_commit_rpc_timeout_ms: int | *60000 - - // Time interval for clearing expired Rowset, the unit is second - unused_rowset_monitor_interval: int | *30 - - // The soft limit of the maximum length of String type. - string_type_length_soft_limit_bytes: int | *1048576 - - // When using the odbc external table, if a column type of the odbc source table is HLL, CHAR or VARCHAR, and the length of the column value exceeds this value, the query will report an error 'column value length longer than buffer length'. You can increase this value - big_column_size_buffer: int | *65535 - - // When using the odbc external table, if a column type of the odbc source table is not HLL, CHAR or VARCHAR, and the length of the column value exceeds this value, the query will report an error 'column value length longer than buffer length'. You can increase this value - small_column_size_buffer: int | *100 - - // The soft limit of the maximum length of JSONB type. 
- jsonb_type_length_soft_limit_bytes: int & >= 1 & <= 2147483643 | *1048576 - - // Number of max scan keys - doris_max_scan_key_num: int | *48 - - // When BE performs data scanning, it will split the same scanning range into multiple ScanRanges. This parameter represents the scan data range of each ScanRange. This parameter can limit the time that a single OlapScanner occupies the io thread. - doris_scan_range_row_count: int | *1000000 - - // The maximum number of data rows returned by each scanning thread in a single execution - doris_scanner_row_num: int | *16384 - - // Single read execute fragment row bytes. If there are too many columns in the table, you can adjust this config if you encounter a select * stuck - doris_scanner_row_bytes: int | *10485760 - - // The size of the Buffer queue of the ExchangeNode node, in bytes. After the amount of data sent from the Sender side is larger than the Buffer size of ExchangeNode, subsequent data sent will block until the Buffer frees up space for writing - exchg_node_buffer_size_bytes: int | *20485760 - - // The max number of push down values of a single column. if exceed, no conditions will be pushed down for that column. - max_pushdown_conditions_per_column: int | *1024 - - // The value set by the user for send_batch_parallelism is not allowed to exceed max_send_batch_parallelism_per_job, if exceed, the value of send_batch_parallelism would be max_send_batch_parallelism_per_job - max_send_batch_parallelism_per_job: int & >= 1 | *5 - - // The maximum amount of data read by each OlapScanner. - doris_scan_range_max_mb: int | *1024 - - // Whether disable automatic compaction task - disable_auto_compaction: bool | *false - - // Whether enable vertical compaction - enable_vertical_compaction: bool | *true - - // In vertical compaction, column number for every group - vertical_compaction_num_columns_per_group: int | *5 - - // In vertical compaction, max dest segment file size, The unit is m bytes. 
- vertical_compaction_max_segment_size: int | *1073741824 - - // Whether to enable ordered data compaction - enable_ordered_data_compaction: bool | *true - - // In ordered data compaction, min segment size for input rowset, The unit is m bytes. - ordered_data_compaction_min_segment_size: int | *10485760 - - // The maximum of thread number in base compaction thread pool, -1 means one thread per disk. - max_base_compaction_threads: int | *4 - - // Sleep interval in ms after generated compaction tasks - generate_compaction_tasks_interval_ms: int | *10 - - // The limit of the number of Cumulative files to be reached. After reaching this limit, BaseCompaction will be triggered - base_compaction_min_rowset_num: int | *5 - - // One of the trigger conditions of BaseCompaction: Cumulative file size reaches the proportion of Base file - base_compaction_min_data_ratio: float | *0.3 - - // The upper limit of "permits" held by all compaction tasks. This config can be set to limit memory consumption for compaction. - total_permits_for_compaction_score: int | *10000 - - // The total disk size of the output rowset of cumulative compaction exceeds this configuration size, and the rowset will be used for base compaction. The unit is m bytes. - compaction_promotion_size_mbytes: int | *1024 - - // Output rowset of cumulative compaction total disk size exceed this config ratio of base rowset's total disk size, this rowset will be given to base compaction. The value must be between 0 and 1. - compaction_promotion_ratio: float & >0 & < 1 | *0.05 - - // The smallest size of rowset promotion. When the rowset is less than this config, this rowset will be not given to base compaction. The unit is m byte. - compaction_promotion_min_size_mbytes: int | *128 - - // When the cumulative compaction is merged, the selected rowsets to be merged have a larger disk size than this configuration, then they are divided and merged according to the level policy. 
When it is smaller than this configuration, merge directly. The unit is m bytes. - compaction_min_size_mbytes: int | *64 - - // Cumulative compaction strategy: the minimum number of incremental files - cumulative_compaction_min_deltas: int | *5 - - // Cumulative compaction strategy: the maximum number of incremental files - cumulative_compaction_max_deltas: int | *1000 - - // Threshold to logging base compaction's trace information, in seconds - base_compaction_trace_threshold: int | *60 - - // Threshold to logging cumulative compaction's trace information, in second - cumulative_compaction_trace_threshold: int | *10 - - // The number of compaction tasks which execute in parallel for a disk(HDD) - compaction_task_num_per_disk: int & >=2 | *4 - - // The number of compaction tasks which execute in parallel for a fast disk(SSD) - compaction_task_num_per_fast_disk: int & >=2 | *8 - - // How many rounds of cumulative compaction for each round of base compaction when compaction tasks generation. - cumulative_compaction_rounds_for_each_base_compaction_round: int | *9 - - // The maximum of thread number in cumulative compaction thread pool, -1 means one thread per disk - max_cumu_compaction_threads: int | *-1 - - // Global segcompaction thread pool size - segcompaction_num_threads: int | *5 - - // Disable the trace log of compaction - disable_compaction_trace_log: bool | *true - - // Select the time interval in seconds for rowset to be compacted. - pick_rowset_to_compact_interval_sec: int | *86400 - - // The maximum of thread number in single replica compaction thread pool. -1 means one thread per disk. - max_single_replica_compaction_threads: int | *-1 - - // Minimal interval (s) to update peer replica infos - update_replica_infos_interval_seconds: int | *60 - - // Whether to enable stream load record function, the default is false. 
- enable_stream_load_record: bool | *false - - // The load error log will be deleted after this time - load_error_log_reserve_hours: int | *48 - - // Error log size limit, default 200MB - load_error_log_limit_bytes: int | *209715200 - - // This configuration is mainly used to modify timeout of brpc between master replica and slave replica, used for single replica load. - slave_replica_writer_rpc_timeout_sec: int | *60 - - // Used to limit the number of segments in the newly generated rowset when importing. If the threshold is exceeded, the import will fail with error -238. Too many segments will cause compaction to take up a lot of memory and cause OOM errors. - max_segment_num_per_rowset: int | *1000 - - // The number of caches for the data consumer used by the routine load. - routine_load_consumer_pool_size: int | *1024 - - // When the timeout of an import task is less than this threshold, Doris will consider it to be a high priority task. High priority tasks use a separate pool of flush threads. - load_task_high_priority_threshold_second: int | *120 - - // The minimum timeout for each rpc in the load job. - min_load_rpc_timeout_ms: int | *20000 - - // The maximum number of consumers in a data consumer group, used for routine load - max_consumer_num_per_group: int | *3 - - // Used to limit the maximum amount of csv data allowed in one Stream load. - streaming_load_max_mb: int | *10240 - - // It is used to limit the maximum amount of json data allowed in one Stream load. The unit is MB. - streaming_load_json_max_mb: int | *100 - - // While loading data, there's a polling thread keep sending data to corresponding BE from Coordinator's sink node. This thread will check whether there's data to send every olap_table_sink_send_interval_microseconds microseconds. - olap_table_sink_send_interval_microseconds: int | *1000 - - // If we load data to a table which enabled auto partition. the interval of olap_table_sink_send_interval_microseconds is too slow. 
In that case the real interval will multiply this factor. - olap_table_sink_send_interval_auto_partition_factor: int | *0.001 - - // The maximum external scan cache batch count, which means that the cache max_memory_cache_batch_count * batch_size row, the default is 20, and the default value of batch_size is 1024, which means that 20 * 1024 rows will be cached in memory. - max_memory_sink_batch_count: int | *20 - - // Interval in milliseconds between memtable flush mgr refresh iterations. - memtable_mem_tracker_refresh_interval_ms: int | *5 - - // If the number of rows in a page is less than this value, no zonemap will be created to reduce data expansion - zone_map_row_num_threshold: int | *20 - - // Maximum memory allowed for a single schema change task. - memory_limitation_per_thread_for_schema_change_bytes: int | *2147483648 - - - // The minimum length of TCMalloc Hook when consume/release MemTracker. Consume size smaller than this value will continue to accumulate to avoid frequent calls to consume/release of MemTracker. Decreasing this value will increase the frequency of consume/release. Increasing this value will cause MemTracker statistics to be inaccurate. Theoretically, the statistical value of a MemTracker differs from the true value = ( mem_tracker_consume_min_size_bytes * the number of BE threads where the MemTracker is located). - mem_tracker_consume_min_size_bytes: int | *1048576 - - // The size of the buffer before flashing to disk. - write_buffer_size: int | *5242880 - - // The cache size used when reading files on hdfs or object storage. - remote_storage_read_buffer_mb: int | *16 - - // Recycle scan data thread check interval - path_gc_check_interval_second: int | *86400 - - path_gc_check_step: int | *1000 - - path_gc_check_step_interval_ms: int | *10 - - // This configuration is used for the context gc thread scheduling cycle. The unit is minutes. 
- scan_context_gc_interval_min: int | *5 - - // Configure how many rows of data are contained in a single RowBlock - default_num_rows_per_column_file_block: int | *1024 - - // Disable to use page cache for index caching, this configuration only takes effect in BETA storage format, usually it is recommended to false - disable_storage_page_cache: bool | *false - - // Disk status check interval - disk_stat_monitor_interval: int | *5 - - // The storage engine allows the percentage of damaged hard disks to exist. After the damaged hard disk exceeds the changed ratio, BE will automatically exit - max_percentage_of_error_disk: int | *100 - - // The read size is the size of the reads sent to os. - read_size: int | *8388608 - - // The min bytes that should be left of a data dir. Default is 1GB. - storage_flood_stage_left_capacity_bytes: int | *1073741824 - - // The percent of max used capacity of a data dir. Default is 90%. - storage_flood_stage_usage_percent: int | *90 - - // Whether the storage engine opens sync and keeps it to the disk - sync_tablet_meta: bool | *false - - // The maximum duration of unvalidated data retained by the storage engine - pending_data_expire_time_sec: int | *1800 - - // Limit the number of versions of a single tablet. It is used to prevent a large number of version accumulation problems caused by too frequent import or untimely compaction. When the limit is exceeded, the import task will be rejected. - max_tablet_version_num: int | *2000 - - // The time interval for the TabletMeta Checkpoint thread to perform polling. - tablet_meta_checkpoint_min_interval_secs: int | *600 - // The minimum number of Rowsets for storing TabletMeta Checkpoints - tablet_meta_checkpoint_min_new_rowsets_num: int | *10 - - // It is used to control the expiration time of cleaning up the merged rowset version. 
When the current time now() minus the max created rowset‘s create time in a version path is greater than tablet_rowset_stale_sweep_time_sec, the current path is cleaned up and these merged rowsets are deleted, the unit is second. - tablet_rowset_stale_sweep_time_sec: int | *300 - - // Used to ignore brpc error '[E1011]The server is overcrowded' when writing data. - tablet_writer_ignore_eovercrowded: bool | *true - - // The lifetime of TabletsChannel. If the channel does not receive any data at this time, the channel will be deleted. - streaming_load_rpc_max_alive_time_sec: int | *1200 - - // The interval time for the agent to report the disk status to FE - report_disk_state_interval_seconds: int | *60 - - // Result buffer cancellation time - result_buffer_cancelled_interval_time: int | *300 - - // Snapshot file cleaning interval. - snapshot_expire_time_sec: int | *172800 - - // System log level. - sys_log_level: string & "INFO" | "WARNING" | "ERROR" | "FATAL" | *"INFO" - - // The interval time for the agent to report the olap table to the FE - report_tablet_interval_seconds: int | *60 - - // The interval time for the agent to report the task signature to FE - report_task_interval_seconds: int | *10 - - // Used for forward compatibility, will be removed later. - enable_token_check: bool | *true - - // Max number of txns for every txn_partition_map in txn manager, this is a self protection to avoid too many txns saving in manage. - max_runnings_transactions_per_txn_map: int | *2000 - - // Maximum download speed limit, unit is kbps. - max_download_speed_kbps: int | *50000 - - // Download time limit, unit is second. - download_low_speed_time: int | *300 - - // Minimum download speed, unit is kbps. 
- download_low_speed_limit_kbps: int | *50 - - // The increased frequency of priority for remaining tasks in BlockingPriorityQueue - priority_queue_remaining_tasks_increased_frequency: int | *512 - - // Whether enable simdjson to parse json while stream load - enable_simdjson_reader: bool | *true - - // If true, when the process does not exceed the soft mem limit, the query memory will not be limited; when the process memory exceeds the soft mem limit, the query with the largest ratio between the currently used memory and the exec_mem_limit will be canceled. If false, cancel query when the memory used exceeds exec_mem_limit. - enable_query_memory_overcommit: bool | *true - - // The storage directory for files queried by local table valued functions. - user_files_secure_path: string | *"/opt/apache-doris/be" - - // The batch size for sending data by brpc streaming client - brpc_streaming_client_batch_bytes: int | *262144 - - // In cloud native deployment scenario, BE will be add to cluster and remove from cluster very frequently. User's query will fail if there is a fragment is running on the shuting down BE. Users could use stop_be.sh --grace, then BE will wait all running queries to stop to avoiding running query failure, but if the waiting time exceed the limit, then be will exit directly. During this period, FE will not send any queries to BE and waiting for all running queries to stop. 
- grace_shutdown_wait_seconds: int | *120 - - // number of threads that fetch auto-inc ranges from FE - auto_inc_fetch_thread_num: int | *3 - - // the ratio of _low_level_water_level_mark/_batch_size in AutoIncIDBuffer - auto_inc_low_water_level_mark_size_ratio: int | *3 - - ca_cert_file_paths: string | *"/etc/pki/tls/certs/ca-bundle.crt;/etc/ssl/certs/ca-certificates.crt;/etc/ssl/ca-bundle.pem" - - // the ratio of _prefetch_size/_batch_size in AutoIncIDBuffer - auto_inc_prefetch_size_ratio: int | *10 - - // The maximum size of a single file in a compaction that contains duplicate keys, in MB. - base_compaction_dup_key_max_file_size_mbytes: int | *1024 - - // The maximum score of a compaction that contains duplicate keys. - base_compaction_max_compaction_score: int | *20 - - // The path to store broken storage files. - broken_storage_path: string | *"" - - // The timeout time for buffered reader to read data, unit is ms. - buffered_reader_read_timeout_ms: int | *600000 - - // The interval time for the agent to prune stale tablets. - cache_periodic_prune_stale_sweep_sec: int | *300 - - // The interval time for the agent to prune stale tablets. - cache_prune_interval_sec: int | *300 - - // Whether to check segment when build rowset meta - check_segment_when_build_rowset_meta: bool | *false - - // The time interval to clean expired stream load records - trash_file_expire_time_sec: int | *1800 - - // The interval time for the agent to compact cold data - cold_data_compaction_interval_sec: int | *1800 - - // The threshold of the ratio of the number of unique keys to the total number of keys in a column dictionary. If the ratio is less than this value, the column dictionary will not be compressed. - column_dictionary_key_ratio_threshold: int | *0 - - // The threshold of the size of a column dictionary. If the size of a column dictionary is less than this value, the column dictionary will not be compressed. 
- column_dictionary_key_size_threshold: int | *0 - - // The interval time for the agent to prune stale objects in common object LRU cache. - common_obj_lru_cache_stale_sweep_time_sec: int | *900 - - // The batch size for compaction - compaction_batch_size: int | *-1 - - // The maximum number of invisible versions to keep in a compaction. - compaction_keep_invisible_version_max_count: int | *500 - - // The minimum number of invisible versions to keep in a compaction. - compaction_keep_invisible_version_min_count: int | *50 - - // The timeout time for compaction to keep invisible versions, unit is sec. - compaction_keep_invisible_version_timeout_sec: int | *1800 - - // The maximum memory bytes limit for compaction. - compaction_memory_bytes_limit: int | *1073741824 - - // When output rowset of cumulative compaction total version count (end_version - start_version) exceed this config count, the rowset will be moved to base compaction. This config will work for unique key merge-on-write table only, to reduce version count related cost on delete bitmap more effectively. - compaction_promotion_version_count: int | *1000 - - // The interval time for the agent to confirm unused remote files. - confirm_unused_remote_files_interval_sec: int | *60 - - // The threshold of the memory bytes to crash when allocating large memory. - crash_in_alloc_large_memory_bytes: int | *-1 - - // The factor of the maximum number of deltas to compact in a cumulative compaction. - cumulative_compaction_max_deltas_factor: int | *10 - - // The interval time for the agent to prune stale data pages in data page cache. - data_page_cache_stale_sweep_time_sec: int | *300 - - // Whether to debug inverted index compaction - debug_inverted_index_compaction: bool | *false - - // The interval time for the agent to prune stale bitmaps in aggregation cache. - delete_bitmap_agg_cache_stale_sweep_time_sec: int | *1800 - - // Whether to disable memory garbage collection. 
- disable_memory_gc: bool | *false - - // Whether to disable segment cache - disable_segment_cache: bool | *false - - // Whether to disable row cache feature in storage - disable_storage_row_cache: bool | *true - - // the timeout of a work thread to wait the blocking priority queue to get a task - doris_blocking_priority_queue_wait_timeout_ms: int | *500 - - // The path to the cgroup cpu directory - doris_cgroup_cpu_path: string | *"" - - // max bytes number for single scan block, used in segmentv2 - doris_scan_block_max_mb: int | *67108864 - - // size of scanner queue between scanner thread and compute thread - doris_scanner_queue_size: int | *1024 - - // the threshold of double resize - double_resize_threshold: int | *23 - - // DWARF location info mode - dwarf_location_info_mode: string | *"FAST" - - // Whether to enable write background when using brpc stream - enable_brpc_stream_write_background: bool | *true - - // Whether to enable column type check - enable_column_type_check: bool | *true - - // whether check compaction checksum - enable_compaction_checksum: bool | *false - - // whether enable compaction priority scheduling - enable_compaction_priority_scheduling: bool | *true - - // Default 300s, if its value <= 0, then log is disabled - enable_debug_log_timeout_secs: int | *0 - - // Whether to apply delete pred in cumu compaction - enable_delete_when_cumu_compaction: bool | *false - - // Whether to purge dirty pages in jemalloc - enable_je_purge_dirty_pages: bool | *true - - // Whether to enable memory orphan check - enable_memory_orphan_check: bool | *true - - // Whether to enable merge-on-write correctness check - enable_merge_on_write_correctness_check: bool | *true - - // Whether to enable missing rows correctness check - enable_missing_rows_correctness_check: bool | *false - - // If set to false, the parquet reader will not use page index to filter data. This is only for debug purpose, in case sometimes the page index filter wrong data. 
- enable_parquet_page_index: bool | *false - - // Whether to enable pipeline task leakage detect - enable_pipeline_task_leakage_detect: bool | *false - - // Whether to enable query like bloom filter - enable_query_like_bloom_filter: bool | *true - - // Whether to enable rowid conversion correctness check - enable_rowid_conversion_correctness_check: bool | *false - - // Whether to enable shrink memory - enable_shrink_memory: bool | *false - - // Whether to enable use cgroup memory info - enable_use_cgroup_memory_info: bool | *true - - // Whether to enable vertical segment writer - enable_vertical_segment_writer: bool | *true - - // Whether to enable workload group memory gc - enable_workload_group_memory_gc: bool | *true - - // The estimated memory bytes per column reader - estimated_mem_per_column_reader: int | *1024 - - // Whether to ignore eovercrowded error in exchange sink - exchange_sink_ignore_eovercrowded: bool | *true - - // The capacity factor of exchange buffer queue - exchg_buffer_queue_capacity_factor: int | *64 - - // The timeout time for fetch remote schema rpc, unit is ms. - fetch_remote_schema_rpc_timeout_ms: int | *60000 - - // The timeout time for fetch rpc, unit is seconds. - fetch_rpc_timeout_seconds: int | *30 - - // The maximum number of evicted files per round in file cache. - file_cache_max_evict_num_per_round: int | *5000 - - // The maximum size of file reader cache in file cache. - file_cache_max_file_reader_cache_size: int | *1000000 - - // The wait time in seconds after file cache fail. - file_cache_wait_sec_after_fail: int | *0 - - // The size of finished migration tasks queue. - finished_migration_tasks_size: int | *10000 - - // The batch size for garbage sweep in file cache. - garbage_sweep_batch_size: int | *100 - - // The interval time for the agent to generate cooldown task in seconds. - generate_cooldown_task_interval_sec: int | *20 - - // The tool to get stack trace. 
- get_stack_trace_tool: string | *"libunwind" - - // The memory limit for group commit queue in bytes. - group_commit_queue_mem_limit: int | *67108864 - - // The double grow degree of hash table. - hash_table_double_grow_degree: int | *31 - - // The threshold of high disk available level diff usages. Consider two high usage disk at the same available level if they do not exceed this diff. - high_disk_avail_level_diff_usages: float | *0.15 - - // The maximum file size for hive sink in bytes. - hive_sink_max_file_size: int | *1073741824 - - // The maximum file size for iceberg sink in bytes. - iceberg_sink_max_file_size: int | *1073741824 - - // Whether to ignore not found file in external table. - ignore_not_found_file_in_external_table: bool | *true - - // Whether to ignore rowset stale unconsistent delete. - ignore_rowset_stale_unconsistent_delete: bool | *false - - // Whether to ignore schema change check. - ignore_schema_change_check: bool | *false - - // Threshold of reading a small file into memory - in_memory_file_size: int | *1048576 - - // The time to keep an index cache entry after lookup in seconds. - index_cache_entry_stay_time_after_lookup_s: int | *1800 - - // The interval time for the agent to sweep index page cache in seconds. - index_page_cache_stale_sweep_time_sec: int | *600 - - // The interval time for the agent to sweep inverted index page cache in seconds. - inverted_index_cache_stale_sweep_time_sec: int | *600 - - // Whether to enable inverted index compaction. - inverted_index_compaction_enable: bool | *false - - // The maximum number of buffered documents in inverted index compaction. - inverted_index_max_buffered_docs: int | *-1 - - // The size of RAM buffer for inverted index. - inverted_index_ram_buffer_size: float | *512 - - // Whether to enable inverted index RAM directory. - inverted_index_ram_dir_enable: bool | *true - - // The timeout time for jdbc connection pool cache clear in seconds. 
- jdbc_connection_pool_cache_clear_time_sec: int | *28800 - - // The memory limit percent for jdbc connection pool cache. - je_dirty_pages_mem_limit_percent: string | *"5%" - - // The directory for jeprofile. - jeprofile_dir: string | *"/opt/apache-doris/be/log" - - // The path for kerberos ccache. - kerberos_ccache_path: string | *"" - - // The path for kerberos krb5.conf. - kerberos_krb5_conf_path: string | *"/etc/krb5.conf" - - // The memory limit for local exchange buffer in bytes. - local_exchange_buffer_mem_limit: int | *134217728 - - // The maximum size of lookup connection cache in bytes. - lookup_connection_cache_bytes_limit: int | *4294967296 - - // The threshold score for low priority compaction. - low_priority_compaction_score_threshold: int | *200 - - // The maximum number of low priority compaction tasks per disk. - low_priority_compaction_task_num_per_disk: int | *2 - - // The maximum ratio of amplified read to total read. - max_amplified_read_ratio: float | *0.8 - - // The maximum fill rate of disk. - max_fill_rate: int | *2 - - // The maximum wait time in seconds for fragment start. - max_fragment_start_wait_time_seconds: int | *30 - - // The maximum number of retries for S3 client. - max_s3_client_retry: int | *10 - - // The maximum number of tablet IO errors. - max_tablet_io_errors: int | *-1 - - // The sleep time in milliseconds for memory garbage collection. - memory_gc_sleep_time_ms: int | *500 - - // The memory limitation per thread for storage migration in bytes. - memory_limitation_per_thread_for_storage_migration_bytes: int | *100000000 - - // The sleep time in milliseconds for memory maintenance. - memory_maintenance_sleep_time_ms: int | *100 - - // The maximum number of flush running tasks per BE. - memtable_flush_running_count_limit: int | *2 - - // The hard limit percent of active memory for memtable flush. - memtable_hard_limit_active_percent: int | *50 - - // The insert memory ratio for memtable flush. 
- memtable_insert_memory_ratio: float | *1.4 - - // The soft limit percent of active memory for memtable flush. - memtable_soft_limit_active_percent: int | *50 - - // The minimum IO size for merged HDFS write in bytes. - merged_hdfs_min_io_size: int | *8192 - - // The minimum IO size for merged OSS write in bytes. - merged_oss_min_io_size: int | *1048576 - - // The threshold of remaining size for migration in MB. - migration_remaining_size_threshold_mb: int | *10 - - // The timeout time for migration task in seconds. If the task runs longer than this time, the task will be terminated, in seconds. Timeout = max(migration_task_timeout_secs, tablet size / 1MB/s) - migration_task_timeout_secs: int | *300 - - // The minimum bytes in scanner queue. - min_bytes_in_scanner_queue: int | *67108864 - - // The threshold of mmap size in bytes. - mmap_threshold: int | *134217728 - - // The maximum number of discontinuous versions for MOW publish. - mow_publish_max_discontinuous_version_num: int | *20 - - // The maximum number of threads for multi-get. - multi_get_max_threads: int | *10 - - // The maximum size of node channel pending queue in bytes. - nodechannel_pending_queue_max_bytes: int | *67108864 - - // The natural read size for ORC in MB. - orc_natural_read_size_mb: int | *8 - - // The maximum buffer size for Parquet column in MB. - parquet_column_max_buffer_mb: int | *8 - - // The maximum size of Parquet header in MB. - parquet_header_max_size_mb: int | *1 - - // The maximum buffer size for Parquet row group in MB. - parquet_rowgroup_max_buffer_mb: int | *128 - - // The interval time for pipeline status report in seconds. - pipeline_status_report_interval: int | *10 - - // The interval time for pipeline task leakage detect in seconds. - pipeline_task_leakage_detect_period_secs: int | *60 - - // The interval time for the agent to sweep primary key index page cache in seconds. 
- pk_index_page_cache_stale_sweep_time_sec: int | *600 - - // The interval time for the agent to sweep point query row cache in seconds. - point_query_row_cache_stale_sweep_time_sec: int | *300 - - // The maximum size of pre-serialize keys in bytes. - pre_serialize_keys_limit_bytes: int | *16777216 - - // The full GC size percent. - process_full_gc_size: string | *"10%" - - // The minor GC size percent. - process_minor_gc_size: string | *"5%" - - // The public access IP. - public_access_ip: string | *"" - - // The timeout time for query statistics reserve in milliseconds. - query_statistics_reserve_timeout_ms: int | *30000 - - // The interval time for the agent to remove unused remote files in seconds. - remove_unused_remote_files_interval_sec: int | *21600 - - // The interval time for query statistics report in milliseconds. - report_query_statistics_interval_ms: int | *3000 - - // Whether to report random wait time. - report_random_wait: bool | *true - - // The maximum number of rows to check for RF predicate. - rf_predicate_check_row_num: int | *204800 - - // The base wait time in milliseconds for S3 client. - s3_read_base_wait_time_ms: int | *100 - - // The maximum wait time in milliseconds for S3 client. - s3_read_max_wait_time_ms: int | *800 - - // The buffer size in bytes for S3 client. - s3_write_buffer_size: int | *5242880 - - // The timeout time in milliseconds for S3 client buffer allocation. - s3_writer_buffer_allocation_timeout: int | *300 - - // The nice value for scan thread. - scan_thread_nice_value: int | *0 - - // The capacity of schema cache. - schema_cache_capacity: int | *1024 - - // The interval time for schema cache sweep in seconds. - schema_cache_sweep_time_sec: int | *100 - - // The threshold of compressed size in KB for segment compression. - segment_compression_threshold_kb: int | *256 - - // Whether to skip loading stale rowset meta. - skip_loading_stale_rowset_meta: bool | *false - - // The interval time for spill GC in milliseconds. 
- spill_gc_interval_ms: int | *2000 - - // The work time in milliseconds for spill GC. - spill_gc_work_time_ms: int | *2000 - - // The threshold of stack trace size in bytes for allocating large memory. - stacktrace_in_alloc_large_memory_bytes: int | *2147483648 - - // The interval time for storage refresh storage policy task in seconds. - storage_refresh_storage_policy_task_interval_seconds: int | *5 - - // The batch size for stream load record. - stream_load_record_batch_size: int | *50 - - // The threshold of data processed in bytes for non-partition write scaling. - table_sink_non_partition_write_scaling_data_processed_threshold: int | *26214400 - - // The maximum number of partitions per writer for partition write. - table_sink_partition_write_max_partition_nums_per_writer: int | *128 - - // The threshold of data processed in bytes for non-partition write rebalance. - table_sink_partition_write_min_data_processed_rebalance_threshold: int | *26214400 - - // The threshold of data processed in bytes for partition write rebalance. - table_sink_partition_write_min_partition_data_processed_rebalance_threshold: int | *15728640 - - // The interval time for tablet lookup cache sweep in seconds. - tablet_lookup_cache_stale_sweep_time_sec: int | *30 - - // The maximum size of serialized tablet meta in bytes. - tablet_meta_serialize_size_limit: int | *1610612736 - - // The batch size for tablet path check. - tablet_path_check_batch_size: int | *1000 - - // The threshold of data processed in bytes for tablet rowset stale sweep. - tablet_rowset_stale_sweep_threshold_size: int | *100 - - // The capacity of tablet schema cache. - tablet_schema_cache_capacity: int | *102400 - - // The interval time for tablet schema cache recycle in seconds. - tablet_schema_cache_recycle_interval: int | *3600 - - // The ratio of orphan vertex for tablet version graph. - tablet_version_graph_orphan_vertex_ratio: float | *0.1 - - // The maximum wait time in milliseconds for thread wait GC. 
- thread_wait_gc_max_milliseconds: int | *1000 - - // The number of retries for thrift client open. - thrift_client_open_num_tries: int | *1 - - // Whether to enable flatten nested variant column. - variant_enable_flatten_nested: bool | *false - - // The maximum size of merged tablet schema in bytes. - variant_max_merged_tablet_schema_size: int | *2048 - - // The ratio of default values for sparse column. - variant_ratio_of_defaults_as_sparse_column: float | *1 - - // The threshold of rows to estimate sparse column. - variant_threshold_rows_to_estimate_sparse_column: int | *2048 - - // Whether to throw exception on invalid JSON. - variant_throw_exeception_on_invalid_json: bool | *false - - // The interval time for weighted memory ratio refresh in milliseconds. - wg_weighted_memory_ratio_refresh_interval_ms: int | *50 - - // The timeout time for workload group scan task wait in milliseconds. - workload_group_scan_task_wait_timeout_ms: int | *10000 - - // The write buffer size in bytes for aggregation. - write_buffer_size_for_agg: int | *419430400 - - // The interval time for cleaning stream load record in seconds. 
- clean_stream_load_record_interval_secs: int | *1800 - - // STATIC parameters - // Whether to enable set in bitmap value - enable_set_in_bitmap_value: bool | *false - - // Whether to enable skip tablet compaction - enable_skip_tablet_compaction: bool | *true - - // Whether to enable snapshot action - enable_snapshot_action: bool | *false - - // Whether to enable time lut - enable_time_lut: bool | *true - - // Whether to enable workload group for scan - enable_workload_group_for_scan: bool | *false - - // Whether to enable write index searcher cache - enable_write_index_searcher_cache: bool | *true - - // Whether to exit on exception - exit_on_exception: bool | *false - - // The expiration time of FE cache in seconds - fe_expire_duration_seconds: int | *60 - - // The maximum size of file segment in file cache - file_cache_max_file_segment_size: int | *4194304 - - // The minimum size of file segment in file cache - file_cache_min_file_segment_size: int | *1048576 - - // The path to the file cache directory - file_cache_path: string | *"" - - // The protocol for function service - function_service_protocol: string | *"h2:grpc" - - // The interval time for the agent to generate tablet meta checkpoint tasks - generate_tablet_meta_checkpoint_tasks_interval_secs: int | *600 - - // The number of threads for group commit insert - group_commit_insert_threads: int | *10 - - // The maximum number of rows for max filter ratio in group commit - group_commit_memory_rows_for_max_filter_ratio: int | *10000 - - // The number of threads for group commit relay wal - group_commit_relay_wal_threads: int | *10 - - // The maximum retry interval time for group commit replay wal in seconds - group_commit_replay_wal_retry_interval_max_seconds: int | *1800 - - // The retry interval time for group commit replay wal in seconds - group_commit_replay_wal_retry_interval_seconds: int | *5 - - // The maximum number of retry times for group commit replay wal - group_commit_replay_wal_retry_num: int 
| *10 - - // Whether to wait for group commit replay wal finish - group_commit_wait_replay_wal_finish: bool | *false - - // The maximum disk limit for group commit wal - group_commit_wal_max_disk_limit: string | *"10%" - - // Whether to hide webserver config page - hide_webserver_config_page: bool | *false - - // Whether to ignore always true predicate for segment - ignore_always_true_predicate_for_segment: bool | *true - - // The number of rowsets to ignore invalid partition id - ignore_invalid_partition_id_rowset_num: int | *0 - - // The number of threads for ingest binlog work pool - ingest_binlog_work_pool_size: int | *-1 - - // The path to the inverted index dictionary directory - inverted_index_dict_path: string | *"/opt/apache-doris/be/dict" - - // The percentage of file descriptor limit for inverted index - inverted_index_fd_number_limit_percent: int | *40 - - // The limit of query cache memory size for inverted index - inverted_index_query_cache_limit: string | *"10%" - - // The number of shards for inverted index query cache - inverted_index_query_cache_shards: int | *256 - - // The size of read buffer for inverted index - inverted_index_read_buffer_size: int | *4096 - - // The limit of searcher cache memory size for inverted index - inverted_index_searcher_cache_limit: string | *"10%" - - // Whether to enable kafka debug - kafka_debug: string | *"disable" - - // The percentage of memory limit for load process safe memory permit - load_process_safe_mem_permit_percent: int | *5 - - // The maximum retry interval time for load stream eagain wait in seconds - load_stream_eagain_wait_seconds: int | *600 - - // The maximum number of tasks for load stream flush token - load_stream_flush_token_max_tasks: int | *15 - - // The maximum buffer size for load stream - load_stream_max_buf_size: int | *20971520 - - // The maximum wait time for load stream flush token in milliseconds - load_stream_max_wait_flush_token_time_ms: int | *600000 - - // The number of messages 
in each batch for load stream - load_stream_messages_in_batch: int | *128 - - - // The maximum depth of bkd tree - max_depth_in_bkd_tree: int | *32 - - // The maximum depth of expression tree - max_depth_of_expr_tree: int | *600 - - // The maximum number of external file meta cache - max_external_file_meta_cache_num: int | *1000 - - // The maximum number of hdfs file handle cache - max_hdfs_file_handle_cache_num: int | *1000 - - // The maximum time for hdfs file handle cache in seconds - max_hdfs_file_handle_cache_time_sec: int | *3600 - - // The maximum number of meta checkpoint threads - max_meta_checkpoint_threads: int | *-1 - - // The maximum number of tablet migration threads - max_tablet_migration_threads: int | *1 - - // The reserved memory bytes for memtable limiter - memtable_limiter_reserved_memory_bytes: int | *838860800 - - // The timeout time for migration lock in milliseconds - migration_lock_timeout_ms: int | *1000 - - // The minimum number of file descriptors - min_file_descriptor_number: int | *60000 - - // The minimum row group size for parquet reader - min_row_group_size: int | *134217728 - - // The minimum number of tablet migration threads - min_tablet_migration_threads: int | *1 - - // The number of broadcast buffers - num_broadcast_buffer: int | *32 - - // Number of cores Doris will used, this will effect only when it's greater than 0. Otherwise, Doris will use all cores returned from "/proc/cpuinfo". - num_cores: int | *0 - - // Control the number of disks on the machine. If 0, this comes from the system settings. 
- num_disks: int | *0 - - // The timeout time for open load stream in milliseconds - open_load_stream_timeout_ms: int | *60000 - - // The maximum buffer size for parquet reader - parquet_reader_max_buffer_size: int | *50 - - // The size of partition disk index lru cache - partition_disk_index_lru_size: int | *10000 - - // The threshold for topn partition - partition_topn_partition_threshold: int | *1024 - - // The number of pipeline executor threads - pipeline_executor_size: int | *0 - - // The limit of page cache memory size for primary key storage - pk_storage_page_cache_limit: string | *"10%" - - // The size of primary key data page - primary_key_data_page_size: int | *32768 - - // The timeout time for publish version task in seconds - publish_version_task_timeout_s: int | *8 - - // The elasticity size of query cache memory size in MB - query_cache_elasticity_size_mb: int | *128 - - // The maximum number of partitions for query cache - query_cache_max_partition_count: int | *1024 - - // The maximum size of query cache memory size in MB - query_cache_max_size_mb: int | *256 - - // The maximum number of rowsets in each batch for remote split source - remote_split_source_batch_size: int | *10240 - - // The maximum number of write buffers for rocksdb - rocksdb_max_write_buffer_number: int | *5 - - // The load balancer for rpc - rpc_load_balancer: string | *"rr" - - // The number of threads for s3 transfer executor pool - s3_transfer_executor_pool_size: int | *2 - - // The percentage of file descriptor limit for segment cache - segment_cache_fd_percentage: int | *40 - - // The percentage of memory limit for segment cache - segment_cache_memory_percentage: int | *2 - - // Whether to share delta writers - share_delta_writers: bool | *true - - // The queue size for spill io thread pool - spill_io_thread_pool_queue_size: int | *102400 - - // The number of threads for spill io thread pool - spill_io_thread_pool_thread_num: int | *-1 - - // The limit of storage size for 
spill io thread pool - spill_storage_limit: string | *"20%" - - // The root path for spill storage - spill_storage_root_path: string | *"" - - // The path for ssl certificate - ssl_certificate_path: string | *"" - - // The path for ssl private key - ssl_private_key_path: string | *"" - - // The timeout time for stream load record expire in seconds - stream_load_record_expire_time_secs: int | *28800 - - // The buffer size for stream tvf - stream_tvf_buffer_size: int | *1048576 - - // The roll mode for system log, TIME-DAY, TIME-HOUR, SIZE-MB-nnn - sys_log_roll_mode: string | *"SIZE-MB-1024" - - // The verbose flags for system log - sys_log_verbose_flags_v: int | *-1 - - // The interval time for tablet path check in seconds - tablet_path_check_interval_seconds: int | *-1 - - // The maximum number of publish transaction threads - tablet_publish_txn_max_thread: int | *32 - - // Whether to enable stale sweep by size for tablet rowset - tablet_rowset_stale_sweep_by_size: bool | *false - - // The path for temporary files - tmp_file_dir: string | *"tmp" - - // Whether to wait for internal group commit finish - wait_internal_group_commit_finish: bool | *false - - // The number of flush thread per store - wg_flush_thread_num_per_store: int | *6 - - // Whether to enable set in bitmap value - enable_set_in_bitmap_value: bool | *false - - // Whether to enable low cardinality optimize - enable_low_cardinality_optimize: bool | *true - - - // Whether to enable low cardinality cache code - enable_low_cardinality_cache_code: bool | *true - - // Whether to enable jvm monitor - enable_jvm_monitor: bool | *false - - // Whether to check timestamp of inverted index cache - enable_inverted_index_cache_check_timestamp: bool | *true - - // Whether to enable fuzzy mode - enable_fuzzy_mode: bool | *false - - // This config controls whether the s3 file writer would flush cache asynchronously - enable_flush_file_cache_async: bool | *true - - // Whether to enable file logger - 
enable_file_logger: bool | *true - - // Whether to enable file cache query limit feature - enable_file_cache_query_limit: bool | *false - - // Whether to enable file cache feature - enable_file_cache: bool | *false - - // Whether to enable binlog feature - enable_feature_binlog: bool | *false - - // Whether to enable debug points - enable_debug_points: bool | *false - - // Whether to enable base compaction idle scheduler - enable_base_compaction_idle_sched: bool | *true - - // Whether to check authorization - enable_all_http_auth: bool | *false - - // Download binlog rate limit, unit is KB/s, 0 means no limit - download_binlog_rate_limit_kbs: int | *0 - - // min thread pool size for scanner thread pool - doris_scanner_min_thread_pool_thread_num: int | *8 - - // number of s3 scanner thread pool size - doris_remote_scanner_thread_pool_thread_num: int | *48 - - // number of s3 scanner thread pool queue size - doris_remote_scanner_thread_pool_queue_size: int | *102400 - - // Whether to enable scanner thread pool per disk, if true, each disk will have a separate thread pool for scanner - doris_enable_scanner_thread_pool_per_disk: bool | *true - - // Whether to disable pk page cache feature in storage - disable_pk_storage_page_cache: bool | *false - - // The default delete bitmap cache is set to 100MB. We will take the larger of 0.5% of the total memory and 100MB as the delete bitmap cache size. 
- delete_bitmap_dynamic_agg_cache_limit: string | *"0.5%" - - // Global bitmap cache capacity for aggregation cache, size in bytes - delete_bitmap_agg_cache_capacity: int | *104857600 - - // The number of threads to compact cold data - cooldown_thread_num: int | *5 - - // The number of threads to compact cold data - cold_data_compaction_thread_num: int | *2 - - // Whether to clear file cache when tablet is deleted - clear_file_cache: bool | *false - - // the count of thread to calc delete bitmap - calc_delete_bitmap_max_thread: int | *32 - - // The number of threads in the light work pool. - brpc_light_work_pool_threads: int | *-1 - - // The maximum number of requests that can be queued in the light work pool. - brpc_light_work_pool_max_queue_size: int | *-1 - - // the time of brpc server keep idle connection, setting this value too small may cause rpc between backends to fail, the default value is set to -1, which means never close idle connection. - brpc_idle_timeout_sec: int | *-1 - - // The number of threads in the heavy work pool. - brpc_heavy_work_pool_threads: int | *-1 - - // The maximum number of requests that can be queued in the heavy work pool. - brpc_heavy_work_pool_max_queue_size: int | *-1 - - // The port number of the Thrift server on the BE, which is used to receive requests from the FE. - be_port: int | *9060 - - // The port of brpc on the BE, which is used for communication between the BEs. - brpc_port: int | *8060 - - // The service port of the HTTP server on the BE. - webserver_port: int | *8040 - - // The heartbeat service port (Thrift) on the BE, which is used to receive heartbeats from the FE. 
- heartbeat_service_port: int | *9050 - - // The port of the Arrow Flight SQL server on the FE, which is used for communication between the Arrow Flight Client and the BE - arrow_flight_sql_port: int | *-1 - - // memory mode, performance or compact - memory_mode: string & "performance" | "compact" | *"moderate" - - // Limit the percentage of the server's maximum memory used by the BE process. It is used to prevent BE memory from occupying too much memory of the machine. This parameter must be greater than 0. When the percentage is greater than 100%, the value will default to 100%. - mem_limit: string | *"90%" - - // Soft memory limit as a fraction of hard memory limit. - soft_mem_limit_frac: float | *0.9 - - // Configure the location of the be_custom.conf file - custom_config_dir: string | *"/opt/apache-doris/be/conf" - - // Default dirs to put jdbc drivers. - jdbc_drivers_dir: string | *"/opt/apache-doris/be/jdbc_drivers" - - // This configuration is mainly used to modify the number of bthreads for brpc. If the value is set to -1, which means the number of bthreads is #cpu-cores - brpc_num_threads: int | *256 - - // Declare a selection strategy for those servers with many IPs. Note that at most one ip should match this list. This is a semicolon-separated list in CIDR notation, such as 10.10.10.0/24. If there is no IP matching this rule, one will be randomly selected - priority_networks: string | *"" - - // Whether https is supported. If so, configure ssl_certificate_path and ssl_private_key_path in be.conf. - enable_https: bool | *false - - // data root path, separate by ';'.you can specify the storage medium of each root path, HDD or SSD. 
you can add capacity limit at the end of each root path, separate by ','.If the user does not use a mix of SSD and HDD disks, they do not need to configure the configuration methods in Example 1 and Example 2 below, but only need to specify the storage directory; they also do not need to modify the default storage media configuration of FE. - storage_root_path: string | *"/opt/apache-doris/be/storage" - - // The number of threads that execute the heartbeat service on BE. the default is 1, it is not recommended to modify - heartbeat_service_thread_count: int | *1 - - // When BE starts, check storage_root_path All paths under configuration. - ignore_broken_disk: bool | *false - - // es scroll keep-alive hold time - es_scroll_keepalive: string | *"5m" - - // This configuration is mainly used to modify the parameter max_body_size of brpc - brpc_max_body_size: int | *3147483648 - - // This configuration is mainly used to modify the parameter socket_max_unwritten_bytes of brpc. - brpc_socket_max_unwritten_bytes: int | *3147483648 - - // his configuration indicates the service model used by FE's Thrift service. The type is string and is case-insensitive. This parameter needs to be consistent with the setting of fe's thrift_server_type parameter. Currently there are two values for this parameter, THREADED and THREAD_POOL - thrift_server_type_of_fe: string | *"THREAD_POOL" - - // txn_map_lock fragment size, the value is 2^n, n=0,1,2,3,4. This is an enhancement to improve the performance of managing txn - txn_map_shard_size: int | *1024 - - // txn_lock shard size, the value is 2^n, n=0,1,2,3,4, this is an enhancement function that can improve the performance of submitting and publishing txn - txn_shard_size: int | *1024 - - // The maximum number of client caches per host. There are multiple client caches in BE, but currently we use the same cache size configuration. 
If necessary, use different configurations to set up different client-side caches - max_client_cache_size_per_host: int | *10 - - // The upper limit of query requests that can be processed on a single node - fragment_pool_queue_size: int | *4096 - - // Query the number of threads. By default, the minimum number of threads is 64 - fragment_pool_thread_num_min: int | *64 - - // Follow up query requests create threads dynamically, with a maximum of 512 threads created. - fragment_pool_thread_num_max: int | *2048 - - // The queue length of the Scanner thread pool. In Doris' scanning tasks, each Scanner will be submitted as a thread task to the thread pool waiting to be scheduled, and after the number of submitted tasks exceeds the length of the thread pool queue, subsequent submitted tasks will be blocked until there is a empty slot in the queue. - doris_scanner_thread_pool_queue_size: int | *102400 - - // The number of threads in the Scanner thread pool. In Doris' scanning tasks, each Scanner will be submitted as a thread task to the thread pool to be scheduled. This parameter determines the size of the Scanner thread pool. The default value is -1, which means the number of threads in the Scanner thread pool is equal to max(48, 2 * num_of_cpu_cores). - doris_scanner_thread_pool_thread_num: int | *-1 - - // Max thread number of Remote scanner thread pool. Remote scanner thread pool is used for scan task of all external data sources. - doris_max_remote_scanner_thread_pool_thread_num: int | *-1 - - // In vertical compaction, max memory usage for row_source_buffer - vertical_compaction_max_row_source_memory_mb: int | *200 - - // Config for default rowset type - default_rowset_type: string & "ALPHA" | "BETA" | *"BETA" - - // Enable to use segment compaction during loading to avoid -238 error - enable_segcompaction: bool | *true - - // Segment compaction is triggered when the number of segments exceeds this threshold. 
This configuration also limits the maximum number of raw segments in a single segment compaction task. - segcompaction_batch_size: int | *10 - - // Max row count allowed in a single source segment, bigger segments will be skipped. - segcompaction_candidate_max_rows: int | *1048576 - - // Max file size allowed in a single source segment, bigger segments will be skipped. - segcompaction_candidate_max_bytes: int | *104857600 - - // Max total row count allowed in a single segcompaction task. - segcompaction_task_max_rows: int | *1572864 - - // Max total file size allowed in a single segcompaction task. - segcompaction_task_max_bytes: int | *157286400 - - // Used for mini Load. mini load data file will be removed after this time. - load_data_reserve_hours: int | *4 - - // The count of thread to high priority batch load - push_worker_count_high_priority: int | *3 - - // The count of thread to batch load - push_worker_count_normal_priority: int | *3 - - // Whether to enable the single-copy data import function. - enable_single_replica_load: bool | *true - - // The percentage of the upper memory limit occupied by all imported threads on a single node, the default is 50% - load_process_max_memory_limit_percent: int | *50 - - // The soft limit refers to the proportion of the load memory limit of a single node. For example, the load memory limit for all load tasks is 20GB, and the soft limit defaults to 50% of this value, that is, 10GB. When the load memory usage exceeds the soft limit, the job with the largest memory consumption will be selected to be flushed to release the memory space, the default is 80% - load_process_soft_mem_limit_percent: int | *80 - - // The max size of thread pool for routine load task. 
this should be larger than FE config 'max_routine_load_task_num_per_be' (default 5) - max_routine_load_thread_pool_size: int | *1024 - // number of thread for flushing memtable per store, for high priority load task - high_priority_flush_thread_num_per_store: int | *6 - - // Used in single-stream-multi-table load. When receive a batch of messages from kafka, if the size of batch is more than this threshold, we will request plans for all related tables. - multi_table_batch_plan_threshold: int | *200 - - // Used in single-stream-multi-table load. When receiving a batch of messages from Kafka, - multi_table_max_wait_tables: int | *5 - - // Number of download workers for single replica load - single_replica_load_download_num_workers: int | *64 - - // If the dependent Kafka version is lower than 0.10.0.0, this value should be set to false. - kafka_api_version_request: string | *"true" - - // If the dependent Kafka version is lower than 0.10.0.0, the value set by the fallback version kafka_broker_version_fallback will be used if the value of kafka_api_version_request is set to false, and the valid values are: 0.9.0.x, 0.8.x.y. - kafka_broker_version_fallback: string | *"0.10.0" - - // The count of thread to delete - delete_worker_count: int | *3 - - // The count of thread to clear transaction task - clear_transaction_task_worker_count: int | *1 - - // The count of thread to clone - clone_worker_count: int | *3 - - // The count of thread to serve backend execution requests - be_service_threads: int | *64 - - // The count of thread to download data - download_worker_count: int | *1 - - // The count of thread to drop tablet - drop_tablet_worker_count: int | *3 - - // The count of thread for flushing memtable per store - flush_thread_num_per_store: int | *6 - - // The maximum number of the threads per disk is also the max queue depth per disk. 
- num_threads_per_disk: int | *0 - - // The count of thread to publish version - publish_version_worker_count: int | *8 - - // The count of thread to upload - upload_worker_count: int | *1 - - // Number of webserver workers - webserver_num_workers: int | *48 - - // Number of send batch thread pool size - send_batch_thread_pool_thread_num: int | *64 - - // Number of send batch thread pool queue size - send_batch_thread_pool_queue_size: int | *102400 - - // The count of thread to make snapshot - make_snapshot_worker_count: int | *5 - - // The count of thread to release snapshot - release_snapshot_worker_count: int | *5 - - // The memory limit for row cache, default is 20% of total memory - row_cache_mem_limit: string | *"20%" - - // The maximum low water mark of the system /proc/meminfo/MemAvailable, Unit byte, default 1.6G, actual low water mark=min(1.6G, MemTotal * 10%), avoid wasting too much memory on machines with large memory larger than 16G. Turn up max. On machines with more than 16G memory, more memory buffers will be reserved for Full GC. Turn down max. will use as much memory as possible. 
- max_sys_mem_available_low_water_mark_bytes: int | *6871947673 - - // Minimum read buffer size in bytes - min_buffer_size: int | *1024 - - // Whether to enable the recycle scan data thread check - path_gc_check: bool | *true - - // The maximum interval for disk garbage cleaning, unit is second - max_garbage_sweep_interval: int | *3600 - - // The minimum interval between disk garbage cleaning, unit is second - min_garbage_sweep_interval: int | *180 - - // pprof profile save directory - pprof_profile_dir: string | *"/opt/apache-doris/be/log" - - // Dir to save files downloaded by SmallFileMgr - small_file_dir: string | *"/opt/apache-doris/be/lib/small_file/" - - // udf function directory - user_function_dir: string | *"/opt/apache-doris/be/lib/udf" - - // the count of thread to clone - storage_medium_migrate_count: int | *1 - - // Cache for storage page size - storage_page_cache_limit: string | *"20%" - - // Shard size for page cache, the value must be power of two. It's recommended to set it to a value close to the number of BE cores in order to reduce lock contentions. - storage_page_cache_shard_size: int | *256 - - // Index page cache as a percentage of total storage page cache, value range is [0, 100] - index_page_cache_percentage: int & >=0 & <=100 | *10 - - // Max number of segment cache, default -1 for backward compatibility fd_number*2/5 - segment_cache_capacity: int | *-1 - - // Used to check incompatible old format strictly - storage_strict_check_incompatible_old_format: bool | *true - - // The count of thread to create table - create_tablet_worker_count: int | *3 - - // The count of thread to check consistency - check_consistency_worker_count: int | *1 - - // tablet_map_lock shard size, the value is 2^n, n=0,1,2,3,4.. 
this is a an enhancement for better performance to manage tablet - tablet_map_shard_size: int | *256 - - // Update interval of tablet state cache - tablet_writer_open_rpc_timeout_sec: int | *60 - - // The count of thread to alter table - alter_tablet_worker_count: int | *3 - - // The count of thread to alter index - alter_index_worker_count: int | *3 - - // Whether to continue to start be when load tablet from header failed. - ignore_load_tablet_failure: bool | *false - - // Storage directory of BE log data - sys_log_dir: string | *"/opt/apache-doris/be/log" - - // Number of log files kept - sys_log_roll_num: int | *10 - - // Log display level, used to control the log output at the beginning of VLOG in the code - sys_log_verbose_level: int | *10 - - // Log printing module, writing olap will only print the log under the olap module - sys_log_verbose_modules: string | *"" - - // aws sdk log level: Off = 0,Fatal = 1,Error = 2, Warn = 3, Info = 4,Debug = 5,Trace = 6. Default is Off = 0. - aws_log_level: int | *0 - - // log buffer level - log_buffer_level: string | *"" - - // If set to true, the metric calculator will run to collect BE-related indicator information, if set to false, it will not run - enable_metric_calculator: bool | *true - - // User control to turn on and off system indicators. - enable_system_metrics: bool | *true - - // BE Whether to enable the use of java-jni. When enabled, mutual calls between c++ and java are allowed. Currently supports hudi, java-udf, jdbc, max-compute, paimon, preload, avro - enable_java_support: bool | *true - - // The WAL directory of group commit. 
- group_commit_wal_path: string | *"" - - // The JAVA_OPTS startup configuration for the BE node - JAVA_OPTS: string | *"" - - // thread will sleep async_file_cache_init_sleep_interval_ms per scan async_file_cache_init_file_num_interval file num to limit IO - async_file_cache_init_file_num_interval: int | *1000 - - // thread will sleep async_file_cache_init_sleep_interval_ms per scan async_file_cache_init_file_num_interval file num to limit IO - async_file_cache_init_sleep_interval_ms: int | *20 - - // The version of bitmap serialize. - bitmap_serialize_version: int | *1 -} - -configuration: #BEParameter & { -} diff --git a/addons/doris/config/be-config-effect-scope.yaml b/addons/doris/config/be-config-effect-scope.yaml deleted file mode 100644 index 18ff25031..000000000 --- a/addons/doris/config/be-config-effect-scope.yaml +++ /dev/null @@ -1,509 +0,0 @@ -## staticParameters, list of StaticParameter, modifications of them trigger a process restart. -## dynamicParameters, list of DynamicParameter, modifications of them trigger a config dynamic reload without process restart. -## staticParameters and dynamicParameters determine the behavior of parameter changes: -## if any of the modified parameters is in the staticParameters list, this operation will trigger a process restart. -## if all the changed parameters are in the dynamicParameters list, this change executes reload without process restart. -## if the above two conditions are not met, by default, parameter change operation follow the rule for using staticParameters. 
-staticParameters: - - memory_mode - - mem_limit - - soft_mem_limit_frac - - brpc_num_threads - - priority_networks - - enable_https - - storage_root_path - - heartbeat_service_thread_count - - ignore_broken_disk - - es_scroll_keepalive - - brpc_max_body_size - - brpc_socket_max_unwritten_bytes - - thrift_server_type_of_fe - - txn_map_shard_size - - txn_shard_size - - max_client_cache_size_per_host - - fragment_pool_queue_size - - fragment_pool_thread_num_min - - fragment_pool_thread_num_max - - doris_scanner_thread_pool_queue_size - - doris_scanner_thread_pool_thread_num - - doris_max_remote_scanner_thread_pool_thread_num - - vertical_compaction_max_row_source_memory_mb - - default_rowset_type - - enable_segcompaction - - segcompaction_batch_size - - segcompaction_candidate_max_rows - - segcompaction_candidate_max_bytes - - segcompaction_task_max_rows - - segcompaction_task_max_bytes - - load_data_reserve_hours - - push_worker_count_high_priority - - push_worker_count_normal_priority - - enable_single_replica_load - - load_process_max_memory_limit_percent - - load_process_soft_mem_limit_percent - - max_routine_load_thread_pool_size - - high_priority_flush_thread_num_per_store - - multi_table_batch_plan_threshold - - multi_table_max_wait_tables - - single_replica_load_download_num_workers - - kafka_api_version_request - - kafka_broker_version_fallback - - delete_worker_count - - clear_transaction_task_worker_count - - clone_worker_count - - be_service_threads - - download_worker_count - - drop_tablet_worker_count - - flush_thread_num_per_store - - num_threads_per_disk - - publish_version_worker_count - - upload_worker_count - - webserver_num_workers - - send_batch_thread_pool_thread_num - - send_batch_thread_pool_queue_size - - make_snapshot_worker_count - - release_snapshot_worker_count - - row_cache_mem_limit - - max_sys_mem_available_low_water_mark_bytes - - min_buffer_size - - path_gc_check - - max_garbage_sweep_interval - - min_garbage_sweep_interval - - 
pprof_profile_dir - - small_file_dir - - user_function_dir - - storage_medium_migrate_count - - storage_page_cache_limit - - storage_page_cache_shard_size - - index_page_cache_percentage - - segment_cache_capacity - - storage_strict_check_incompatible_old_format - - create_tablet_worker_count - - check_consistency_worker_count - - tablet_map_shard_size - - tablet_writer_open_rpc_timeout_sec - - alter_tablet_worker_count - - alter_index_worker_count - - ignore_load_tablet_failure - - sys_log_roll_num - - sys_log_verbose_level - - sys_log_verbose_modules - - aws_log_level - - log_buffer_level - - enable_metric_calculator - - enable_system_metrics - - enable_java_support - - group_commit_wal_path - - async_file_cache_init_file_num_interval - - bitmap_serialize_version - - broken_storage_path - - brpc_heavy_work_pool_max_queue_size - - brpc_heavy_work_pool_threads - - brpc_idle_timeout_sec - - brpc_light_work_pool_max_queue_size - - brpc_light_work_pool_threads - - calc_delete_bitmap_max_thread - - clear_file_cache - - cooldown_thread_num - - delete_bitmap_agg_cache_capacity - - delete_bitmap_dynamic_agg_cache_limit - - cold_data_compaction_thread_num - - disable_pk_storage_page_cache - - doris_enable_scanner_thread_pool_per_disk - - doris_remote_scanner_thread_pool_queue_size - - doris_remote_scanner_thread_pool_thread_num - - doris_scanner_min_thread_pool_thread_num - - download_binlog_rate_limit_kbs - - enable_all_http_auth - - enable_base_compaction_idle_sched - - enable_debug_points - - enable_feature_binlog - - enable_file_cache - - enable_file_cache_query_limit - - enable_file_logger - - enable_flush_file_cache_async - - enable_fuzzy_mode - - enable_inverted_index_cache_check_timestamp - - enable_jvm_monitor - - enable_low_cardinality_cache_code - - enable_low_cardinality_optimize - - enable_set_in_bitmap_value - - enable_skip_tablet_compaction - - enable_snapshot_action - - enable_time_lut - - enable_workload_group_for_scan - - enable_write_index_searcher_cache 
- - exit_on_exception - - fe_expire_duration_seconds - - file_cache_max_file_segment_size - - file_cache_min_file_segment_size - - file_cache_path - - function_service_protocol - - generate_tablet_meta_checkpoint_tasks_interval_secs - - group_commit_insert_threads - - group_commit_memory_rows_for_max_filter_ratio - - group_commit_relay_wal_threads - - group_commit_replay_wal_retry_interval_max_seconds - - group_commit_replay_wal_retry_interval_seconds - - group_commit_replay_wal_retry_num - - group_commit_wait_replay_wal_finish - - group_commit_wal_max_disk_limit - - hide_webserver_config_page - - ignore_always_true_predicate_for_segment_compaction - - ignore_invalid_partition_id_rowset_num - - ingest_binlog_work_pool_size - - inverted_index_dict_path - - inverted_index_fd_number_limit_percent - - inverted_index_query_cache_limit - - inverted_index_query_cache_shards - - inverted_index_read_buffer_size - - inverted_index_searcher_cache_limit - - kafka_debug - - load_process_safe_mem_permit_percent - - load_stream_eagain_wait_seconds - - load_stream_flush_token_max_tasks - - load_stream_max_buf_size - - load_stream_max_wait_flush_token_time_ms - - load_stream_messages_in_batch - - max_depth_in_bkd_tree - - max_depth_of_expr_tree - - max_external_file_meta_cache_num - - max_hdfs_file_handle_cache_num - - max_hdfs_file_handle_cache_time_sec - - max_meta_checkpoint_threads - - max_tablet_migration_threads - - memtable_limiter_reserved_memory_bytes - - migration_lock_timeout_ms - - min_file_descriptor_number - - min_row_group_size - - min_tablet_migration_threads - - num_broadcast_buffer - - num_cores - - num_disks - - open_load_stream_timeout_ms - - parquet_reader_max_buffer_size - - partition_disk_index_lru_size - - partition_topn_partition_threshold - - pipeline_executor_size - - pk_storage_page_cache_limit - - primary_key_data_page_size - - publish_version_task_timeout_s - - query_cache_elasticity_size_mb - - query_cache_max_partition_count - - 
query_cache_max_size_mb - - remote_split_source_batch_size - - rocksdb_max_write_buffer_number - - rpc_load_balancer - - s3_transfer_executor_pool_size - - segment_cache_fd_percentage - - segment_cache_memory_percentage - - share_delta_writers - - spill_io_thread_pool_queue_size - - spill_io_thread_pool_thread_num - - spill_storage_limit - - spill_storage_root_path - - ssl_certificate_path - - ssl_private_key_path - - stream_load_record_expire_time_secs - - stream_tvf_buffer_size - - sys_log_roll_mode - - sys_log_verbose_flags_v - - tablet_path_check_interval_seconds - - tablet_publish_txn_max_thread - - tablet_rowset_stale_sweep_by_size - - tmp_file_dir - - wait_internal_group_commit_finish - - wg_flush_thread_num_per_store - - - -# - - - -dynamicParameters: - - enable_query_like_bloom_filter - - enable_pipeline_task_leakage_detect - - enable_parquet_page_index - - enable_missing_rows_correctness_check - - enable_merge_on_write_correctness_check - - enable_memory_orphan_check - - enable_je_purge_dirty_pages - - enable_delete_when_cumu_compaction - - enable_debug_log_timeout_secs - - enable_compaction_priority_scheduling - - enable_compaction_checksum - - enable_column_type_check - - enable_brpc_stream_write_background - - doris_scan_block_max_mb - - doris_scanner_queue_size - - double_resize_threshold - - dwarf_location_info_mode - - disable_memory_gc - - disable_segment_cache - - disable_storage_row_cache - - doris_blocking_priority_queue_wait_timeout_ms - - doris_cgroup_cpu_path - - crash_in_alloc_large_memory_bytes - - delete_bitmap_agg_cache_stale_sweep_time_sec - - data_page_cache_stale_sweep_time_sec - - debug_inverted_index_compaction - - cumulative_compaction_max_deltas_factor - - compaction_batch_size - - compaction_keep_invisible_version_max_count - - compaction_keep_invisible_version_min_count - - compaction_keep_invisible_version_timeout_sec - - common_obj_lru_cache_stale_sweep_time_sec - - compaction_memory_bytes_limit - - 
compaction_promotion_version_count - - confirm_unused_remote_files_interval_sec - - column_dictionary_key_ratio_threshold - - column_dictionary_key_size_threshold - - cold_data_compaction_interval_sec - - clean_stream_load_record_interval_secs - - check_segment_when_build_rowset_meta - - buffered_reader_read_timeout_ms - - cache_periodic_prune_stale_sweep_sec - - cache_prune_interval_sec - - trash_file_expire_time_sec - - auto_inc_fetch_thread_num - - auto_inc_low_water_level_mark_size_ratio - - auto_inc_prefetch_size_ratio - - base_compaction_dup_key_max_file_size_mbytes - - base_compaction_max_compaction_score - - es_http_timeout_ms - - external_table_connect_timeout_sec - - status_report_interval - - transfer_large_data_by_brpc - - thrift_rpc_timeout_ms - - thrift_client_retry_interval_ms - - thrift_connect_timeout_seconds - - thrift_max_message_size - - txn_commit_rpc_timeout_ms - - unused_rowset_monitor_interval - - string_type_length_soft_limit_bytes - - big_column_size_buffer - - small_column_size_buffer - - jsonb_type_length_soft_limit_bytes # config > 0 && config <= 2147483643; - - doris_max_scan_key_num - - doris_scan_range_row_count - - doris_scanner_row_num - - doris_scanner_row_bytes - - exchg_node_buffer_size_bytes - - max_pushdown_conditions_per_column - - max_send_batch_parallelism_per_job # >= 1 - - doris_scan_range_max_mb - - disable_auto_compaction - - enable_vertical_compaction - - vertical_compaction_num_columns_per_group - - vertical_compaction_max_segment_size - - enable_ordered_data_compaction - - ordered_data_compaction_min_segment_size - - max_base_compaction_threads - - generate_compaction_tasks_interval_ms - - base_compaction_min_rowset_num - - base_compaction_min_data_ratio - - total_permits_for_compaction_score - - compaction_promotion_size_mbytes - - compaction_promotion_ratio - - compaction_promotion_min_size_mbytes - - compaction_min_size_mbytes - - cumulative_compaction_min_deltas - - cumulative_compaction_max_deltas - - 
base_compaction_trace_threshold - - cumulative_compaction_trace_threshold - - compaction_task_num_per_disk - - compaction_task_num_per_fast_disk - - cumulative_compaction_rounds_for_each_base_compaction_round - - max_cumu_compaction_threads - - segcompaction_num_threads - - disable_compaction_trace_log - - pick_rowset_to_compact_interval_sec - - max_single_replica_compaction_threads - - update_replica_infos_interval_seconds - - enable_stream_load_record - - load_error_log_reserve_hours - - load_error_log_limit_bytes - - slave_replica_writer_rpc_timeout_sec - - max_segment_num_per_rowset - - routine_load_consumer_pool_size - - load_task_high_priority_threshold_second - - min_load_rpc_timeout_ms - - max_consumer_num_per_group - - streaming_load_max_mb - - streaming_load_json_max_mb - - olap_table_sink_send_interval_microseconds - - olap_table_sink_send_interval_auto_partition_factor - - max_memory_sink_batch_count - - memtable_mem_tracker_refresh_interval_ms - - zone_map_row_num_threshold - - memory_limitation_per_thread_for_schema_change_bytes - - mem_tracker_consume_min_size_bytes - - write_buffer_size - - remote_storage_read_buffer_mb - - path_gc_check_interval_second - - path_gc_check_step - - path_gc_check_step_interval_ms - - scan_context_gc_interval_min - - default_num_rows_per_column_file_block - - disable_storage_page_cache - - disk_stat_monitor_interval - - max_percentage_of_error_disk - - read_size - - storage_flood_stage_left_capacity_bytes - - storage_flood_stage_usage_percent - - sync_tablet_meta - - pending_data_expire_time_sec - - max_tablet_version_num - - tablet_meta_checkpoint_min_interval_secs - - tablet_meta_checkpoint_min_new_rowsets_num - - tablet_rowset_stale_sweep_time_sec - - tablet_writer_ignore_eovercrowded - - streaming_load_rpc_max_alive_time_sec - - report_disk_state_interval_seconds - - result_buffer_cancelled_interval_time - - snapshot_expire_time_sec - - sys_log_level - - report_tablet_interval_seconds - - 
report_task_interval_seconds - - enable_token_check - - max_runnings_transactions_per_txn_map - - max_download_speed_kbps - - download_low_speed_time - - download_low_speed_limit_kbps - - priority_queue_remaining_tasks_increased_frequency - - enable_simdjson_reader - - enable_query_memory_overcommit - - user_files_secure_path - - brpc_streaming_client_batch_bytes - - grace_shutdown_wait_seconds - - ca_cert_file_paths - - agent_task_trace_threshold_sec - - async_file_cache_init_sleep_interval_ms - - enable_rowid_conversion_correctness_check - - enable_shrink_memory - - enable_use_cgroup_memory_info - - enable_vertical_segment_writer - - enable_workload_group_memory_gc - - estimated_mem_per_column_reader - - exchange_sink_ignore_eovercrowded - - exchg_buffer_queue_capacity_factor - - fetch_remote_schema_rpc_timeout_ms - - fetch_rpc_timeout_seconds - - file_cache_max_evict_num_per_round - - file_cache_max_file_reader_cache_size - - file_cache_wait_sec_after_fail - - finished_migration_tasks_size - - garbage_sweep_batch_size - - generate_cooldown_task_interval_sec - - get_stack_trace_tool - - group_commit_queue_mem_limit - - hash_table_double_grow_degree - - high_disk_avail_level_diff_usages - - hive_sink_max_file_size - - iceberg_sink_max_file_size - - ignore_not_found_file_in_external_table - - ignore_rowset_stale_unconsistent_delete - - ignore_schema_change_check - - in_memory_file_size - - index_cache_entry_stay_time_after_lookup_s - - index_page_cache_stale_sweep_time_sec - - inverted_index_cache_stale_sweep_time_sec - - inverted_index_compaction_enable - - inverted_index_max_buffered_docs - - inverted_index_ram_buffer_size - - inverted_index_ram_dir_enable - - jdbc_connection_pool_cache_clear_time_sec - - je_dirty_pages_mem_limit_percent - - jeprofile_dir - - kerberos_ccache_path - - kerberos_krb5_conf_path - - local_exchange_buffer_mem_limit - - lookup_connection_cache_bytes_limit - - low_priority_compaction_score_threshold - - 
low_priority_compaction_task_num_per_disk - - max_amplified_read_ratio - - max_fill_rate - - max_fragment_start_wait_time_seconds - - max_s3_client_retry_times - - max_tablet_io_errors - - memory_gc_sleep_time_ms - - memory_limitation_per_thread_for_storage_migration_bytes - - memory_maintenance_sleep_time_ms - - memtable_flush_running_count_limit - - memtable_hard_limit_active_percent - - memtable_insert_memory_ratio - - memtable_soft_limit_active_percent - - merged_hdfs_min_io_size - - merged_oss_min_io_size - - migration_remaining_size_threshold_mb - - migration_task_timeout_secs - - min_bytes_in_scanner_queue - - mmap_threshold - - mow_publish_max_discontinuous_version_num - - multi_get_max_threads - - nodechannel_pending_queue_max_bytes - - orc_natural_read_size_mb - - parquet_column_max_buffer_mb - - parquet_header_max_size_mb - - parquet_rowgroup_max_buffer_mb - - pipeline_status_report_interval - - pipeline_task_leakage_detect_period_secs - - pk_index_page_cache_stale_sweep_time_sec - - point_query_row_cache_stale_sweep_time_sec - - pre_serialize_keys_limit_bytes - - process_full_gc_size - - process_minor_gc_size - - public_access_ip - - query_statistics_reserve_timeout_ms - - remove_unused_remote_files_interval_sec - - report_query_statistics_interval_ms - - report_random_wait - - rf_predicate_check_row_num - - s3_read_base_wait_time_ms - - s3_read_max_wait_time_ms - - s3_write_buffer_size - - s3_writer_buffer_allocation_timeout - - scan_thread_nice_value - - schema_cache_capacity - - schema_cache_sweep_time_sec - - segment_compression_threshold_kb - - skip_loading_stale_rowset_meta - - spill_gc_interval_ms - - spill_gc_work_time_ms - - stacktrace_in_alloc_large_memory_bytes - - storage_refresh_storage_policy_task_interval_seconds - - stream_load_record_batch_size - - table_sink_non_partition_write_scaling_data_processed_threshold - - table_sink_partition_write_max_partition_nums_per_writer - - 
table_sink_partition_write_min_data_processed_rebalance_threshold - - table_sink_partition_write_min_partition_data_processed_rebalance_threshold - - tablet_lookup_cache_stale_sweep_time_sec - - tablet_meta_serialize_size_limit - - tablet_path_check_batch_size - - tablet_rowset_stale_sweep_threshold_size - - tablet_schema_cache_capacity - - tablet_schema_cache_recycle_interval - - tablet_version_graph_orphan_vertex_ratio - - thread_wait_gc_max_milliseconds - - thrift_client_open_num_tries - - variant_enable_flatten_nested - - variant_max_merged_tablet_schema_size - - variant_ratio_of_defaults_as_sparse_column - - variant_threshold_rows_to_estimate_sparse_column - - variant_throw_exeception_on_invalid_json - - wg_weighted_memory_ratio_refresh_interval_ms - - workload_group_scan_task_wait_timeout_ms - - write_buffer_size_for_agg -# - -immutableParameters: - - be_port - - brpc_port - - webserver_port - - heartbeat_service_port - - arrow_flight_sql_port - - custom_config_dir - - jdbc_drivers_dir - - sys_log_dir - - JAVA_OPTS diff --git a/addons/doris/config/be-config.tpl b/addons/doris/config/be-config.tpl deleted file mode 100644 index a97d7e73d..000000000 --- a/addons/doris/config/be-config.tpl +++ /dev/null @@ -1,500 +0,0 @@ -{{- $phy_memory := getContainerMemory ( index $.podSpec.containers 0 ) }} - -{{- if le $phy_memory 2147483648 }} -JAVA_OPTS="-Xmx1024m -Xloggc:/opt/apache-doris/be/log/be.gc.log.${DATE} -Dsun.java.command=DorisBE -XX:-CriticalJNINatives" -{{- else if le $phy_memory 8589934592 }} -JAVA_OPTS="-Xmx2048m -Xloggc:/opt/apache-doris/be/log/be.gc.log.${DATE} -Dsun.java.command=DorisBE -XX:-CriticalJNINatives" -{{- else }} -JAVA_OPTS="-Xmx4096m -Xloggc:/opt/apache-doris/be/log/be.gc.log.${DATE} -Dsun.java.command=DorisBE -XX:-CriticalJNINatives" -{{- end}} - -agent_task_trace_threshold_sec=2 -alter_index_worker_count=3 -alter_tablet_worker_count=3 -arrow_flight_sql_port=-1 -async_file_cache_init_file_num_interval=1000 
-async_file_cache_init_sleep_interval_ms=20 -auto_inc_fetch_thread_num=3 -auto_inc_low_water_level_mark_size_ratio=3 -auto_inc_prefetch_size_ratio=10 -aws_log_level=0 -base_compaction_dup_key_max_file_size_mbytes=1024 -base_compaction_max_compaction_score=20 -base_compaction_min_data_ratio=0.3 -base_compaction_min_rowset_num=5 -base_compaction_trace_threshold=60 -be_port=9060 -be_service_threads=64 -big_column_size_buffer=65535 -bitmap_serialize_version=1 -broken_storage_path= -brpc_heavy_work_pool_max_queue_size=-1 -brpc_heavy_work_pool_threads=-1 -brpc_idle_timeout_sec=-1 -brpc_light_work_pool_max_queue_size=-1 -brpc_light_work_pool_threads=-1 -brpc_max_body_size=3147483648 -brpc_num_threads=256 -brpc_port=8060 -brpc_socket_max_unwritten_bytes=-1 -brpc_streaming_client_batch_bytes=262144 -buffered_reader_read_timeout_ms=600000 -ca_cert_file_paths=/etc/pki/tls/certs/ca-bundle.crt;/etc/ssl/certs/ca-certificates.crt;/etc/ssl/ca-bundle.pem -cache_periodic_prune_stale_sweep_sec=300 -cache_prune_interval_sec=10 -calc_delete_bitmap_max_thread=32 -check_consistency_worker_count=1 -check_segment_when_build_rowset_meta=false -clean_stream_load_record_interval_secs=1800 -clear_file_cache=false -clear_transaction_task_worker_count=1 -clone_worker_count=3 -cold_data_compaction_interval_sec=1800 -cold_data_compaction_thread_num=2 -column_dictionary_key_ratio_threshold=0 -column_dictionary_key_size_threshold=0 -common_obj_lru_cache_stale_sweep_time_sec=900 -compaction_batch_size=-1 -compaction_keep_invisible_version_max_count=500 -compaction_keep_invisible_version_min_count=50 -compaction_keep_invisible_version_timeout_sec=1800 -compaction_memory_bytes_limit=1073741824 -compaction_min_size_mbytes=64 -compaction_promotion_min_size_mbytes=128 -compaction_promotion_ratio=0.05 -compaction_promotion_size_mbytes=1024 -compaction_promotion_version_count=1000 -compaction_task_num_per_disk=4 -compaction_task_num_per_fast_disk=8 -confirm_unused_remote_files_interval_sec=60 
-cooldown_thread_num=5 -crash_in_alloc_large_memory_bytes=-1 -create_tablet_worker_count=3 -cumulative_compaction_max_deltas=1000 -cumulative_compaction_max_deltas_factor=10 -cumulative_compaction_min_deltas=5 -cumulative_compaction_rounds_for_each_base_compaction_round=9 -cumulative_compaction_trace_threshold=10 -custom_config_dir=/opt/apache-doris/be/conf -data_page_cache_stale_sweep_time_sec=300 -debug_inverted_index_compaction=false -default_num_rows_per_column_file_block=1024 -default_rowset_type=BETA -delete_bitmap_agg_cache_capacity=104857600 -delete_bitmap_agg_cache_stale_sweep_time_sec=1800 -delete_bitmap_dynamic_agg_cache_limit=0.5% -delete_worker_count=3 -disable_auto_compaction=false -disable_compaction_trace_log=true -disable_memory_gc=true -disable_pk_storage_page_cache=false -disable_segment_cache=false -disable_storage_page_cache=false -disable_storage_row_cache=true -disk_stat_monitor_interval=5 -doris_blocking_priority_queue_wait_timeout_ms=500 -doris_cgroup_cpu_path= -doris_enable_scanner_thread_pool_per_disk=true -doris_max_remote_scanner_thread_pool_thread_num=-1 -doris_max_scan_key_num=48 -doris_remote_scanner_thread_pool_queue_size=102400 -doris_remote_scanner_thread_pool_thread_num=48 -doris_scan_block_max_mb=67108864 -doris_scan_range_max_mb=1024 -doris_scan_range_row_count=524288 -doris_scanner_min_thread_pool_thread_num=8 -doris_scanner_queue_size=1024 -doris_scanner_row_bytes=10485760 -doris_scanner_row_num=16384 -doris_scanner_thread_pool_queue_size=102400 -doris_scanner_thread_pool_thread_num=48 -double_resize_threshold=23 -download_binlog_rate_limit_kbs=0 -download_low_speed_limit_kbps=50 -download_low_speed_time=300 -download_worker_count=1 -drop_tablet_worker_count=3 -dwarf_location_info_mode=FAST -enable_all_http_auth=false -enable_base_compaction_idle_sched=true -enable_brpc_stream_write_background=true -enable_column_type_check=true -enable_compaction_checksum=false -enable_compaction_priority_scheduling=true 
-enable_debug_log_timeout_secs=0 -enable_debug_points=false -enable_delete_when_cumu_compaction=false -enable_feature_binlog=false -enable_file_cache=false -enable_file_cache_query_limit=false -enable_file_logger=true -enable_flush_file_cache_async=true -enable_fuzzy_mode=false -enable_https=false -enable_inverted_index_cache_check_timestamp=true -enable_java_support=true -enable_je_purge_dirty_pages=true -enable_jvm_monitor=false -enable_low_cardinality_cache_code=true -enable_low_cardinality_optimize=true -enable_memory_orphan_check=true -enable_merge_on_write_correctness_check=true -enable_metric_calculator=true -enable_missing_rows_correctness_check=false -enable_ordered_data_compaction=true -enable_parquet_page_index=false -enable_pipeline_task_leakage_detect=false -enable_query_like_bloom_filter=true -enable_query_memory_overcommit=true -enable_rowid_conversion_correctness_check=false -enable_segcompaction=true -enable_set_in_bitmap_value=false -enable_shrink_memory=false -enable_simdjson_reader=true -enable_single_replica_load=true -enable_skip_tablet_compaction=true -enable_snapshot_action=false -enable_stream_load_record=false -enable_system_metrics=true -enable_time_lut=true -enable_token_check=true -enable_use_cgroup_memory_info=true -enable_vertical_compaction=true -enable_vertical_segment_writer=true -enable_workload_group_for_scan=false -enable_workload_group_memory_gc=true -enable_write_index_searcher_cache=true -es_http_timeout_ms=5000 -es_scroll_keepalive=5m -estimated_mem_per_column_reader=1024 -exchange_sink_ignore_eovercrowded=true -exchg_buffer_queue_capacity_factor=64 -exchg_node_buffer_size_bytes=20485760 -exit_on_exception=false -external_table_connect_timeout_sec=30 -fe_expire_duration_seconds=60 -fetch_remote_schema_rpc_timeout_ms=60000 -fetch_rpc_timeout_seconds=30 -file_cache_max_evict_num_per_round=5000 -file_cache_max_file_reader_cache_size=1000000 -file_cache_max_file_segment_size=4194304 -file_cache_min_file_segment_size=1048576 
-file_cache_path= -file_cache_wait_sec_after_fail=0 -finished_migration_tasks_size=10000 -flush_thread_num_per_store=6 -fragment_pool_queue_size=4096 -fragment_pool_thread_num_max=2048 -fragment_pool_thread_num_min=64 -function_service_protocol=h2:grpc -garbage_sweep_batch_size=100 -generate_compaction_tasks_interval_ms=10 -generate_cooldown_task_interval_sec=20 -generate_tablet_meta_checkpoint_tasks_interval_secs=600 -get_stack_trace_tool=libunwind -grace_shutdown_wait_seconds=120 -group_commit_insert_threads=10 -group_commit_memory_rows_for_max_filter_ratio=10000 -group_commit_queue_mem_limit=67108864 -group_commit_relay_wal_threads=10 -group_commit_replay_wal_retry_interval_max_seconds=1800 -group_commit_replay_wal_retry_interval_seconds=5 -group_commit_replay_wal_retry_num=10 -group_commit_wait_replay_wal_finish=false -group_commit_wal_max_disk_limit=10% -group_commit_wal_path= -hash_table_double_grow_degree=31 -heartbeat_service_port=9050 -heartbeat_service_thread_count=1 -hide_webserver_config_page=false -high_disk_avail_level_diff_usages=0.15 -high_priority_flush_thread_num_per_store=6 -hive_sink_max_file_size=1073741824 -iceberg_sink_max_file_size=1073741824 -ignore_always_true_predicate_for_segment=true -ignore_broken_disk=false -ignore_invalid_partition_id_rowset_num=0 -ignore_load_tablet_failure=false -ignore_not_found_file_in_external_table=true -ignore_rowset_stale_unconsistent_delete=false -ignore_schema_change_check=false -in_memory_file_size=1048576 -index_cache_entry_stay_time_after_lookup_s=1800 -index_page_cache_percentage=10 -index_page_cache_stale_sweep_time_sec=600 -ingest_binlog_work_pool_size=-1 -inverted_index_cache_stale_sweep_time_sec=600 -inverted_index_compaction_enable=false -inverted_index_dict_path=/opt/apache-doris/be/dict -inverted_index_fd_number_limit_percent=40 -inverted_index_max_buffered_docs=-1 -inverted_index_query_cache_limit=10% -inverted_index_query_cache_shards=256 -inverted_index_ram_buffer_size=512 
-inverted_index_ram_dir_enable=true -inverted_index_read_buffer_size=4096 -inverted_index_searcher_cache_limit=10% -jdbc_connection_pool_cache_clear_time_sec=28800 -jdbc_drivers_dir=/opt/apache-doris/be/jdbc_drivers -je_dirty_pages_mem_limit_percent=5% -jeprofile_dir=/opt/apache-doris/be/log -jsonb_type_length_soft_limit_bytes=1048576 -kafka_api_version_request=true -kafka_broker_version_fallback=0.10.0 -kafka_debug=disable -kerberos_ccache_path= -kerberos_krb5_conf_path=/etc/krb5.conf -load_data_reserve_hours=4 -load_error_log_limit_bytes=209715200 -load_error_log_reserve_hours=48 -load_process_max_memory_limit_percent=50 -load_process_safe_mem_permit_percent=5 -load_process_soft_mem_limit_percent=80 -load_stream_eagain_wait_seconds=600 -load_stream_flush_token_max_tasks=15 -load_stream_max_buf_size=20971520 -load_stream_max_wait_flush_token_time_ms=600000 -load_stream_messages_in_batch=128 -load_task_high_priority_threshold_second=120 -local_exchange_buffer_mem_limit=134217728 -log_buffer_level= -lookup_connection_cache_bytes_limit=4294967296 -low_priority_compaction_score_threshold=200 -low_priority_compaction_task_num_per_disk=2 -make_snapshot_worker_count=5 -max_amplified_read_ratio=0.8 -max_base_compaction_threads=4 -max_client_cache_size_per_host=10 -max_consumer_num_per_group=3 -max_cumu_compaction_threads=-1 -max_depth_in_bkd_tree=32 -max_depth_of_expr_tree=600 -max_download_speed_kbps=50000 -max_external_file_meta_cache_num=1000 -max_fill_rate=2 -max_fragment_start_wait_time_seconds=30 -max_garbage_sweep_interval=3600 -max_hdfs_file_handle_cache_num=1000 -max_hdfs_file_handle_cache_time_sec=3600 -max_memory_sink_batch_count=20 -max_meta_checkpoint_threads=-1 -max_percentage_of_error_disk=100 -max_pushdown_conditions_per_column=1024 -max_routine_load_thread_pool_size=1024 -max_runnings_transactions_per_txn_map=2000 -max_s3_client_retry=10 -max_segment_num_per_rowset=1000 -max_send_batch_parallelism_per_job=5 -max_single_replica_compaction_threads=-1 
-max_sys_mem_available_low_water_mark_bytes=6871947673 -max_tablet_io_errors=-1 -max_tablet_migration_threads=1 -max_tablet_version_num=2000 -mem_limit=90% -mem_tracker_consume_min_size_bytes=1048576 -memory_gc_sleep_time_ms=500 -memory_limitation_per_thread_for_schema_change_bytes=2147483648 -memory_limitation_per_thread_for_storage_migration_bytes=100000000 -memory_maintenance_sleep_time_ms=100 -memory_mode=moderate -memtable_flush_running_count_limit=2 -memtable_hard_limit_active_percent=50 -memtable_insert_memory_ratio=1.4 -memtable_limiter_reserved_memory_bytes=838860800 -memtable_mem_tracker_refresh_interval_ms=5 -memtable_soft_limit_active_percent=50 -merged_hdfs_min_io_size=8192 -merged_oss_min_io_size=1048576 -migration_lock_timeout_ms=1000 -migration_remaining_size_threshold_mb=10 -migration_task_timeout_secs=300 -min_buffer_size=1024 -min_bytes_in_scanner_queue=67108864 -min_file_descriptor_number=60000 -min_garbage_sweep_interval=180 -min_load_rpc_timeout_ms=20000 -min_row_group_size=134217728 -min_tablet_migration_threads=1 -mmap_threshold=134217728 -mow_publish_max_discontinuous_version_num=20 -multi_get_max_threads=10 -multi_table_batch_plan_threshold=200 -multi_table_max_wait_tables=5 -nodechannel_pending_queue_max_bytes=67108864 -num_broadcast_buffer=32 -num_cores=0 -num_disks=0 -num_threads_per_disk=0 -olap_table_sink_send_interval_auto_partition_factor=0.001 -olap_table_sink_send_interval_microseconds=1000 -open_load_stream_timeout_ms=60000 -orc_natural_read_size_mb=8 -ordered_data_compaction_min_segment_size=10485760 -parquet_column_max_buffer_mb=8 -parquet_header_max_size_mb=1 -parquet_reader_max_buffer_size=50 -parquet_rowgroup_max_buffer_mb=128 -partition_disk_index_lru_size=10000 -partition_topn_partition_threshold=1024 -path_gc_check=true -path_gc_check_interval_second=86400 -path_gc_check_step=1000 -path_gc_check_step_interval_ms=10 -pending_data_expire_time_sec=1800 -pick_rowset_to_compact_interval_sec=86400 -pipeline_executor_size=0 
-pipeline_status_report_interval=10 -pipeline_task_leakage_detect_period_secs=60 -pk_index_page_cache_stale_sweep_time_sec=600 -pk_storage_page_cache_limit=10% -point_query_row_cache_stale_sweep_time_sec=300 -pprof_profile_dir=/opt/apache-doris/be/log -pre_serialize_keys_limit_bytes=16777216 -primary_key_data_page_size=32768 -priority_networks= -priority_queue_remaining_tasks_increased_frequency=512 -process_full_gc_size=10% -process_minor_gc_size=5% -public_access_ip= -publish_version_task_timeout_s=8 -publish_version_worker_count=8 -push_worker_count_high_priority=3 -push_worker_count_normal_priority=3 -query_cache_elasticity_size_mb=128 -query_cache_max_partition_count=1024 -query_cache_max_size_mb=256 -query_statistics_reserve_timeout_ms=30000 -read_size=8388608 -release_snapshot_worker_count=5 -remote_split_source_batch_size=10240 -remote_storage_read_buffer_mb=16 -remove_unused_remote_files_interval_sec=21600 -report_disk_state_interval_seconds=60 -report_query_statistics_interval_ms=3000 -report_random_wait=true -report_tablet_interval_seconds=60 -report_task_interval_seconds=10 -result_buffer_cancelled_interval_time=300 -rf_predicate_check_row_num=204800 -rocksdb_max_write_buffer_number=5 -routine_load_consumer_pool_size=1024 -row_cache_mem_limit=20% -rpc_load_balancer=rr -s3_read_base_wait_time_ms=100 -s3_read_max_wait_time_ms=800 -s3_transfer_executor_pool_size=2 -s3_write_buffer_size=5242880 -s3_writer_buffer_allocation_timeout=300 -scan_context_gc_interval_min=5 -scan_thread_nice_value=0 -schema_cache_capacity=1024 -schema_cache_sweep_time_sec=100 -segcompaction_batch_size=10 -segcompaction_candidate_max_bytes=104857600 -segcompaction_candidate_max_rows=1048576 -segcompaction_num_threads=5 -segcompaction_task_max_bytes=157286400 -segcompaction_task_max_rows=1572864 -segment_cache_capacity=-1 -segment_cache_fd_percentage=40 -segment_cache_memory_percentage=2 -segment_compression_threshold_kb=256 -send_batch_thread_pool_queue_size=102400 
-send_batch_thread_pool_thread_num=64 -share_delta_writers=true -single_replica_load_download_num_workers=64 -skip_loading_stale_rowset_meta=false -slave_replica_writer_rpc_timeout_sec=60 -small_column_size_buffer=100 -small_file_dir=/opt/apache-doris/be/lib/small_file/ -snapshot_expire_time_sec=172800 -soft_mem_limit_frac=0.9 -spill_gc_interval_ms=2000 -spill_gc_work_time_ms=2000 -spill_io_thread_pool_queue_size=102400 -spill_io_thread_pool_thread_num=48 -spill_storage_limit=20% -spill_storage_root_path= -ssl_certificate_path= -ssl_private_key_path= -stacktrace_in_alloc_large_memory_bytes=2147483648 -status_report_interval=5 -storage_flood_stage_left_capacity_bytes=1073741824 -storage_flood_stage_usage_percent=90 -storage_medium_migrate_count=1 -storage_page_cache_limit=20% -storage_page_cache_shard_size=256 -storage_refresh_storage_policy_task_interval_seconds=5 -storage_root_path=/opt/apache-doris/be/storage -storage_strict_check_incompatible_old_format=true -stream_load_record_batch_size=50 -stream_load_record_expire_time_secs=28800 -stream_tvf_buffer_size=1048576 -streaming_load_json_max_mb=100 -streaming_load_max_mb=10240 -streaming_load_rpc_max_alive_time_sec=1200 -string_type_length_soft_limit_bytes=1048576 -sync_tablet_meta=false -sys_log_dir= -sys_log_level=INFO -sys_log_roll_mode=SIZE-MB-1024 -sys_log_roll_num=10 -sys_log_verbose_flags_v=-1 -sys_log_verbose_level=10 -sys_log_verbose_modules= -table_sink_non_partition_write_scaling_data_processed_threshold=26214400 -table_sink_partition_write_max_partition_nums_per_writer=128 -table_sink_partition_write_min_data_processed_rebalance_threshold=26214400 -table_sink_partition_write_min_partition_data_processed_rebalance_threshold=15728640 -tablet_lookup_cache_stale_sweep_time_sec=30 -tablet_map_shard_size=256 -tablet_meta_checkpoint_min_interval_secs=600 -tablet_meta_checkpoint_min_new_rowsets_num=10 -tablet_meta_serialize_size_limit=1610612736 -tablet_path_check_batch_size=1000 
-tablet_path_check_interval_seconds=-1 -tablet_publish_txn_max_thread=32 -tablet_rowset_stale_sweep_by_size=false -tablet_rowset_stale_sweep_threshold_size=100 -tablet_rowset_stale_sweep_time_sec=300 -tablet_schema_cache_capacity=102400 -tablet_schema_cache_recycle_interval=3600 -tablet_version_graph_orphan_vertex_ratio=0.1 -tablet_writer_ignore_eovercrowded=true -tablet_writer_open_rpc_timeout_sec=60 -thread_wait_gc_max_milliseconds=1000 -thrift_client_open_num_tries=1 -thrift_client_retry_interval_ms=1000 -thrift_connect_timeout_seconds=3 -thrift_max_message_size=104857600 -thrift_rpc_timeout_ms=60000 -thrift_server_type_of_fe=THREAD_POOL -tmp_file_dir=tmp -total_permits_for_compaction_score=10000 -transfer_large_data_by_brpc=true -trash_file_expire_time_sec=86400 -txn_commit_rpc_timeout_ms=60000 -txn_map_shard_size=1024 -txn_shard_size=1024 -unused_rowset_monitor_interval=30 -update_replica_infos_interval_seconds=60 -upload_worker_count=1 -user_files_secure_path=/opt/apache-doris/be -user_function_dir=/opt/apache-doris/be/lib/udf -variant_enable_flatten_nested=false -variant_max_merged_tablet_schema_size=2048 -variant_ratio_of_defaults_as_sparse_column=1 -variant_threshold_rows_to_estimate_sparse_column=2048 -variant_throw_exeception_on_invalid_json=false -vertical_compaction_max_row_source_memory_mb=200 -vertical_compaction_max_segment_size=1073741824 -vertical_compaction_num_columns_per_group=5 -wait_internal_group_commit_finish=false -webserver_num_workers=48 -webserver_port=8040 -wg_flush_thread_num_per_store=6 -wg_weighted_memory_ratio_refresh_interval_ms=50 -workload_group_scan_task_wait_timeout_ms=10000 -write_buffer_size=209715200 -write_buffer_size_for_agg=419430400 -zone_map_row_num_threshold=20 \ No newline at end of file diff --git a/addons/doris/config/fe-config-constraint.cue b/addons/doris/config/fe-config-constraint.cue deleted file mode 100644 index f175b342d..000000000 --- a/addons/doris/config/fe-config-constraint.cue +++ /dev/null @@ -1,1497 
+0,0 @@ -#FEParameter: { - // DYNAMIC parameters - - // Abort transaction time after lost heartbeat. The default value is 300s, which means transactions of be will be aborted after lost heartbeat 300s. - abort_txn_after_lost_heartbeat_time_second: int | *300 - - // This config will decide whether to resend agent task when create_time for agent_task is set, only when current_time - create_time > agent_task_resend_wait_time_ms can ReportHandler do resend agent task - agent_task_resend_wait_time_ms: int | *5000 - - // Maximal timeout of ALTER TABLE request. Set long enough to fit your table data size. Default value is 2592000s(1 month). - alter_table_timeout_second: int | *2592000 - - // audit_event_log_queue_size = qps * query_audit_log_timeout_ms - audit_event_log_queue_size: int | *250000 - - // This parameter controls the time interval for automatic collection jobs to check the health of table statistics and trigger automatic collection - auto_check_statistics_in_minutes: int | *5 - - // Max number of buckets for auto bucket - autobucket_max_buckets: int | *128 - - // Minimal number of buckets for auto bucket - autobucket_min_buckets: int | *1 - - // Sets a fixed disk usage factor in the BE load fraction. The BE load score is a combination of disk usage and replica count. The valid value range is [0, 1]. When it is out of this range, other methods are used to automatically calculate this coefficient. - backend_load_capacity_coeficient: float | *-1.0 - - // Default timeout of Backup Job - backup_job_default_timeout_ms: int | *86400000 - - // The max number of upload tasks assigned to each be during the backup process, the default value is 3 - backup_upload_task_num_per_be: int | *3 - - // Balance order, a temporary config, may delete later. 
- balance_be_then_disk: bool | *true - - // The threshold of cluster balance score, if a backend's load score is 10% lower than average score, this backend will be marked as LOW load, if load score is 10% higher than average score, HIGH load will be marked. - balance_load_score_threshold: float | *0.1 - - // 1 slot for reduce unnecessary balance task, provided a more accurate estimate of capacity - balance_slot_num_per_path: int | *1 - - // Max data version of backends serialize block. - be_exec_version: int | *5 - - // If set to TRUE, FE will: 1. divide BE into high load and low load(no mid load) to force triggering tablet scheduling;2. ignore whether the cluster can be more balanced during tablet scheduling. It's used to test the reliability in single replica case when tablet scheduling are frequent. Default is false. - be_rebalancer_fuzzy_test: bool | *false - - // When be rebalancer idle, then disk balance will occurs. - be_rebalancer_idle_seconds: int | *0 - - // The maximum waiting time for BE nodes to report statistical information to FE nodes. - be_report_query_statistics_timeout_ms: int | *60000 - - // After a backend is marked as unavailable, it will be added to blacklist. Default is 120s. - blacklist_duration_second: int | *120 - - // Default timeout for broker load job, in seconds. - broker_load_default_timeout_second: int | *14400 - - // If set to true, fe will get data from be cache. This option is suitable for real-time updating of partial partitions. - cache_enable_partition_mode: bool | *true - - // If set to true, fe will enable sql result cache. This option is suitable for offline data update scenarios. - cache_enable_sql_mode: bool | *true - - // Minimum interval between last version when caching results, This parameter distinguishes between offline and real-time updates. - cache_last_version_interval_second: int | *30 - - // Maximum data size of rows that can be cached in SQL/Partition Cache, is 3000 by default. 
- cache_result_max_data_size: int | *3000 - - // Maximum number of rows that can be cached in SQL/Partition Cache, is 3000 by default. - cache_result_max_row_count: int | *3000 - - // The high water of disk capacity used percent. This is used for calculating load score of a backend. - capacity_used_percent_high_water: float | *0.75 - - // After dropping database(table/partition), you can recover it by using RECOVER stmt. And this specifies the maximal data retention time. After time, the data will be deleted permanently. - catalog_trash_expire_second: int | *86400 - - // The tryLock timeout configuration of catalog locr. Normally it does not need to change, unless you need to test something. - catalog_try_lock_timeout_ms: int | *5000 - - // Default sample percentage. The value from 0 ~ 100. The 100 means no sampling and fetch all data. - cbo_default_sample_percentage: int & >=0 & <=100 | *100 - - // The max unfinished statistics job number - cbo_max_statistics_job_num: int | *20 - - // Default timeout of a single consistency check task. Set long enough to fit your tablet size. Default value is 600s. - check_consistency_default_timeout_second: int | *600 - - // The threshold for the read ratio of cold data. - cloud_cold_read_percent: int | *10 - - // The number of replicas for each data block in cloud storage. - cloud_replica_num: int | *3 - - // The relocation of a colocation group may involve a large number of tablets moving within the cluster. Therefore, doris should use a more conservative strategy to avoid relocation of colocation groups as much as possible. Reloaction usually occurs after a BE node goes offline or goes down. This parameter is used to delay the determination of BE node unavailability. The default is 30 minutes, i.e.,if a BE node recovers within 30 minutes, relocation of the colocation group will not be triggered. 
- colocate_group_relocate_delay_second: int | *1800 - - // Maximal waiting time for all data inserted before one transaction to be committed, in seconds. This parameter is only used for transactional insert operation. - commit_timeout_second: int | *30 - - // End time of consistency check. Used with `consistency_check_start_time` to decide the start and end time of consistency check. If set to the same value, consistency check will not be scheduled. - consistency_check_end_time: string | *"23" - - // Start time of consistency check. Used with `consistency_check_end_time` to decide the start and end time of consistency check. If set to the same value, consistency check will not be scheduled. - consistency_check_start_time: string | *"23" - - // When creating tablet of a partition, always start from the first BE. This method may cause BE imbalance. - create_tablet_round_robin_from_start: bool | *false - - // When tablet size of decommissioned backend is lower than this threshold, SystemHandler will start to check if all tablets of this backend are in recycled status, this backend will be dropped immediately if the check result is true. For performance based considerations, better not set a very high value for this. - decommission_tablet_check_threshold: int | *5000 - - // Decommission a tablet need to wait all the previous txns finished. If wait timeout, decommission will fail. Need to increase this wait time if the txn take a long time. - decommission_tablet_wait_time_seconds: int | *3600 - - // Used to set default db data quota bytes. Default is 1PB - default_db_data_quota_bytes: int | *1125899906842624 - - // Used to set default db transaction quota num. - default_db_max_running_txn_num: int | *-1 - - // Used to set default db replica quota num. 
- default_db_replica_quota_size: int | *1073741824 - - // The default parallelism of the load execution plan on a single node when the broker load is submitted - default_load_parallelism: int | *8 - - // Maximum percentage of data that can be filtered (due to reasons such as data is irregularly). Default is 0 - default_max_filter_ratio: float | *0 - - // Control the default max num of the instance for a user. - default_max_query_instances: int | *-1 - - // Maximal timeout for delete job, in seconds. - delete_job_max_timeout_second: int | *300 - - // Maximal number of waiting jobs for Broker Load. This is a desired number. In some situation, such as switch the master, the current number is maybe more than this value. - desired_max_waiting_jobs: int | *100 - - // The maximum difference in the number of tablets of each BE in partition rebalance mode. If it is less than this value, it will be diagnosed as balanced. - diagnose_balance_max_tablet_num_diff: int | *50 - - // The maximum ratio of the number of tablets in each BE in partition rebalance mode. If it is less than this value, it will be diagnosed as balanced. - diagnose_balance_max_tablet_num_ratio: float | *1.1 - - // Set to true to disable backend black list, so that even if doris failed to send task to a backend, that backend won't be added to black list. This should only be set when running tests, such as regression test. Highly recommended NOT disable it in product environment. - disable_backend_black_list: bool | *false - - // If set to true, TabletScheduler will not do balance. - disable_balance: bool | *false - - // This configs can set to true to disable the automatic colocate tables's relocate and balance. If 'disable_colocate_balance' is set to true, ColocateTableBalancer will not relocate and balance colocate tables. 
- disable_colocate_balance: bool | *false - - // Whether to allow colocate balance between all groups - disable_colocate_balance_between_groups: bool | *false - - // To prevent different types (V1, V2, V3) of behavioral inconsistencies, doris may delete the DecimalV2 and DateV1 types in the future. At this stage, doris use ‘disable_decimalv2’ and ‘disable_datev1’ to determine whether these two types take effect. - disable_datev1: bool | *true - - // To prevent different types (V1, V2, V3) of behavioral inconsistencies, doris may delete the DecimalV2 and DateV1 types in the future. At this stage, doris use ‘disable_decimalv2’ and ‘disable_datev1’ to determine whether these two types take effect. - disable_decimalv2: bool | *true - - // If set to true, TabletScheduler will not do disk balance - disable_disk_balance: bool | *false - - // Load using hadoop cluster will be deprecated in future. Set to true to disable this kind of load. - disable_hadoop_load: bool | *false - - // If set to true, all pending load job will failed when call begin txn api; all prepare load job will failed when call commit txn api; all committed load job will waiting to be published. - disable_load_job: bool | *false - - // Whether to disable LocalDeployManager drop node. - disable_local_deploy_manager_drop_node: bool | *true - - // Whether to disable show stream load and clear stream load records in memory. - disable_show_stream_load: bool | *false - - // When disable_storage_medium_check is true, ReportHandler would not check tablet's storage medium and disable storage cool down function. - disable_storage_medium_check: bool | *false - - // If set to true, the tablet scheduler will not work, so that all tablet repair/balance task will not work. - disable_tablet_scheduler: bool | *false - - // Whether to disable creating catalog with WITH RESOURCE statement. 
- disallow_create_catalog_with_resource: bool | *true - - // This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the `/` operator. The default value is 4, and it is currently only used for the DECIMALV3 type. - div_precision_increment: int | *4 - - // If set to true, the backend will be automatically dropped after finishing decommission. If set to false, the backend will not be dropped and remaining in DECOMMISSION state. - drop_backend_after_decommission: bool | *true - - // The maximum number of retries allowed after an RPC request fails. - drop_rpc_retry_num: int | *200 - - // Decide how often to check dynamic partition. - dynamic_partition_check_interval_seconds: int | *600 - - // If set to true, dynamic partition feature will open. - dynamic_partition_enable: bool | *true - - // The log roll size of BDBJE. When the number of log entries exceeds this value, the log will be rolled. - edit_log_roll_num: int | *5000 - - // This config is used to try skip broker when access bos or other cloud storage via broker. - enable_access_file_without_broker: bool | *false - - // Used for regression test - enable_alter_queue_prop_sync: bool | *false - - // Support complex data type ARRAY. - enable_array_type: bool | *false - - // Whether to add a delete sign column when create unique table. - enable_batch_delete_by_default: bool | *true - - // Whether to enable multi redundancy for cloud cloud storage. - enable_cloud_multi_replica: bool | *false - - // Whether to collect internal query performance analysis data - enable_collect_internal_query_profile: bool | *false - - // Whether to enable the "affinity adjustment" strategy for replicas during the control cooldown period - enable_cooldown_replica_affinity: bool | *true - - // Whether to create the bitmap index in the form of an inverted index - enable_create_bitmap_index_as_inverted_index: bool | *true - - // Enable external hive bucket table. 
- enable_create_hive_bucket_table: bool | *false - - // If set to TRUE, FE will convert date/datetime to datev2/datetimev2(0) automatically. - enable_date_conversion: bool | *true - - // If set to TRUE, FE will convert DecimalV2 to DecimalV3 automatically. - enable_decimal_conversion: bool | *true - - // If set to false, TabletScheduler will not do disk balance for replica num = 1. - enable_disk_balance_for_single_replica: bool | *false - - // When doing clone or repair tablet task, there may be replica is REDUNDANT state, which should be dropped later. But there are be loading task on these replicas, so the default strategy is to wait until the loading task finished before dropping them. But the default strategy may takes very long time to handle these redundant replicas. So doris can set this config to true to not wait any loading task. Set this config to true may cause loading task failed, but will speed up the process of tablet balance and repair. - enable_force_drop_redundant_replica: bool | *false - - // Whether to add a version column when create unique table - enable_hidden_version_column_by_default: bool | *true - - // If set to true, Planner will try to select replica of tablet on same host as this Frontend. - enable_local_replica_selection: bool | *false - - // Used with enable_local_replica_selection. If the local replicas is not available, fallback to the nonlocal replicas. - enable_local_replica_selection_fallback: bool | *false - - // Enable the 'delete predicate' for DELETE statements. If enabled, it will enhance the performance of DELETE statements, but partial column updates after a DELETE may result in erroneous data. If disabled, it will reduce the performance of DELETE statements to ensure accuracy. 
- enable_mow_light_delete: bool | *false - - // Whether to allow the creation of odbc, mysql, broker type external tables - enable_odbc_mysql_broker_table: bool | *false - - // Whether to enable the pipelined data loading - enable_pipeline_load: bool | *true - - // Whether to collect performance analysis data when analyzing. - enable_profile_when_analyze: bool | *false - - // Enable quantile_state type column. Default value is false. - enable_quantile_state_type: bool | *true - - // This configuration is used to enable the statistics of query information, which will record the access status of databases, tables, and columns, and can be used to guide the optimization of table structures - enable_query_hit_stats: bool | *false - - // If set to true, doris will try to parse the ddl of a hive view and try to execute the query otherwise it will throw an AnalysisException. - enable_query_hive_views: bool | *true - - // Whether to enable the query queue. - enable_query_queue: bool | *true - - // Whether to enable the round-robin tablet creation strategy. - enable_round_robin_create_tablet: bool | *true - - // There's a case, all backend has a high disk, by default, it will not run urgent disk balance. If set this value to true, urgent disk balance will always run, the backends will exchange tablets among themselves. - enable_urgent_balance_no_low_backend: bool | *true - - // Whether to enable the CPU hard limit. - enable_cpu_hard_limit: bool | *false - - // Whether to enable the MTMV feature. - enable_mtmv: bool | *false - - // Whether to enable the Nereids optimizer. If enabled, the load statement of the new optimizer can be used to import data. If this function fails, the old load statement will be degraded. - enable_nereids_load: bool | *false - - // Whether to enable the single replica load. If enabled, the load statement of the new optimizer can be used to import data. If this function fails, the old load statement will be degraded. 
- enable_single_replica_load: bool | *false - - // Whether to enable the workload group. If enabled, the user can create a workload group and assign the query to the group. - enable_workload_group: bool | *true - - // Shuffle won't be enabled for DUPLICATE KEY tables if its tablet num is lower than this number - min_tablets_for_dup_table_shuffle: int | *64 - - // This config is used to control the number of sql cache managed by NereidsSqlCacheManager. Default set to 100. - sql_cache_manage_num: int | *100 - - // Expire sql cache in frontend time. Default set to 300 seconds. - expire_sql_cache_in_fe_second: int | *300 - - // Limit on the number of expr children of an expr tree. Exceed this limit may cause long analysis time while holding database read lock. Do not set this if you know what you are doing. - expr_children_limit: int | *10000 - - // Limit on the depth of an expr tree. Exceed this limit may cause long analysis time while holding db read lock. Do not set this if you know what you are doing. - expr_depth_limit: int | *3000 - - // The interval of FE fetch stream load record from BE. Default set to 120 seconds. - fetch_stream_load_record_interval_second: int | *120 - - // Whether to fix the tablet partition id to 0. Default set to false. - fix_tablet_partition_id_eq_0: bool | *false - - // If set to true, the checkpoint thread will make the checkpoint regardless of the jvm memory used percent. - force_do_metadata_checkpoint: bool | *false - - // Used to force set the replica allocation of the internal table. If the config is not empty, the replication_num and replication_allocation specified by the user when creating the table or partitions will be ignored, and the value set by this parameter will be used. This config effect the operations including create tables, create partitions and create dynamic partitions. This config is recommended to be used only in the test environment. 
- force_olap_table_replication_allocation: string | *"" - - // Used to force the number of replicas of the internal table. If the config is not 0, the replication_num specified by the user when creating the table or partitions will be ignored, and the value set by this parameter will be used. - force_olap_table_replication_num: int | *0 - - // Github workflow test type, for setting some session variables only for certain test type. - fuzzy_test_type: string | *"" - - // In the scenario of memory backpressure, the time interval for obtaining BE memory usage at regular intervals. Default set to 10000 milliseconds. - get_be_resource_usage_interval_ms: int | *10000 - - // Default timeout for hadoop load job. Default set to 3 days. - hadoop_load_default_timeout_second: int | *259200 - - // For ALTER, EXPORT jobs, remove the finished job if expired. Default set to 7 days. - history_job_keep_max_second: int | *604800 - - // Default hive file format for creating table - hive_default_file_format: string | *"orc" - - // The default connection timeout for hive metastore. Default set to 10 seconds. - hive_metastore_client_timeout_second: int | *10 - - // Sample size for hive row count estimation. - hive_stats_partition_sample_size: int | *30 - - // Maximum number of events to poll in each RPC. - hms_events_batch_size_per_rpc: int | *500 - - // Whether to ignore tables of unsupported types when backup, and not report exception. - ignore_backup_not_support_table_type: bool | *false - - // Whether to ignore metadata delay. If the metadata delay of the master FE exceeds this threshold, non-master FEs will still provide read services when the config is set to true. Default set to false. - ignore_meta_check: bool | *false - - // Default timeout for insert load job, in seconds. Default set to 4 hours (14400 seconds). - insert_load_default_timeout_second: int | *14400 - - // Default storage format of inverted index, the default value is V1. 
- inverted_index_storage_format: string | *"V1" - - // Whether to retain the associated MTMV tasks when deleting a Job. - keep_scheduler_mtmv_task_when_job_deleted: bool | *false - - // Labels of finished or cancelled load jobs will be removed after this time. The removed labels can be reused. Default set to 3 days. - label_keep_max_second: int | *259200 - - // The threshold of load labels' number. After this number is exceeded, the labels of the completed import jobs or tasks will be deleted and the deleted labels can be reused. When the value is -1, it indicates no threshold. Default set to 2000. - label_num_threshold: int | *2000 - - // The timeout for LDAP cache, in days. Default set to 30 days. - ldap_cache_timeout_day: int | *30 - - // The timeout for LDAP user cache, in seconds. Default set to 12 hours. - ldap_user_cache_timeout_s: int | *43200 - - // When execute admin set replica status = 'drop', the replica will be marked as user drop. Doris will try to drop this replica within time not exceeding manual_drop_replica_valid_second. Default set to 24 hours. - manual_drop_replica_valid_second: int | *86400 - - // Used to limit element num of InPredicate in delete statement. Default set to 1024. - max_allowed_in_element_num_of_delete: int | *1024 - - // For auto-partitioned tables to prevent users from accidentally creating a large number of partitions, the number of partitions allowed per OLAP table is `max_auto_partition_num`. Default set to 2000. - max_auto_partition_num: int | *2000 - - // Maximum backend heartbeat failure tolerance count, default set to 1. - max_backend_heartbeat_failure_tolerance_count: int | *1 - - // Control the max num of backup/restore job per db. - max_backup_restore_job_num_per_db: int | *10 - - // Control the max num of tablets per backup job involved, to avoid OOM - max_backup_tablets_per_job: int | *300000 - - // if the number of balancing tablets in TabletScheduler exceed max_balancing_tablets, no more balance check. 
Default set to 100. - max_balancing_tablets: int | *100 - - // Maximal concurrency of broker scanners - max_broker_concurrency: int | *10 - - // Max bytes a broker scanner can process in one broker load job. Default set to 500GB. - max_bytes_per_broker_scanner: int | *536870912000 - - // Max bytes that a sync job will commit. When receiving bytes larger than it, SyncJob will commit all data immediately. You should set it larger than canal memory and `min_bytes_sync_commit`. Default set to 64MB. - max_bytes_sync_commit: int | *67108864 - - // The max timeout of a statistics task - max_cbo_statistics_task_timeout_sec: int | *300 - - // max_clone_task_timeout_sec is to limit the max timeout of a clone task. Default set to 2 hours. - max_clone_task_timeout_sec: int | *7200 - - // Maximal waiting time for creating a table, in seconds. - max_create_table_timeout_second: int | *3600 - - // This will limit the max recursion depth of hash distribution pruner. - max_distribution_pruner_recursion_depth: int | *100 - - // Used to limit the maximum number of partitions that can be created when creating a dynamic partition table to avoid creating too many partitions at one time. - max_dynamic_partition_num: int | *500 - - // Maximum number of error tablets showed in broker load. - max_error_tablet_of_broker_load: int | *3 - - // The max timeout of get kafka meta. Default set to 60 seconds. - max_get_kafka_meta_timeout_second: int | *60 - - // Maximal timeout for load job, in seconds. Default set to 3 days. - max_load_timeout_second: int | *259200 - - // Maximum lock hold time; logs a warning if exceeded. Default set to 10 seconds. - max_lock_hold_threshold_seconds: int | *10 - - // Used to limit the maximum number of partitions that can be created when creating multi partition to avoid creating too many partitions at one time. - max_multi_partition_num: int | *4096 - - // Max pending task num keep in pending poll, otherwise it reject the task submit. 
- max_pending_mtmv_scheduler_task_num: int | *100 - - // The number of point query retries in executor. A query may retry if we encounter RPC exception and no result has been sent to user. - max_point_query_retry_time: int | *2 - - // Max query profile num. Default set to 100. - max_query_profile_num: int | *100 - - // The number of query retries. Default set to 3. - max_query_retry_time: int | *3 - - // The maximum number of replicas allowed when an OlapTable performs a schema change. - max_replica_count_when_schema_change: int | *100000 - - // Used to set maximal number of replication per tablet. Default set to 32767. - max_replication_num_per_tablet: int | *32767 - - // The max routine load job num, including NEED_SCHEDULED, RUNNING, PAUSE. Default set to 100. - max_routine_load_job_num: int | *100 - - // The max concurrent routine load task num of a single routine load job. - max_routine_load_task_concurrent_num: int | *256 - - // The max concurrent routine load task num per BE. Default set to 1024. - max_routine_load_task_num_per_be: int | *1024 - - // Max running task num at the same time, otherwise the submitted task will still be kept in the pending pool. Default set to 100. - max_running_mtmv_scheduler_task_num: int | *100 - - // Control rollup job concurrent limit. - max_running_rollup_job_num_per_table: int | *1 - - // Maximum concurrent running txn num including prepare, commit txns under a single db. Txn manager will reject coming txns. - max_running_txn_num_per_db: int | *1000 - - // Max num of same name meta information in catalog recycle bin. Default is 3. 0 means do not keep any meta obj with same name. < 0 means no limit. - max_same_name_catalog_trash_num: int | *3 - - // Maximal number of tablets that can be scheduled at the same time. If the number of scheduled tablets in TabletScheduler exceed max_scheduling_tablets, skip checking. - max_scheduling_tablets: int | *2000 - - // The max number of files stored in SmallFileMgr. Default set to 100. 
- max_small_file_number: int | *100 - - // The max size of a single file store in SmallFileMgr. Default set to 1MB - max_small_file_size_bytes: int | *1048576 - - // Default max number of recent stream load record that can be stored in memory. - max_stream_load_record_size: int | *5000 - - // Maximal timeout for stream load job, in seconds. Default set to 3 days. - max_stream_load_timeout_second: int | *259200 - - // It can't auto-resume routine load job as long as one of the backends is down - max_tolerable_backend_down_num: int | *0 - - // Max number of load jobs, include PENDING、ETL、LOADING、QUORUM_FINISHED. If exceed this number, load job is not allowed to be submitted. - max_unfinished_load_job: int | *1000 - - // The maximum number of partitions allowed by Export job. Default set to 2000. - maximum_number_of_export_partitions: int | *2000 - - // The maximum parallelism allowed by Export job. Default set to 50. - maximum_parallelism_of_export_job: int | *50 - - // The maximum number of tablets allowed by an OutfileStatement in an ExportExecutorTask. Default set to 10. - maximum_tablets_of_outfile_in_export: int | *10 - - // A connection will expire after a random time during [base, 2*base), so that the FE has a chance to connect to a new RS. Set zero to disable it. - meta_service_connection_age_base_minutes: int | *5 - - // The maximum number of connections allowed in the connection pool. Default set to 20. - meta_service_connection_pool_size: int | *20 - - // Whether to enable pooling for meta service connections. Default set to true. - meta_service_connection_pooled: bool | *true - - // The number of times to retry a RPC call to meta service. - meta_service_rpc_retry_times: int | *200 - - // If the jvm memory used percent(heap or old mem pool) exceed this threshold, checkpoint thread will not work to avoid OOM. - metadata_checkpoint_memory_threshold: int | *70 - - // Only take effect when prefer_compute_node_for_external_table is true. 
If the compute node number is less than this value, query on external table will try to get some mix node to assign, to let the total number of nodes reach this value. If the compute node number is larger than this value, query on external table will assign to compute node only. - min_backend_num_for_external_table: int | *-1 - - // The data size threshold used to judge whether replica is too large. Default set to 2GB. - min_bytes_indicate_replica_too_large: int | *2147483648 - - // Minimal bytes that a single broker scanner will read. When splitting file in broker load, if the size of split file is less than this value, it will not be split. - min_bytes_per_broker_scanner: int | *67108864 - - // Min bytes that a sync job will commit. When receiving bytes less than it, SyncJob will continue to wait for the next batch of data until the time exceeds `sync_commit_interval_second`. - min_bytes_sync_commit: int | *15728640 - - // Limit the min timeout of a clone task. Default set to 3 min. - min_clone_task_timeout_sec: int | *180 - - // Minimal waiting time for creating a table, in seconds. Default set to 30 seconds. - min_create_table_timeout_second: int | *30 - - // Minimal number of write successful replicas for load job. - min_load_replica_num: int | *-1 - - // Minimal timeout for load job, in seconds. - min_load_timeout_second: int | *1 - - // Used to set minimal number of replication per tablet. Default set to 1. - min_replication_num_per_tablet: int | *1 - - // Min events that a sync job will commit. When receiving events less than it, SyncJob will continue to wait for the next batch of data until the time exceeds `sync_commit_interval_second`. Default set to 10000. - min_sync_commit_size: int | *10000 - - // The version count threshold used to judge whether replica compaction is too slow. Default set to 200. - min_version_count_indicate_replica_compaction_too_slow: int | *200 - - // Use this parameter to set the partition name prefix for multi partition. 
Only multi partition takes effect, not dynamic partitions. The default prefix is "p_". - multi_partition_name_prefix: string | *"p_" - - // To ensure compatibility with the MySQL ecosystem, Doris includes a built-in database called mysql. If this database conflicts with a user's own database, please modify this field to replace the name of the Doris built-in MySQL database with a different name. - mysqldb_replace_name: string | *"mysql" - - // Valid only if use PartitionRebalancer. - partition_rebalance_max_moves_num_per_selection: int | *10 - - // Valid only if use PartitionRebalancer. If this changed, cached moves will be cleared. - partition_rebalance_move_expire_after_access: int | *600 - - // A period for auto resume routine load. Default set to 10 min. - period_of_auto_resume_min: int | *10 - - // Whether to enable the plugin - plugin_enable: bool | *true - - // If set to true, query on external table will prefer to assign to compute node. And the max number of compute node is controlled by min_backend_num_for_external_table. If set to false, query on external table will assign to any node. If there is no compute node in cluster, this config takes no effect. - prefer_compute_node_for_external_table: bool | *false - - // Print log interval for publish transaction failed interval - publish_fail_log_interval_second: int | *300 - - // Interval for publish topic info interval - publish_topic_info_interval_ms: int | *30000 - - // Check the replicas which are doing schema change when publish transaction. Do not turn off this check under normal circumstances. It's only temporarily skip check if publish version and schema change have dead lock. - publish_version_check_alter_replica: bool | *true - - // Maximal waiting time for all publish version tasks of one transaction to be finished, in seconds. - publish_version_timeout_second: int | *30 - - // Waiting time for one transaction changing to \"at least one replica success\", in seconds. 
If time exceeds this, and for each tablet it has at least one replica publish successful, then the load task will be successful. - publish_wait_time_second: int | *300 - - // Used to set session variables randomly to check more issues in github workflow. - pull_request_id: int | *0 - - // The threshold of slow query, in milliseconds. - qe_slow_log_ms: int | *5000 - - // Timeout for query audit log, in milliseconds. It should be bigger than BE config report_query_statistics_interval_ms - query_audit_log_timeout_ms: int | *5000 - - // Colocate join PlanFragment instance memory limit penalty factor. The memory_limit for colocate join PlanFragment instance = `exec_mem_limit / min (query_colocate_join_memory_limit_penalty_factor, instance_num)` - query_colocate_join_memory_limit_penalty_factor: int | *1 - - // When querying the information_schema.metadata_name_ids table, the time used to obtain all tables in one database. - query_metadata_name_ids_timeout: int | *3 - - // When BE memory usage is bigger than this value, query could queue; default value is -1, meaning this value does not work. Decimal value range from 0 to 1. - query_queue_by_be_used_memory: float | *-1 - - // Interval for query queue update, in milliseconds. - query_queue_update_interval_ms: int | *5000 - - // In some cases, some tablets may have all replicas damaged or lost. At this time, the data has been lost, and the damaged tablets will cause the entire query to fail, and the remaining healthy tablets cannot be queried. In this case, you can set this configuration to true. The system will replace damaged tablets with empty tablets to ensure that the query can be executed. (but at this time the data has been lost, so the query results may be inaccurate) - recover_with_empty_tablet: bool | *false - - // The timeout of executing async remote fragment. - remote_fragment_exec_timeout_ms: int | *30000 - - // Auto set the slowest compaction replica's status to bad. Default set to false. 
- repair_slow_replica: bool | *false - - // This threshold is to avoid piling up too many report task in FE, which may cause OOM exception. In some large Doris cluster, eg: 100 Backends with ten million replicas, a tablet report may cost several seconds after some modification of metadata(drop partition, etc..). - report_queue_size: int | *100 - - // The max number of download tasks assigned to each be during the restore process, the default value is 3. - restore_download_task_num_per_be: int | *3 - - // The default batch size in tablet scheduler for a single schedule. - schedule_batch_size: int | *50 - - // The default slot number per path for hdd in tablet scheduler - schedule_slot_num_per_hdd_path: int | *4 - - // The default slot number per path for ssd in tablet scheduler - schedule_slot_num_per_ssd_path: int | *8 - - // Remove the completed mtmv job after this expired time. Unit second. - scheduler_mtmv_job_expired: int | *86400 - - // Remove the finished mtmv task after this expired time. Unit second. - scheduler_mtmv_task_expired: int | *86400 - - // When set to true, if a query is unable to select a healthy replica, the detailed information of all the replicas of the tablet, including the specific reason why they are unqueryable, will be printed out. - show_details_for_unaccessible_tablet: bool | *true - - // If set to TRUE, the compaction slower replica will be skipped when select get queryable replicas. - skip_compaction_slower_replica: bool | *true - - // Spark dir for Spark Load - spark_home_default_dir: string | *"/opt/apache-doris/fe/lib/spark2x" - - // Default timeout for spark load job, in seconds. - spark_load_default_timeout_second: int | *86400 - - // The maximum difference in the number of splits between nodes. If this number is exceeded, the splits will be redistributed. - split_assigner_max_split_num_variance: int | *1 - - // The consistent hash algorithm has the smallest number of candidates and will select the most idle node. 
- split_assigner_min_consistent_hash_candidate_num: int | *2 - - // The random algorithm has the smallest number of candidates and will select the most idle node. - split_assigner_min_random_candidate_num: int | *2 - - // Local node soft affinity optimization. Prefer local replication node - split_assigner_optimized_local_scheduling: bool | *true - - // When file cache is enabled, the number of virtual nodes of each node in the consistent hash algorithm. The larger the value, the more uniform the distribution of the hash algorithm, but it will increase the memory overhead. - split_assigner_virtual_node_number: int | *256 - - // If capacity of disk reach the 'storage_flood_stage_usage_percent' and 'storage_flood_stage_left_capacity_bytes' the following operation will be rejected: 1. load job 2. restore job - storage_flood_stage_left_capacity_bytes: int | *1073741824 - - // If capacity of disk reach the 'storage_flood_stage_usage_percent' and 'storage_flood_stage_left_capacity_bytes' the following operation will be rejected: 1. load job 2. restore job - storage_flood_stage_usage_percent: int | *95 - - // 'storage_high_watermark_usage_percent' limit the max capacity usage percent of a Backend storage path. 'storage_min_left_capacity_bytes' limit the minimum left capacity of a Backend storage path. If both limitations are reached, this storage path can not be chose as tablet balance destination. But for tablet recovery, we may exceed these limit for keeping data integrity as much as possible. - storage_high_watermark_usage_percent: int | *85 - - // 'storage_high_watermark_usage_percent' limit the max capacity usage percent of a Backend storage path. 'storage_min_left_capacity_bytes' limit the minimum left capacity of a Backend storage path. If both limitations are reached, this storage path can not be chose as tablet balance destination. But for tablet recovery, we may exceed these limit for keeping data integrity as much as possible. 
- storage_min_left_capacity_bytes: int | *2147483648 - - // Whether to enable memtable on sink node by default in stream load - stream_load_default_memtable_on_sink_node: bool | *false - - // Default pre-commit timeout for stream load job, in seconds - stream_load_default_precommit_timeout_second: int | *3600 - - // Default timeout for stream load job, in seconds. - stream_load_default_timeout_second: int | *259200 - - // For some high frequency load jobs such as INSERT, STREAMING LOAD, ROUTINE_LOAD_TASK, DELETE, remove the finished job or task if expired. The removed job or task can be reused - streaming_label_keep_max_second: int | *43200 - - // The max duration of a tablet stream load job, in seconds. - sts_duration: int | *3600 - - // Maximal intervals between two sync job's commits - sync_commit_interval_second: int | *10 - - // The timeout for FE Follower/Observer synchronizing an image file from the FE Master, can be adjusted by the user based on the size of the image file in the ${meta_dir}/image and the network environment between nodes. The default value is 300 seconds - sync_image_timeout_second: int | *300 - - // The max length of table name. - table_name_length_limit: int | *64 - - // Maximal waiting time for creating a single replica, in seconds. If you create a table with #m tablets and #n replicas for each tablet, the create table request will run at most. - tablet_create_timeout_second: int | *2 - - // The same meaning as `tablet_create_timeout_second`, but used when deleting a tablet. - tablet_delete_timeout_second: int | *2 - - // Clone a tablet, further repair max times. - tablet_further_repair_max_times: int | *5 - - // Clone a tablet, further repair timeout. - tablet_further_repair_timeout_second: int | *1200 - - // If tablet loaded txn failed recently, it will get higher priority to repair. - tablet_recent_load_failed_second: int | *1800 - - // The factor of delay time before deciding to repair tablet. If priority is VERY_HIGH, repair it immediately. 
HIGH, delay tablet_repair_delay_factor_second * 1; NORMAL: delay tablet_repair_delay_factor_second * 2; LOW: delay tablet_repair_delay_factor_second * 3. - tablet_repair_delay_factor_second: int | *60 - - // Base time for higher tablet scheduler task, set this config value bigger if want the high priority effect last longer. - tablet_schedule_high_priority_second: int | *1800 - - // If disk usage > balance_load_score_threshold + urgent_disk_usage_extra_threshold, then this disk need schedule quickly, this value could less than 0. - urgent_balance_disk_usage_extra_threshold: float | *0.05 - - // The percentage of disk usage that will be considered as urgent balance. - urgent_balance_pick_large_disk_usage_percentage: int & >0 & <100 | * 80 - - // The threshold of tablet number that will be considered as urgent balance. - urgent_balance_pick_large_tablet_num_threshold: float | *1000.0 - - // When run urgent disk balance, shuffle the top large tablets with this percentage. - urgent_balance_shuffle_large_tablet_percentage: int & >0 & <100 | * 1 - - // If set to true, the thrift structure of query plan will be sent to BE in compact mode. - use_compact_thrift_rpc: bool | *true - - // Set session variables randomly to check more issues in github workflow. - use_fuzzy_session_variable: bool | *false - - // Whether to use mysql's bigint type to return Doris's largeint type - use_mysql_bigint_for_largeint: bool | *false - - // The max diff of disk capacity used percent between BE. It is used for calculating load score of a backend. - used_capacity_percent_max_diff: float | * 0.30 - - // The valid ratio threshold of the difference between the version count of the slowest replica and the fastest replica. 
If repair_slow_replica is set to true, it is used to determine whether to repair the slowest replica - valid_version_count_delta_ratio_between_replicas: float | * 0.5 - - // Wait for the internal batch to be written before returning insert into and stream load use group commit by default. - wait_internal_group_commit_finish: bool | *false - - // The max number of workload groups. - workload_group_max_num: int | *15 - - // The max number of actions in a policy. - workload_max_action_num_in_policy: int | *5 - - // The max number of conditions in a policy. - workload_max_condition_num_in_policy: int | *5 - - // The max number of policies. - workload_max_policy_num: int | *25 - - // The interval of checking the runtime status of a workload group, in milliseconds. - workload_runtime_status_thread_interval_ms: int | *2000 - - // The interval of scheduling a workload group, in milliseconds. - workload_sched_policy_interval_ms: int | *1000 - - // STATIC parameters - // The path of the user-defined configuration file, used to store fe_custom.conf. The configuration in this file will override the configuration in fe.conf - custom_config_dir: string | *"/opt/apache-doris/fe/conf" - - // The port of FE Arrow-Flight-SQL server - arrow_flight_sql_port: int | *-1 - - // The directory to save Doris meta data - meta_dir: string | *"/opt/apache-doris/fe/doris-meta" - - // The path of the FE log file, used to store fe.log. This parameter is deprecated. Use the LOG_DIR environment variable instead. 
- sys_log_dir: string | *"/opt/apache-doris/fe/log" - - // The level of FE log - sys_log_level: string & ("INFO" | "WARN" | "ERROR" | "FATAL") | *"INFO" - - // The path of the FE audit log file, used to store fe.audit.log - audit_log_dir: string | *"/opt/apache-doris/fe/log" - - // The port of FE MySQL server - query_port: int | *9030 - - // The port of FE thrift server - rpc_port: int | *9020 - - // The port of BDBJE - edit_log_port: int | *9010 - - // Fe http port, currently all FE's http port must be same - http_port: int | *8030 - - // Fe https port, currently all FE's https port must be same - https_port: int | *8050 - - // Set the specific domain name that allows cross-domain access. By default, any domain name is allowed cross-domain access - access_control_allowed_origin_domain: string | *"*" - - // Specify the default authentication class of internal catalog - access_controller_type: string | *"default" - - // For some test case, we may need to create a table with multi replicas. DO NOT use it for production env. - allow_replica_on_same_host: bool | *false - - // Determine the persist number of automatic triggered analyze job execution status - analyze_record_limit: int | *20000 - - // The alive time of the user token in Arrow Flight Server, expire after write, unit minutes, the default value is 4320, which is 3 days - arrow_flight_token_alive_time: int | *4320 - - // The maximum number of user tokens cached in Arrow Flight Server, which are eliminated according to LRU rules after exceeding the limit, the default value is 512, the mandatory limit is less than qe_max_connection/2 to avoid `Reach limit of connections`, because arrow flight sql is a stateless protocol, the connection is usually not actively disconnected, bearer token is evict from the cache will unregister ConnectContext. - arrow_flight_token_cache_size: int | *512 - - // The loading load task executor pool size. This pool size limits the max running loading load tasks. 
Currently, it only limits the loading load task of broker load. - async_loading_load_task_pool_size: int | *10 - - // The pending load task executor pool size. This pool size limits the max running pending load tasks. Currently, it only limits the pending load task of broker load and spark load. It should be less than `max_running_txn_num_per_db` - async_pending_load_task_pool_size: int | *10 - - // The number of threads used to consume async tasks. @See TaskDisruptor if we have a lot of async tasks, we need more threads to consume them. Sure, it's depends on the cpu cores. - async_task_consumer_thread_num: int | *64 - - // The number of async tasks that can be queued. @See TaskDisruptor if consumer is slow, the queue will be full, and the producer will be blocked. - async_task_queen_size: int | *1024 - - // The maximum survival time of the FE audit log file. After exceeding this time, the log file will be deleted. Supported formats include: 7d, 10h, 60m, 120s - audit_log_delete_age: string | *"30d" - - // enable compression for FE audit log file - audit_log_enable_compress: bool | *false - - // The split cycle of the FE audit log file - audit_log_roll_interval: string & ("DAY" | "HOUR") | *"DAY" - - // The maximum number of FE audit log files. After exceeding this number, the oldest log file will be deleted - audit_log_roll_num: int | *90 - - // No description found in fe_config.java - audit_sys_accumulated_file_size: int | *4 - - // Cluster token used for internal authentication. - auth_token: string | *"" - - // Specifies the authentication type - authentication_type: string & ("ldap" | "default") | *"default" - - // The maximum number of simultaneously running analyze tasks. - auto_analyze_simultaneously_running_task_num: int | *1 - - // BackendServiceProxy pool size for pooling GRPC channels. 
- backend_proxy_num: int | *48 - - // Timeout for backend RPC requests, unit milliseconds - backend_rpc_timeout_ms: int | *60000 - - // Plugins' path for BACKUP and RESTORE operations. Currently deprecated. - backup_plugin_path: string | *"/tools/trans_file_tool/trans_files.sh" - - // BDBJE file logging level - bdbje_file_logging_level: string & ("INFO" | "OFF" | "SEVERE" | "WARNING" | "CONFIG" | "FINE" | "FINER" | "FINEST" | "ALL") | *"INFO" - - // Amount of free disk space required by BDBJE. If the free disk space is less than this value, BDBJE will not be able to write. - bdbje_free_disk_bytes: int | *1073741824 - - // The heartbeat timeout for bdbje. The default is 30 seconds, which is same as default value in bdbje. If the network is experiencing transient problems, of some unexpected long java GC annoying you, you can try to increase this value to decrease the chances of false timeouts - bdbje_heartbeat_timeout_second: int | *30 - - // The lock timeout of bdbje operation, in seconds. If there are many LockTimeoutException in FE WARN log, you can try to increase this value - bdbje_lock_timeout_second: int | *5 - - // The replica ack timeout of bdbje between master and follower, in seconds. If there are many ReplicaWriteException in FE WARN log, you can try to increase this value - bdbje_replica_ack_timeout_second: int | *10 - - // The desired upper limit on the number of bytes of reserved space to retain in a replicated JE Environment. This parameter is ignored in a non-replicated JE Environment. - bdbje_reserved_disk_bytes: int | *1073741824 - - // The timeout of RPC between FE and Broker, in milliseconds - broker_timeout_ms: int | *10000 - - // Whether to ignore the minimum erase latency when erasing catalog trash. - catalog_trash_ignore_min_erase_latency: bool | *false - - // The number of threads used to consume CBO concurrency statistics tasks. 
- cbo_concurrency_statistics_task_num: int | *10 - - // If set to true, Doris will check if the compiled and running versions of Java are compatible - check_java_version: bool | *true - - // Whether to check table lock leaky - check_table_lock_leaky: bool | *false - - // the timeout threshold of checking wal_queue on be(ms) - check_wal_queue_timeout_threshold: int | *180000 - - // The interval time to check cloud cluster status(second) - cloud_cluster_check_interval_second: int | *10 - - // The maximum number of times to retry a failed RPC call to the cloud meta service. - cloud_meta_service_rpc_failed_retry_times: int | *200 - - // The cluster ID of the SQL Server cluster in the cloud. - cloud_sql_server_cluster_id: string | *"RESERVED_CLUSTER_ID_FOR_SQL_SERVER" - - // The cluster name of the SQL Server cluster in the cloud. - cloud_sql_server_cluster_name: string | *"RESERVED_CLUSTER_NAME_FOR_SQL_SERVER" - - // The unique ID of the cloud cluster. - cloud_unique_id: string | *"" - - // Cluster id used for internal authentication. Usually a random integer generated when master FE start at first time. You can also specify one. - cluster_id: int | *-1 - - // The CPU resource limit per analyze task. - cpu_resource_limit_per_analyze_task: int | *1 - - // The interval time to update the used data quota of the cloud cluster(second) - db_used_data_quota_update_interval_secs: int | *300 - - // Deadlock detection interval time, unit minute - deadlock_detection_interval_minute: int | *5 - - // The default timeout for getting the version from the cloud meta service(second) - default_get_version_from_ms_timeout_second: int | *3 - - // The interval time to schedule the default schema change task(millisecond) - default_schema_change_scheduler_interval_millisecond: int | *500 - - // When create a table(or partition), you can specify its storage medium(HDD or SSD). - // If not specified, the default medium specified by this configuration will be used. 
- default_storage_medium: string | *"HDD" - - // Whether to disable mini load, disabled by default - disable_mini_load: bool | *true - - // The number of bytes to reduce in each DPP job. - dpp_bytes_per_reduce: int | *104857600 - - // The configuration string for DPP jobs. - dpp_config_str: string | *"{palo-dpp : {hadoop_palo_path : '/dir',hadoop_configs : 'fs.default.name=hdfs://host:port;mapred.job.tracker=host:port;hadoop.job.ugi=user,password'}}" - - // The default cluster name for DPP jobs. - dpp_default_cluster: string | *"palo-dpp" - - // The default configuration string for DPP jobs. - dpp_default_config_str: string | *"{hadoop_configs : 'mapred.job.priority=NORMAL;mapred.job.map.capacity=50;mapred.job.reduce.capacity=50;mapred.hce.replace.streaming=false;abaci.long.stored.job=true;dce.shuffle.enable=false;dfs.client.authserver.force_stop=true;dfs.client.auth.method=0'}" - - // The path to the Hadoop client binary. - dpp_hadoop_client_path: string | *"/lib/hadoop-client/hadoop/bin/hadoop" - - // The storage type of the metadata log. BDB: Logs are stored in BDBJE. LOCAL: logs are stored in a local file (for testing only) - edit_log_type: string | *"bdb" - - // If set to true, FE will be started in BDBJE debug mode - enable_bdbje_debug_mode: bool | *false - - // Whether to enable cloud snapshot version. - enable_cloud_snapshot_version: bool | *true - - // Whether to enable concurrent update. - enable_concurrent_update: bool | *false - - // Temporary config filed, will make all olap tables enable light schema change - enable_convert_light_weight_schema_change: bool | *false - - // Whether to enable deadlock detection - enable_deadlock_detection: bool | *false - - // is enable debug points, use in test. - enable_debug_points: bool | *false - - // Whether to delete existing files when creating a table. - enable_delete_existing_files: bool | *false - - // Whether to enable deploy manager. 
- enable_deploy_manager: string | *"disable" - - // Whether to use file to record log. When starting FE with --console, all logs will be written to both standard output and file. Close this option will no longer use file to record log. - enable_file_logger: bool | *true - - // Whether to enable the function of getting log files through http interface - enable_get_log_file_api: bool | *false - - // If set to true, doris will automatically synchronize hms metadata to the cache in fe. - enable_hms_events_incremental_sync: bool | *false - - // Whether to enable http server v2. - enable_http_server_v2: bool | *true - - // Used to enable java_udf, default is true. if this configuration is false, creation and use of java_udf is disabled. - enable_java_udf: bool | *true - - // If set to true, we will allow the interval unit to be set to second, when creating a recurring job. - enable_job_schedule_second_for_test: bool | *false - - // If set to true, metric collector will be run as a daemon timer to collect metrics at fix interval - enable_metric_calculator: bool | *true - - // Controls whether multiple tags are enabled for the system - enable_multi_tags: bool | *false - - // Whether to allow the outfile function to export the results to the local disk. 
- enable_outfile_to_local: bool | *false - - // Whether to enable proxy protocol - enable_proxy_protocol: bool | *false - - // Storage policy feature control - enable_storage_policy: bool | *true - - // Controls STS VPC access - enable_sts_vpc: bool | *true - - // Controls token validation - enable_token_check: bool | *true - - // Elasticsearch state sync interval in seconds - es_state_sync_interval_second: int | *10 - - // Whether to enable HTTP authentication for all endpoints - enable_all_http_auth: bool | *false - - // Whether to enable binlog feature - enable_feature_binlog: bool | *false - - // Whether to use FQDN mode for node identification - enable_fqdn_mode: bool | *false - - // Whether to enable HTTPS for web UI and API endpoints - enable_https: bool | *false - - // If set to true, doris will establish an encrypted channel based on the SSL protocol with mysql. - enable_ssl: bool | *false - - // Set the maximum byte length of binlog message - max_binlog_messsage_size: int | *1073741824 // 1GB - - // External cache expiration time in minutes after access - external_cache_expire_time_minutes_after_access: int | *10 - - // Maximum seconds to save finished jobs - finish_job_max_saved_second: int | *259200 // 3 days - - // Threshold time in hours for cleaning up finished jobs - finished_job_cleanup_threshold_time_hour: int | *24 - - // Whether to forbid running ALTER jobs - forbid_running_alter_job: bool | *false - - // Force SQLServer Jdbc Catalog encrypt to false - force_sqlserver_jdbc_encrypt_false: bool | *false - - // Default value for group commit data bytes - group_commit_data_bytes_default_value: int | *134217728 // 128MB - - // Default value for group commit interval in milliseconds - group_commit_interval_ms_default_value: int | *10000 // 10 seconds - - // Seconds to keep gRPC connection alive - grpc_keep_alive_second: int | *10 - - // Maximum gRPC message size in bytes - grpc_max_message_size_bytes: int | *2147483647 // 2GB - - // Number of threads 
in gRPC thread manager - grpc_threadmgr_threads_nums: int | *4096 - - // Heartbeat interval in seconds - heartbeat_interval_second: int | *10 - - // Queue size to store heartbeat task in heartbeat_mgr - heartbeat_mgr_blocking_queue_size: int | *1024 - - // Number of threads to handle heartbeat events - heartbeat_mgr_threads_num: int | *8 - - // Polling interval for HMS events in milliseconds - hms_events_polling_interval_ms: int | *10000 - - // Extra base path for HTTP API - http_api_extra_base_path: string | *"" - - // Maximum number of worker threads for HTTP load submitter - http_load_submitter_max_worker_threads: int | *2 - - // Maximum number of worker threads for HTTP SQL submitter - http_sql_submitter_max_worker_threads: int | *2 - - // Whether to ignore BDBJE log checksum read - ignore_bdbje_log_checksum_read: bool | *false - - // Whether to ignore unknown metadata module - ignore_unknown_metadata_module: bool | *false - - // Accumulated file size for info system - info_sys_accumulated_file_size: int | *4 - - // Initial password for root user - initial_root_password: string | *"" - - // Safe path for JDBC driver, default is "*" to allow all - jdbc_driver_secure_path: string | *"*" - - // The path to save jdbc drivers - jdbc_drivers_dir: string | *"/opt/apache-doris/fe/jdbc_drivers" - - // MySQL Jdbc Catalog mysql does not support pushdown functions - jdbc_mysql_unsupported_pushdown_functions: string | *'["date_trunc", "money_format", "negative"]' - - // Number of acceptors for Jetty server - jetty_server_acceptors: int | *2 - - // Maximum HTTP header size for Jetty server - jetty_server_max_http_header_size: int | *1048576 - - // Maximum HTTP post size for Jetty server - jetty_server_max_http_post_size: int | *104857600 // 100MB - - // Number of selectors for Jetty server - jetty_server_selectors: int | *4 - - // Number of workers for Jetty server - jetty_server_workers: int | *0 - - // Maximum threads in Jetty thread pool - jetty_threadPool_maxThreads: int 
| *400 - - // Minimum threads in Jetty thread pool - jetty_threadPool_minThreads: int | *20 - - // Queue size for job dispatch timer - job_dispatch_timer_job_queue_size: int | *1024 - - // Number of threads for job dispatch timer - job_dispatch_timer_job_thread_num: int | *2 - - // Number of threads for insert task consumer - job_insert_task_consumer_thread_num: int | *10 - - // Number of threads for MTMV task consumer - job_mtmv_task_consumer_thread_num: int | *10 - - // The alias of the key store certificate - key_store_alias: string | *"doris_ssl_certificate" - - // The password of the key store - key_store_password: string | *"" - - // The path of the key store - key_store_path: string | *"/opt/apache-doris/fe/conf/ssl/doris_ssl_certificate.keystore" - - // The type of the key store - key_store_type: string | *"JKS" - - // The interval for cleaning labels in seconds - label_clean_interval_second: int | *3600 - - // The maximum length of label regex - label_regex_length: int | *128 - - // The admin name for LDAP authentication - ldap_admin_name: string | *"cn=admin,dc=domain,dc=com" - - // Whether to enable LDAP authentication - ldap_authentication_enabled: bool | *false - - // The base DN for LDAP group search - ldap_group_basedn: string | *"ou=group,dc=domain,dc=com" - - // The LDAP server host - ldap_host: string | *"127.0.0.1" - - // The maximum number of active connections in the LDAP connection pool - ldap_pool_max_active: int | *8 - - // The maximum number of idle connections in the LDAP connection pool - ldap_pool_max_idle: int | *8 - - // The maximum total number of connections in the LDAP connection pool - ldap_pool_max_total: int | *-1 - - // The maximum wait time in milliseconds for obtaining a connection from the LDAP pool - ldap_pool_max_wait: int | *-1 - - // The minimum number of idle connections in the LDAP connection pool - ldap_pool_min_idle: int | *0 - - // Whether to test connections when borrowing from the LDAP pool - 
ldap_pool_test_on_borrow: bool | *false - - // Whether to test connections when returning to the LDAP pool - ldap_pool_test_on_return: bool | *false - - // Whether to test idle connections in the LDAP pool - ldap_pool_test_while_idle: bool | *false - - // The behavior when the LDAP connection pool is exhausted - ldap_pool_when_exhausted: int | *1 - - // The LDAP server port - ldap_port: int | *389 - - // The base DN for LDAP user search - ldap_user_basedn: string | *"ou=people,dc=domain,dc=com" - - // The filter for LDAP user search - ldap_user_filter: string | *"(&(uid={login}))" - - // The interval in seconds for checking load jobs - load_checker_interval_second: int | *5 - - // The runtime locale when executing commands - locale: string | *"zh_CN.UTF-8" - - // The threshold in milliseconds for reporting lock acquisition time - lock_reporting_threshold_ms: int | *500 - - // The maximum size of log files in MB before rolling - log_roll_size_mb: int | *1024 - - // The strategy for log file rollover - log_rollover_strategy: string | *"age" - - // Whether table names are stored in lowercase (0: case sensitive, 1: lowercase, 2: case insensitive) - lower_case_table_names: int | *0 - - // The synchronization policy for master operations (SYNC, NO_SYNC, WRITE_NO_SYNC) - master_sync_policy: string | *"SYNC" - - // The maximum number of agent task threads - max_agent_task_threads_num: int | *4096 - - // The maximum allowed clock delta in milliseconds for BDB JE - max_bdbje_clock_delta_ms: int | *5000 - - // Maximum version number of BE execution - max_be_exec_version: int | *5 - - // Maximum size of thread pool for external cache loader - max_external_cache_loader_thread_pool_size: int | *64 - - // Maximum number of external file cache entries - max_external_file_cache_num: int | *10000 - - // Maximum number of external schema cache entries - max_external_schema_cache_num: int | *10000 - - // Maximum number of external table cache entries - max_external_table_cache_num: 
int | *1000 - - // Maximum number of external table row count cache entries - max_external_table_row_count_cache_num: int | *100000 - - // Maximum number of Hive list partitions (-1 means no limit) - max_hive_list_partition_num: int | *-1 - - // Maximum number of Hive partition cache entries - max_hive_partition_cache_num: int | *10000 - - // Maximum number of Hive partition table cache entries - max_hive_partition_table_cache_num: int | *1000 - - // Maximum number of meta object cache entries - max_meta_object_cache_num: int | *1000 - - // Maximum number of MySQL service task threads - max_mysql_service_task_threads_num: int | *4096 - - // Maximum number of persistence tasks - max_persistence_task_count: int | *100 - - // Maximum number of remote file system cache entries - max_remote_file_system_cache_num: int | *100 - - // Maximum number of synchronization task threads - max_sync_task_threads_num: int | *10 - - // Toleration time in seconds for meta delay (default: 300 seconds = 5 minutes) - meta_delay_toleration_second: int | *300 - - // Timeout in milliseconds for meta publish operations - meta_publish_timeout_ms: int | *1000 - - // MetaService endpoint in format "ip:port", e.g., "192.0.0.10:8866" - meta_service_endpoint: string | *"" - - // Minimum version number of BE execution - min_be_exec_version: int | *0 - - // Number of in-memory records for MySQL load - mysql_load_in_memory_record: int | *20 - - // Secure path for MySQL load server - mysql_load_server_secure_path: string | *"" - - // Thread pool size for MySQL load - mysql_load_thread_pool: int | *4 - - // Backlog number for MySQL NIO - mysql_nio_backlog_num: int | *1024 - - - // Number of IO threads for MySQL service - mysql_service_io_threads_num: int | *4 - - // Path to the default CA certificate for MySQL SSL - mysql_ssl_default_ca_certificate: string | *"/opt/apache-doris/fe/mysql_ssl_default_certificate/ca_certificate.p12" - - // Password for the default CA certificate - 
mysql_ssl_default_ca_certificate_password: string | *"doris" - - // Path to the default server certificate for MySQL SSL - mysql_ssl_default_server_certificate: string | *"/opt/apache-doris/fe/mysql_ssl_default_certificate/server_certificate.p12" - - // Password for the default server certificate - mysql_ssl_default_server_certificate_password: string | *"doris" - - // Directory for storing Nereids trace logs - nereids_trace_log_dir: string | *"/opt/apache-doris/fe/log/nereids_trace" - - // Interval in seconds for updating partition information - partition_info_update_interval_secs: int | *60 - - // Number of simultaneously running tasks for period analyze - period_analyze_simultaneously_running_task_num: int | *1 - - // Directory for Doris plugins - plugin_dir: string | *"/opt/apache-doris/fe/plugins" - - // Timeout in milliseconds for point queries - point_query_timeout_ms: int | *10000 - - // Comma-separated list of CIDR network segments for priority networks - priority_networks: string | *"" - - // Enable proxy authentication - proxy_auth_enable: bool | *false - - // Magic prefix for proxy authentication - proxy_auth_magic_prefix: string | *"x@8" - - // Interval in milliseconds for publishing versions - publish_version_interval_ms: int | *10 - - - // The maximum number of connections allowed for query engine - qe_max_connection: int | *1024 - - // The size of the Ranger cache - ranger_cache_size: int | *10000 - - // The policy for replica acknowledgment: ALL, NONE, SIMPLE_MAJORITY - replica_ack_policy: string | *"SIMPLE_MAJORITY" - - // The policy for replica synchronization: SYNC, NO_SYNC, WRITE_NO_SYNC - replica_sync_policy: string | *"SYNC" - - // Whether to skip authentication check for localhost connections - skip_localhost_auth_check: bool | *true - - // The directory for storing small files - small_file_dir: string | *"/opt/apache-doris/fe/small_files" - - // The version of Spark DPP (Data Processing Pipeline) - spark_dpp_version: string | 
*"1.2-SNAPSHOT" - - // The directory for Spark launcher logs - spark_launcher_log_dir: string | *"/opt/apache-doris/fe/log/spark_launcher_log" - - // The interval in seconds for checking Spark load tasks - spark_load_checker_interval_second: int | *60 - - // The path for Spark resources - spark_resource_path: string | *"" - - // Whether to force client authentication for SSL connections - ssl_force_client_auth: bool | *false - - // The type of SSL trust store - ssl_trust_store_type: string | *"PKCS12" - - // The number of statistics tasks that can run simultaneously - statistics_simultaneously_running_task_num: int | *3 - - // The memory limit in bytes for statistics SQL queries - statistics_sql_mem_limit_in_bytes: int | *2147483648 - - // The number of parallel execution instances for statistics SQL queries - statistics_sql_parallel_exec_instance_num: int | *1 - - // The size of the statistics cache - stats_cache_size: int | *500000 - - // The interval in seconds for synchronization checking - sync_checker_interval_second: int | *5 - // The maximum survival time of the FE log file. After exceeding this time, the log file will be deleted. Supported formats include: 7d, 10h, 60m, 120s - sys_log_delete_age: string | *"7d" - - // enable compression for FE log file - sys_log_enable_compress: bool | *false - - // The output mode of FE log. - sys_log_mode: string & ("NORMAL" | "ASYNC" | "BRIEF") | *"NORMAL" - - // The split cycle of the FE log file - sys_log_roll_interval: string & ("DAY" | "HOUR") | *"DAY" - - // The maximum number of FE log files. After exceeding this number, the oldest log file will be deleted - sys_log_roll_num: int | *10 - - // Verbose module. The VERBOSE level log is implemented by the DEBUG level of log4j. If set to `org.apache.doris.catalog`, the DEBUG log of the class under this package will be printed. 
- sys_log_verbose_modules: string | *'[]' - - // The interval in milliseconds for tablet checking - tablet_checker_interval_ms: int | *20000 - - // The type of tablet rebalancer, default is "BeLoad" - tablet_rebalancer_type: string | *"BeLoad" - - // The interval in milliseconds for tablet scheduling - tablet_schedule_interval_ms: int | *1000 - - // The interval in seconds for updating tablet statistics - tablet_stat_update_interval_second: int | *60 - - // The number of backlog connections for Thrift server - thrift_backlog_num: int | *1024 - - // The timeout in milliseconds for Thrift client - thrift_client_timeout_ms: int | *0 - - // The maximum frame size for Thrift communication - thrift_max_frame_size: int | *16384000 - - // The maximum message size for Thrift communication - thrift_max_message_size: int | *104857600 - - // The maximum number of worker threads for Thrift server - thrift_server_max_worker_threads: int | *4096 - - // The type of Thrift server, options: THREADED, THREAD_POOL - thrift_server_type: string | *"THREAD_POOL" - - // The temporary directory for storing temporary files - tmp_dir: string | *"/opt/apache-doris/fe/temp_dir" - - // The period in hours for token generation - token_generate_period_hour: int | *12 - - // The size of the token queue, one token will keep alive for {token_queue_size * token_generate_period_hour} hours - token_queue_size: int | *6 - - // The interval in seconds for cleaning transactions - transaction_clean_interval_second: int | *30 - - // The limit for transaction rollback operations - txn_rollback_limit: int | *100 - - // Whether to use the new tablet scheduler - use_new_tablet_scheduler: bool | *true - - // The warning threshold for system accumulated file size in GB - warn_sys_accumulated_file_size: int | *2 - - // Whether to use Kubernetes certificates - with_k8s_certs: bool | *false - - // The path to the YARN client - yarn_client_path: string | *"/opt/apache-doris/fe/lib/yarn-client/hadoop/bin/yarn" - - // 
The directory for YARN configuration files - yarn_config_dir: string | *"/opt/apache-doris/fe/lib/yarn-config" - - // The JAVA_OPTS startup configuration for the FE node. The default value of -Xmx is 80% of the container available memory. - JAVA_OPTS: string | *"" -} - -configuration: #FEParameter & { -} diff --git a/addons/doris/config/fe-config-effect-scope.yaml b/addons/doris/config/fe-config-effect-scope.yaml deleted file mode 100644 index 9dfea613d..000000000 --- a/addons/doris/config/fe-config-effect-scope.yaml +++ /dev/null @@ -1,505 +0,0 @@ -staticParameters: - - access_control_allowed_origin_domain - - access_controller_type - - allow_replica_on_same_host - - analyze_record_limit - - arrow_flight_token_alive_time - - arrow_flight_token_cache_size - - async_loading_load_task_pool_size - - async_pending_load_task_pool_size - - async_task_consumer_thread_num - - async_task_queen_size - - audit_log_delete_age - - audit_log_enable_compress - - audit_log_roll_interval - - audit_log_roll_num - - audit_sys_accumulated_file_size - - auth_token - - authentication_type - - auto_analyze_simultaneously_running_task_num - - backend_proxy_num - - backend_rpc_timeout_ms - - backup_plugin_path - - bdbje_file_logging_level - - bdbje_free_disk_bytes - - bdbje_heartbeat_timeout_second - - bdbje_lock_timeout_second - - bdbje_replica_ack_timeout_second - - bdbje_reserved_disk_bytes - - broker_timeout_ms - - catalog_trash_ignore_min_erase_latency - - cbo_concurrency_statistics_task_num - - check_java_version - - check_table_lock_leaky - - check_wal_queue_timeout_threshold - - cloud_cluster_check_interval_second - - cloud_meta_service_rpc_failed_retry_times - - cloud_sql_server_cluster_id - - cloud_sql_server_cluster_name - - cloud_unique_id - - cluster_id - - cpu_resource_limit_per_analyze_task - - db_used_data_quota_update_interval_secs - - deadlock_detection_interval_minute - - default_get_version_from_ms_timeout_second - - default_schema_change_scheduler_interval_millisecond 
- - default_storage_medium - - disable_mini_load - - dpp_bytes_per_reduce - - dpp_config_str - - dpp_default_cluster - - dpp_default_config_str - - dpp_hadoop_client_path - - edit_log_type - - enable_bdbje_debug_mode - - enable_cloud_snapshot_version - - enable_concurrent_update - - enable_convert_light_weight_schema_change - - enable_deadlock_detection - - enable_debug_points - - enable_delete_existing_files - - enable_deploy_manager - - enable_file_logger - - enable_get_log_file_api - - enable_hms_events_incremental_sync - - enable_http_server_v2 - - enable_java_udf - - enable_job_schedule_second_for_test - - enable_metric_calculator - - enable_multi_tags - - enable_outfile_to_local - - enable_proxy_protocol - - enable_storage_policy - - enable_sts_vpc - - enable_token_check - - es_state_sync_interval_second - - enable_all_http_auth - - enable_feature_binlog - - enable_fqdn_mode - - enable_https - - max_binlog_messsage_size - - external_cache_expire_time_minutes_after_access - - finish_job_max_saved_second - - finished_job_cleanup_threshold_time_hour - - forbid_running_alter_job - - force_sqlserver_jdbc_encrypt_false - - group_commit_data_bytes_default_value - - group_commit_interval_ms_default_value - - grpc_keep_alive_second - - grpc_max_message_size_bytes - - grpc_threadmgr_threads_nums - - heartbeat_interval_second - - heartbeat_mgr_blocking_queue_size - - heartbeat_mgr_threads_num - - hms_events_polling_interval_ms - - http_api_extra_base_path - - http_load_submitter_max_worker_threads - - http_sql_submitter_max_worker_threads - - ignore_bdbje_log_checksum_read - - ignore_unknown_metadata_module - - info_sys_accumulated_file_size - - initial_root_password - - jdbc_driver_secure_path - - jdbc_drivers_dir - - jdbc_mysql_unsupported_pushdown_functions - - jetty_server_acceptors - - jetty_server_max_http_header_size - - jetty_server_max_http_post_size - - jetty_server_selectors - - jetty_server_workers - - jetty_threadPool_maxThreads - - 
jetty_threadPool_minThreads - - job_dispatch_timer_job_queue_size - - job_dispatch_timer_job_thread_num - - job_insert_task_consumer_thread_num - - job_mtmv_task_consumer_thread_num - - key_store_alias - - key_store_password - - key_store_path - - key_store_type - - label_clean_interval_second - - label_regex_length - - ldap_admin_name - - ldap_authentication_enabled - - ldap_group_basedn - - ldap_host - - ldap_pool_max_active - - ldap_pool_max_idle - - ldap_pool_max_total - - ldap_pool_max_wait - - ldap_pool_min_idle - - ldap_pool_test_on_borrow - - ldap_pool_test_on_return - - ldap_pool_test_while_idle - - ldap_pool_when_exhausted - - ldap_port - - ldap_user_basedn - - ldap_user_filter - - load_checker_interval_second - - locale - - lock_reporting_threshold_ms - - log_roll_size_mb - - log_rollover_strategy - - lower_case_table_names - - master_sync_policy - - max_agent_task_threads_num - - max_bdbje_clock_delta_ms - - max_be_exec_version - - max_external_cache_loader_thread_pool_size - - max_external_file_cache_num - - max_external_schema_cache_num - - max_external_table_cache_num - - max_external_table_row_count_cache_num - - max_hive_list_partition_num - - max_hive_partition_cache_num - - max_hive_partition_table_cache_num - - max_meta_object_cache_num - - max_mysql_service_task_threads_num - - max_persistence_task_count - - max_remote_file_system_cache_num - - max_sync_task_threads_num - - meta_delay_toleration_second - - meta_publish_timeout_ms - - meta_service_endpoint - - min_be_exec_version - - mysql_load_in_memory_record - - mysql_load_server_secure_path - - mysql_load_thread_pool - - mysql_nio_backlog_num - - mysql_service_io_threads_num - - nereids_trace_log_dir - - partition_info_update_interval_secs - - period_analyze_simultaneously_running_task_num - - plugin_dir - - point_query_timeout_ms - - priority_networks - - proxy_auth_enable - - proxy_auth_magic_prefix - - publish_version_interval_ms - - qe_max_connection - - ranger_cache_size - - 
replica_ack_policy - - replica_sync_policy - - small_file_dir - - spark_dpp_version - - spark_launcher_log_dir - - spark_load_checker_interval_second - - spark_resource_path - - ssl_force_client_auth - - ssl_trust_store_type - - statistics_simultaneously_running_task_num - - statistics_sql_mem_limit_in_bytes - - statistics_sql_parallel_exec_instance_num - - stats_cache_size - - sync_checker_interval_second - - sys_log_delete_age - - sys_log_dir - - sys_log_enable_compress - - sys_log_level - - sys_log_mode - - sys_log_roll_interval - - sys_log_roll_num - - sys_log_verbose_modules - - tablet_checker_interval_ms - - tablet_rebalancer_type - - tablet_schedule_interval_ms - - tablet_stat_update_interval_second - - thrift_backlog_num - - thrift_client_timeout_ms - - thrift_max_frame_size - - thrift_max_message_size - - thrift_server_max_worker_threads - - thrift_server_type - - tmp_dir - - token_generate_period_hour - - token_queue_size - - transaction_clean_interval_second - - txn_rollback_limit - - use_new_tablet_scheduler - - warn_sys_accumulated_file_size - - with_k8s_certs - - yarn_client_path - - yarn_config_dir - - -dynamicParameters: - - abort_txn_after_lost_heartbeat_time_second - - agent_task_resend_wait_time_ms - - alter_table_timeout_second - - audit_event_log_queue_size - - auto_check_statistics_in_minutes - - autobucket_max_buckets - - autobucket_min_buckets - - backend_load_capacity_coeficient - - backup_job_default_timeout_ms - - backup_upload_task_num_per_be - - balance_be_then_disk - - balance_load_score_threshold - - balance_slot_num_per_path - - be_exec_version - - be_rebalancer_fuzzy_test - - be_rebalancer_idle_seconds - - be_report_query_statistics_timeout_ms - - blacklist_duration_second - - broker_load_default_timeout_second - - cache_enable_partition_mode - - cache_enable_sql_mode - - cache_last_version_interval_second - - cache_result_max_data_size - - cache_result_max_row_count - - capacity_used_percent_high_water - - 
catalog_trash_expire_second - - catalog_try_lock_timeout_ms - - cbo_default_sample_percentage - - cbo_max_statistics_job_num - - check_consistency_default_timeout_second - - cloud_cold_read_percent - - cloud_replica_num - - colocate_group_relocate_delay_second - - commit_timeout_second - - consistency_check_end_time - - consistency_check_start_time - - create_tablet_round_robin_from_start - - decommission_tablet_check_threshold - - decommission_tablet_wait_time_seconds - - default_db_data_quota_bytes - - default_db_max_running_txn_num - - default_db_replica_quota_size - - default_load_parallelism - - default_max_filter_ratio - - default_max_query_instances - - delete_job_max_timeout_second - - desired_max_waiting_jobs - - diagnose_balance_max_tablet_num_diff - - diagnose_balance_max_tablet_num_ratio - - disable_backend_black_list - - disable_balance - - disable_colocate_balance - - disable_colocate_balance_between_groups - - disable_datev1 - - disable_decimalv2 - - disable_disk_balance - - disable_hadoop_load - - disable_load_job - - disable_local_deploy_manager_drop_node - - disable_show_stream_load - - disable_storage_medium_check - - disable_tablet_scheduler - - disallow_create_catalog_with_resource - - div_precision_increment - - drop_backend_after_decommission - - drop_rpc_retry_num - - dynamic_partition_check_interval_seconds - - dynamic_partition_enable - - edit_log_roll_num - - enable_access_file_without_broker - - enable_alter_queue_prop_sync - - enable_array_type - - enable_batch_delete_by_default - - enable_cloud_multi_replica - - enable_collect_internal_query_profile - - enable_cooldown_replica_affinity - - enable_create_bitmap_index_as_inverted_index - - enable_create_hive_bucket_table - - enable_date_conversion - - enable_decimal_conversion - - enable_disk_balance_for_single_replica - - enable_force_drop_redundant_replica - - enable_hidden_version_column_by_default - - enable_local_replica_selection - - enable_local_replica_selection_fallback - - 
enable_mow_light_delete - - enable_odbc_mysql_broker_table - - enable_pipeline_load - - enable_profile_when_analyze - - enable_quantile_state_type - - enable_query_hit_stats - - enable_query_hive_views - - enable_query_queue - - enable_round_robin_create_tablet - - enable_urgent_balance_no_low_backend - - enable_cpu_hard_limit - - enable_mtmv - - enable_nereids_load - - enable_single_replica_load - - enable_workload_group - - min_tablets_for_dup_table_shuffle - - sql_cache_manage_num - - expire_sql_cache_in_fe_second - - expr_children_limit - - expr_depth_limit - - fetch_stream_load_record_interval_second - - fix_tablet_partition_id_eq_0 - - force_do_metadata_checkpoint - - force_olap_table_replication_allocation - - force_olap_table_replication_num - - fuzzy_test_type - - get_be_resource_usage_interval_ms - - hadoop_load_default_timeout_second - - history_job_keep_max_second - - hive_default_file_format - - hive_metastore_client_timeout_second - - hive_stats_partition_sample_size - - hms_events_batch_size_per_rpc - - ignore_backup_not_support_table_type - - ignore_meta_check - - insert_load_default_timeout_second - - inverted_index_storage_format - - keep_scheduler_mtmv_task_when_job_deleted - - label_keep_max_second - - label_num_threshold - - ldap_cache_timeout_day - - ldap_user_cache_timeout_s - - manual_drop_replica_valid_second - - max_allowed_in_element_num_of_delete - - max_auto_partition_num - - max_backend_heartbeat_failure_tolerance_count - - max_backup_restore_job_num_per_db - - max_backup_tablets_per_job - - max_balancing_tablets - - max_broker_concurrency - - max_bytes_per_broker_scanner - - max_bytes_sync_commit - - max_cbo_statistics_task_timeout_sec - - max_clone_task_timeout_sec - - max_create_table_timeout_second - - max_distribution_pruner_recursion_depth - - max_dynamic_partition_num - - max_error_tablet_of_broker_load - - max_get_kafka_meta_timeout_second - - max_load_timeout_second - - max_lock_hold_threshold_seconds - - 
max_multi_partition_num - - max_pending_mtmv_scheduler_task_num - - max_point_query_retry_time - - max_query_profile_num - - max_query_retry_time - - max_replica_count_when_schema_change - - max_replication_num_per_tablet - - max_routine_load_job_num - - max_routine_load_task_concurrent_num - - max_routine_load_task_num_per_be - - max_running_mtmv_scheduler_task_num - - max_running_rollup_job_num_per_table - - max_running_txn_num_per_db - - max_same_name_catalog_trash_num - - max_scheduling_tablets - - max_small_file_number - - max_small_file_size_bytes - - max_stream_load_record_size - - max_stream_load_timeout_second - - max_tolerable_backend_down_num - - max_unfinished_load_job - - maximum_number_of_export_partitions - - maximum_parallelism_of_export_job - - maximum_tablets_of_outfile_in_export - - meta_service_connection_age_base_minutes - - meta_service_connection_pool_size - - meta_service_connection_pooled - - meta_service_rpc_retry_times - - metadata_checkpoint_memory_threshold - - min_backend_num_for_external_table - - min_bytes_indicate_replica_too_large - - min_bytes_per_broker_scanner - - min_bytes_sync_commit - - min_clone_task_timeout_sec - - min_create_table_timeout_second - - min_load_replica_num - - min_load_timeout_second - - min_replication_num_per_tablet - - min_sync_commit_size - - min_version_count_indicate_replica_compaction_too_slow - - multi_partition_name_prefix - - mysqldb_replace_name - - partition_rebalance_max_moves_num_per_selection - - partition_rebalance_move_expire_after_access - - period_of_auto_resume_min - - plugin_enable - - prefer_compute_node_for_external_table - - publish_fail_log_interval_second - - publish_topic_info_interval_ms - - publish_version_check_alter_replica - - publish_version_timeout_second - - publish_wait_time_second - - pull_request_id - - qe_slow_log_ms - - query_audit_log_timeout_ms - - query_colocate_join_memory_limit_penalty_factor - - query_metadata_name_ids_timeout - - query_queue_by_be_used_memory - - 
query_queue_update_interval_ms - - recover_with_empty_tablet - - remote_fragment_exec_timeout_ms - - repair_slow_replica - - report_queue_size - - restore_download_task_num_per_be - - schedule_batch_size - - schedule_slot_num_per_hdd_path - - schedule_slot_num_per_ssd_path - - scheduler_mtmv_job_expired - - scheduler_mtmv_task_expired - - show_details_for_unaccessible_tablet - - skip_compaction_slower_replica - - spark_home_default_dir - - spark_load_default_timeout_second - - split_assigner_max_split_num_variance - - split_assigner_min_consistent_hash_candidate_num - - split_assigner_min_random_candidate_num - - split_assigner_optimized_local_scheduling - - split_assigner_virtual_node_number - - storage_flood_stage_left_capacity_bytes - - storage_flood_stage_usage_percent - - storage_high_watermark_usage_percent - - storage_min_left_capacity_bytes - - stream_load_default_memtable_on_sink_node - - stream_load_default_precommit_timeout_second - - stream_load_default_timeout_second - - streaming_label_keep_max_second - - sts_duration - - sync_commit_interval_second - - sync_image_timeout_second - - table_name_length_limit - - tablet_create_timeout_second - - tablet_delete_timeout_second - - tablet_further_repair_max_times - - tablet_further_repair_timeout_second - - tablet_recent_load_failed_second - - tablet_repair_delay_factor_second - - tablet_schedule_high_priority_second - - urgent_balance_disk_usage_extra_threshold - - urgent_balance_pick_large_disk_usage_percentage - - urgent_balance_pick_large_tablet_num_threshold - - urgent_balance_shuffle_large_tablet_percentage - - use_compact_thrift_rpc - - use_fuzzy_session_variable - - use_mysql_bigint_for_largeint - - used_capacity_percent_max_diff - - valid_version_count_delta_ratio_between_replicas - - wait_internal_group_commit_finish - - workload_group_max_num - - workload_max_action_num_in_policy - - workload_max_condition_num_in_policy - - workload_max_policy_num - - workload_runtime_status_thread_interval_ms - - 
workload_sched_policy_interval_ms - -immutableParameters: - - custom_config_dir - - arrow_flight_sql_port - - meta_dir - - audit_log_dir - - query_port - - rpc_port - - edit_log_port - - http_port - - https_port - - enable_ssl - - mysql_ssl_default_ca_certificate - - mysql_ssl_default_ca_certificate_password - - mysql_ssl_default_server_certificate - - mysql_ssl_default_server_certificate_password - - skip_localhost_auth_check - - JAVA_OPTS - - - \ No newline at end of file diff --git a/addons/doris/config/fe-config.tpl b/addons/doris/config/fe-config.tpl deleted file mode 100644 index 74086036e..000000000 --- a/addons/doris/config/fe-config.tpl +++ /dev/null @@ -1,500 +0,0 @@ - -{{- $phy_memory := getContainerMemory ( index $.podSpec.containers 0 ) }} -{{- $heap_size := mul (div $phy_memory 10) 8 }} - -JAVA_OPTS="-Xmx{{ $heap_size }} -XX:+UseMembar -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xloggc:/opt/apache-doris/fe/log/fe.gc.log.$DATE" - - -# tls -{{- if eq (index $ "TLS_ENABLED") "true" }} -enable_ssl=true -mysql_ssl_default_ca_certificate=/opt/apache-doris/fe/mysql_ssl_default_certificate/ca_certificate.p12 -mysql_ssl_default_ca_certificate_password=doris -mysql_ssl_default_server_certificate=/opt/apache-doris/fe/mysql_ssl_default_certificate/server_certificate.p12 -mysql_ssl_default_server_certificate_password=doris -{{ else }} -enable_ssl=false -{{- end }} - - - -abort_txn_after_lost_heartbeat_time_second=300 -access_control_allowed_origin_domain=* -access_controller_type=default -agent_task_resend_wait_time_ms=5000 -allow_replica_on_same_host=false -alter_table_timeout_second=2592000 -analyze_record_limit=20000 -arrow_flight_sql_port=-1 -arrow_flight_token_alive_time=4320 -arrow_flight_token_cache_size=512 -async_loading_load_task_pool_size=10 
-async_pending_load_task_pool_size=10 -async_task_consumer_thread_num=64 -async_task_queen_size=1024 -audit_event_log_queue_size=250000 -audit_log_delete_age=30d -audit_log_dir=/opt/apache-doris/fe/log -audit_log_enable_compress=false -audit_log_roll_interval=DAY -audit_log_roll_num=90 -audit_sys_accumulated_file_size=4 -authentication_type=default -auto_analyze_simultaneously_running_task_num=1 -auto_check_statistics_in_minutes=5 -autobucket_max_buckets=128 -autobucket_min_buckets=1 -backend_load_capacity_coeficient=-1.0 -backend_proxy_num=48 -backend_rpc_timeout_ms=60000 -backup_job_default_timeout_ms=86400000 -backup_plugin_path=/tools/trans_file_tool/trans_files.sh -backup_upload_task_num_per_be=3 -balance_be_then_disk=true -balance_load_score_threshold=0.1 -balance_slot_num_per_path=1 -bdbje_file_logging_level=INFO -bdbje_free_disk_bytes=1073741824 -bdbje_heartbeat_timeout_second=30 -bdbje_lock_timeout_second=5 -bdbje_replica_ack_timeout_second=10 -bdbje_reserved_disk_bytes=1073741824 -be_exec_version=5 -be_rebalancer_fuzzy_test=false -be_rebalancer_idle_seconds=0 -be_report_query_statistics_timeout_ms=60000 -blacklist_duration_second=120 -broker_load_default_timeout_second=14400 -broker_timeout_ms=10000 -cache_enable_partition_mode=true -cache_enable_sql_mode=true -cache_last_version_interval_second=30 -cache_result_max_data_size=31457280 -cache_result_max_row_count=3000 -capacity_used_percent_high_water=0.75 -catalog_trash_expire_second=86400 -catalog_trash_ignore_min_erase_latency=false -catalog_try_lock_timeout_ms=5000 -cbo_concurrency_statistics_task_num=10 -cbo_default_sample_percentage=10 -cbo_max_statistics_job_num=20 -check_consistency_default_timeout_second=600 -check_java_version=true -check_table_lock_leaky=false -check_wal_queue_timeout_threshold=180000 -cloud_cluster_check_interval_second=10 -cloud_cold_read_percent=10 -cloud_meta_service_rpc_failed_retry_times=200 -cloud_replica_num=3 
-cloud_sql_server_cluster_id=RESERVED_CLUSTER_ID_FOR_SQL_SERVER -cloud_sql_server_cluster_name=RESERVED_CLUSTER_NAME_FOR_SQL_SERVER -cluster_id=-1 -colocate_group_relocate_delay_second=1800 -commit_timeout_second=30 -consistency_check_end_time=23 -consistency_check_start_time=23 -cpu_resource_limit_per_analyze_task=1 -create_tablet_round_robin_from_start=false -custom_config_dir=/opt/apache-doris/fe/conf -db_used_data_quota_update_interval_secs=300 -deadlock_detection_interval_minute=5 -decommission_tablet_check_threshold=5000 -decommission_tablet_wait_time_seconds=3600 -default_db_data_quota_bytes=1125899906842624 -default_db_max_running_txn_num=-1 -default_db_replica_quota_size=1073741824 -default_get_version_from_ms_timeout_second=3 -default_load_parallelism=8 -default_max_filter_ratio=0.0 -default_max_query_instances=-1 -default_schema_change_scheduler_interval_millisecond=500 -default_storage_medium=HDD -delete_job_max_timeout_second=300 -desired_max_waiting_jobs=100 -diagnose_balance_max_tablet_num_diff=50 -diagnose_balance_max_tablet_num_ratio=1.1 -disable_backend_black_list=false -disable_balance=false -disable_colocate_balance=false -disable_colocate_balance_between_groups=false -disable_datev1=true -disable_decimalv2=true -disable_disk_balance=false -disable_hadoop_load=false -disable_load_job=false -disable_local_deploy_manager_drop_node=true -disable_mini_load=true -disable_show_stream_load=false -disable_storage_medium_check=false -disable_tablet_scheduler=false -disallow_create_catalog_with_resource=true -div_precision_increment=4 -dpp_bytes_per_reduce=104857600 -dpp_config_str={palo-dpp : {hadoop_palo_path : '/dir',hadoop_configs : 'fs.default.name=hdfs://host:port;mapred.job.tracker=host:port;hadoop.job.ugi=user,password'}} -dpp_default_cluster=palo-dpp -dpp_default_config_str={hadoop_configs : 
'mapred.job.priority=NORMAL;mapred.job.map.capacity=50;mapred.job.reduce.capacity=50;mapred.hce.replace.streaming=false;abaci.long.stored.job=true;dce.shuffle.enable=false;dfs.client.authserver.force_stop=true;dfs.client.auth.method=0'} -dpp_hadoop_client_path=/lib/hadoop-client/hadoop/bin/hadoop -drop_backend_after_decommission=true -drop_rpc_retry_num=200 -dynamic_partition_check_interval_seconds=600 -dynamic_partition_enable=true -edit_log_port=9010 -edit_log_roll_num=50000 -edit_log_type=bdb -enable_access_file_without_broker=false -enable_alter_queue_prop_sync=false -enable_array_type=false -enable_batch_delete_by_default=true -enable_bdbje_debug_mode=false -enable_cloud_multi_replica=false -enable_cloud_snapshot_version=true -enable_collect_internal_query_profile=false -enable_concurrent_update=false -enable_convert_light_weight_schema_change=false -enable_cooldown_replica_affinity=true -enable_create_bitmap_index_as_inverted_index=true -enable_create_hive_bucket_table=false -enable_date_conversion=true -enable_deadlock_detection=false -enable_debug_points=false -enable_decimal_conversion=true -enable_delete_existing_files=false -enable_deploy_manager=disable -enable_disk_balance_for_single_replica=false -enable_file_logger=true -enable_force_drop_redundant_replica=false -enable_get_log_file_api=false -enable_hidden_version_column_by_default=true -enable_hms_events_incremental_sync=false -enable_http_server_v2=true -enable_java_udf=true -enable_job_schedule_second_for_test=false -enable_local_replica_selection=false -enable_local_replica_selection_fallback=false -enable_metric_calculator=true -enable_mow_light_delete=false -enable_multi_tags=false -enable_odbc_mysql_broker_table=false -enable_outfile_to_local=false -enable_pipeline_load=true -enable_profile_when_analyze=false -enable_proxy_protocol=false -enable_quantile_state_type=true -enable_query_hit_stats=false -enable_query_hive_views=true -enable_query_queue=true -enable_round_robin_create_tablet=true 
-enable_storage_policy=true -enable_sts_vpc=true -enable_token_check=true -enable_urgent_balance_no_low_backend=true -es_state_sync_interval_second=10 -enable_all_http_auth=false -enable_cpu_hard_limit=false -enable_feature_binlog=false -enable_fqdn_mode=true -enable_https=false -enable_mtmv=false -enable_nereids_load=false -enable_single_replica_load=false -enable_workload_group=true -max_binlog_messsage_size=1073741824 -min_tablets_for_dup_table_shuffle=64 -sql_cache_manage_num=100 -expire_sql_cache_in_fe_second=300 -expr_children_limit=10000 -expr_depth_limit=3000 -external_cache_expire_time_minutes_after_access=10 -fetch_stream_load_record_interval_second=120 -finish_job_max_saved_second=259200 -finished_job_cleanup_threshold_time_hour=24 -fix_tablet_partition_id_eq_0=false -forbid_running_alter_job=false -force_do_metadata_checkpoint=false -force_olap_table_replication_num=0 -force_sqlserver_jdbc_encrypt_false=false -get_be_resource_usage_interval_ms=10000 -group_commit_data_bytes_default_value=134217728 -group_commit_interval_ms_default_value=10000 -grpc_keep_alive_second=10 -grpc_max_message_size_bytes=2147483647 -grpc_threadmgr_threads_nums=4096 -hadoop_load_default_timeout_second=259200 -heartbeat_interval_second=10 -heartbeat_mgr_blocking_queue_size=1024 -heartbeat_mgr_threads_num=8 -history_job_keep_max_second=604800 -hive_default_file_format=orc -hive_metastore_client_timeout_second=10 -hive_stats_partition_sample_size=30 -hms_events_batch_size_per_rpc=500 -hms_events_polling_interval_ms=10000 -http_load_submitter_max_worker_threads=2 -http_port=8030 -http_sql_submitter_max_worker_threads=2 -https_port=8050 -ignore_backup_not_support_table_type=false -ignore_bdbje_log_checksum_read=false -ignore_meta_check=false -ignore_unknown_metadata_module=false -info_sys_accumulated_file_size=4 -insert_load_default_timeout_second=14400 -inverted_index_storage_format=V1 -jdbc_driver_secure_path=* -jdbc_drivers_dir=/opt/apache-doris/fe/jdbc_drivers 
-jdbc_mysql_unsupported_pushdown_functions=[date_trunc, money_format, negative] -jetty_server_acceptors=2 -jetty_server_max_http_header_size=1048576 -jetty_server_max_http_post_size=104857600 -jetty_server_selectors=4 -jetty_server_workers=0 -jetty_threadPool_maxThreads=400 -jetty_threadPool_minThreads=20 -job_dispatch_timer_job_queue_size=1024 -job_dispatch_timer_job_thread_num=2 -job_insert_task_consumer_thread_num=10 -job_mtmv_task_consumer_thread_num=10 -keep_scheduler_mtmv_task_when_job_deleted=false -key_store_alias=doris_ssl_certificate -key_store_path=/opt/apache-doris/fe/conf/ssl/doris_ssl_certificate.keystore -key_store_type=JKS -label_clean_interval_second=3600 -label_keep_max_second=259200 -label_num_threshold=2000 -label_regex_length=128 -ldap_admin_name=cn=admin,dc=domain,dc=com -ldap_authentication_enabled=false -ldap_cache_timeout_day=30 -ldap_group_basedn=ou=group,dc=domain,dc=com -ldap_host=127.0.0.1 -ldap_pool_max_active=8 -ldap_pool_max_idle=8 -ldap_pool_max_total=-1 -ldap_pool_max_wait=-1 -ldap_pool_min_idle=0 -ldap_pool_test_on_borrow=false -ldap_pool_test_on_return=false -ldap_pool_test_while_idle=false -ldap_pool_when_exhausted=1 -ldap_port=389 -ldap_user_basedn=ou=people,dc=domain,dc=com -ldap_user_cache_timeout_s=43200 -ldap_user_filter=(&(uid={login})) -load_checker_interval_second=5 -locale=zh_CN.UTF-8 -lock_reporting_threshold_ms=500 -log_roll_size_mb=1024 -log_rollover_strategy=age -lower_case_table_names=1 -manual_drop_replica_valid_second=86400 -master_sync_policy=SYNC -max_agent_task_threads_num=4096 -max_allowed_in_element_num_of_delete=1024 -max_auto_partition_num=2000 -max_backend_heartbeat_failure_tolerance_count=1 -max_backup_restore_job_num_per_db=10 -max_backup_tablets_per_job=300000 -max_balancing_tablets=100 -max_bdbje_clock_delta_ms=5000 -max_be_exec_version=5 -max_broker_concurrency=10 -max_bytes_per_broker_scanner=536870912000 -max_bytes_sync_commit=67108864 -max_cbo_statistics_task_timeout_sec=300 
-max_clone_task_timeout_sec=7200 -max_create_table_timeout_second=3600 -max_distribution_pruner_recursion_depth=100 -max_dynamic_partition_num=500 -max_error_tablet_of_broker_load=3 -max_external_cache_loader_thread_pool_size=64 -max_external_file_cache_num=10000 -max_external_schema_cache_num=10000 -max_external_table_cache_num=1000 -max_external_table_row_count_cache_num=100000 -max_get_kafka_meta_timeout_second=60 -max_hive_list_partition_num=-1 -max_hive_partition_cache_num=10000 -max_hive_partition_table_cache_num=1000 -max_load_timeout_second=259200 -max_lock_hold_threshold_seconds=10 -max_meta_object_cache_num=1000 -max_multi_partition_num=4096 -max_mysql_service_task_threads_num=4096 -max_pending_mtmv_scheduler_task_num=100 -max_persistence_task_count=100 -max_point_query_retry_time=2 -max_query_profile_num=100 -max_query_retry_time=3 -max_remote_file_system_cache_num=100 -max_replica_count_when_schema_change=100000 -max_replication_num_per_tablet=32767 -max_routine_load_job_num=100 -max_routine_load_task_concurrent_num=256 -max_routine_load_task_num_per_be=1024 -max_running_mtmv_scheduler_task_num=100 -max_running_rollup_job_num_per_table=1 -max_running_txn_num_per_db=1000 -max_same_name_catalog_trash_num=3 -max_scheduling_tablets=2000 -max_small_file_number=100 -max_small_file_size_bytes=1048576 -max_stream_load_record_size=5000 -max_stream_load_timeout_second=259200 -max_sync_task_threads_num=10 -max_tolerable_backend_down_num=0 -max_unfinished_load_job=1000 -maximum_number_of_export_partitions=2000 -maximum_parallelism_of_export_job=50 -maximum_tablets_of_outfile_in_export=10 -meta_delay_toleration_second=300 -meta_dir=/opt/apache-doris/fe/doris-meta -meta_publish_timeout_ms=1000 -meta_service_connection_age_base_minutes=5 -meta_service_connection_pool_size=20 -meta_service_connection_pooled=true -meta_service_rpc_retry_times=200 -metadata_checkpoint_memory_threshold=70 -min_backend_num_for_external_table=-1 -min_be_exec_version=0 
-min_bytes_indicate_replica_too_large=2147483648 -min_bytes_per_broker_scanner=67108864 -min_bytes_sync_commit=15728640 -min_clone_task_timeout_sec=180 -min_create_table_timeout_second=30 -min_load_replica_num=-1 -min_load_timeout_second=1 -min_replication_num_per_tablet=1 -min_sync_commit_size=10000 -min_version_count_indicate_replica_compaction_too_slow=200 -multi_partition_name_prefix=p_ -mysql_load_in_memory_record=20 -mysql_load_thread_pool=4 -mysql_nio_backlog_num=1024 -mysql_service_io_threads_num=4 - -mysqldb_replace_name=mysql -nereids_trace_log_dir=/opt/apache-doris/fe/log/nereids_trace -partition_info_update_interval_secs=60 -partition_rebalance_max_moves_num_per_selection=10 -partition_rebalance_move_expire_after_access=600 -period_analyze_simultaneously_running_task_num= -period_of_auto_resume_min=10 -plugin_dir=/opt/apache-doris/fe/plugins -plugin_enable=true -point_query_timeout_ms=10000 -prefer_compute_node_for_external_table=false -proxy_auth_enable=false -proxy_auth_magic_prefix=x@8 -publish_fail_log_interval_second=300 -publish_topic_info_interval_ms=30000 -publish_version_check_alter_replica=true -publish_version_interval_ms=10 -publish_version_timeout_second=30 -publish_wait_time_second=300 -pull_request_id=0 -qe_max_connection=1024 -qe_slow_log_ms=5000 -query_audit_log_timeout_ms=5000 -query_colocate_join_memory_limit_penalty_factor=1 -query_metadata_name_ids_timeout=3 -query_port=9030 -query_queue_by_be_used_memory=-1.0 -query_queue_update_interval_ms=5000 -ranger_cache_size=10000 -recover_with_empty_tablet=false -remote_fragment_exec_timeout_ms=30000 -repair_slow_replica=false -replica_ack_policy=SIMPLE_MAJORITY -replica_sync_policy=SYNC -report_queue_size=100 -restore_download_task_num_per_be=3 -rpc_port=9020 -schedule_batch_size=50 -schedule_slot_num_per_hdd_path=4 -schedule_slot_num_per_ssd_path=8 -scheduler_mtmv_job_expired=86400 -scheduler_mtmv_task_expired=86400 -show_details_for_unaccessible_tablet=true 
-skip_compaction_slower_replica=true -skip_localhost_auth_check=true -small_file_dir=/opt/apache-doris/fe/small_files -spark_dpp_version=1.2-SNAPSHOT -spark_home_default_dir=/opt/apache-doris/fe/lib/spark2x -spark_launcher_log_dir=/opt/apache-doris/fe/log/spark_launcher_log -spark_load_checker_interval_second=60 -spark_load_default_timeout_second=86400 -split_assigner_max_split_num_variance=1 -split_assigner_min_consistent_hash_candidate_num=2 -split_assigner_min_random_candidate_num=2 -split_assigner_optimized_local_scheduling=true -split_assigner_virtual_node_number=256 -ssl_force_client_auth=false -ssl_trust_store_type=PKCS12 -statistics_simultaneously_running_task_num=3 -statistics_sql_mem_limit_in_bytes=2147483648 -statistics_sql_parallel_exec_instance_num=1 -stats_cache_size=500000 -storage_flood_stage_left_capacity_bytes=1073741824 -storage_flood_stage_usage_percent=95 -storage_high_watermark_usage_percent=85 -storage_min_left_capacity_bytes=2147483648 -stream_load_default_memtable_on_sink_node=false -stream_load_default_precommit_timeout_second=3600 -stream_load_default_timeout_second=259200 -streaming_label_keep_max_second=43200 -sts_duration=3600 -sync_checker_interval_second=5 -sync_commit_interval_second=10 -sync_image_timeout_second=300 -sys_log_delete_age=7d -sys_log_dir=/opt/apache-doris/fe/log -sys_log_enable_compress=false -sys_log_level=INFO -sys_log_mode=NORMAL -sys_log_roll_interval=DAY -sys_log_roll_num=10 -sys_log_verbose_modules= -table_name_length_limit=64 -tablet_checker_interval_ms=20000 -tablet_create_timeout_second=2 -tablet_delete_timeout_second=2 -tablet_further_repair_max_times=5 -tablet_further_repair_timeout_second=1200 -tablet_rebalancer_type=BeLoad -tablet_recent_load_failed_second=1800 -tablet_repair_delay_factor_second=60 -tablet_schedule_high_priority_second=1800 -tablet_schedule_interval_ms=1000 -tablet_stat_update_interval_second=60 -thrift_backlog_num=1024 -thrift_client_timeout_ms=0 -thrift_max_frame_size=16384000 
-thrift_max_message_size=104857600 -thrift_server_max_worker_threads=4096 -thrift_server_type=THREAD_POOL -tmp_dir=/opt/apache-doris/fe/temp_dir -token_generate_period_hour=12 -token_queue_size=6 -transaction_clean_interval_second=30 -txn_rollback_limit=100 -urgent_balance_disk_usage_extra_threshold=0.05 -urgent_balance_pick_large_disk_usage_percentage=80 -urgent_balance_pick_large_tablet_num_threshold=1000.0 -urgent_balance_shuffle_large_tablet_percentage=1 -use_compact_thrift_rpc=true -use_fuzzy_session_variable=false -use_mysql_bigint_for_largeint=false -use_new_tablet_scheduler=true -used_capacity_percent_max_diff=0.3 -valid_version_count_delta_ratio_between_replicas=0.5 -wait_internal_group_commit_finish=false -warn_sys_accumulated_file_size=2 -with_k8s_certs=false -workload_group_max_num=15 -workload_max_action_num_in_policy=5 -workload_max_condition_num_in_policy=5 -workload_max_policy_num=25 -workload_runtime_status_thread_interval_ms=2000 -workload_sched_policy_interval_ms=10000 -yarn_client_path=/opt/apache-doris/fe/lib/yarn-client/hadoop/bin/yarn -yarn_config_dir=/opt/apache-doris/fe/lib/yarn-config diff --git a/addons/doris/dataprotection/backup.sh b/addons/doris/dataprotection/backup.sh deleted file mode 100644 index 2bfc6dfbc..000000000 --- a/addons/doris/dataprotection/backup.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash - -export PATH="$PATH:$DP_DATASAFED_BIN_PATH" -export DATASAFED_BACKEND_BASE_PATH="$DP_BACKUP_BASE_PATH" - -SQL_CMD="mysql -N -B -h ${DP_DB_HOST}.${POD_NAMESPACE}.svc.cluster.local -P ${FE_QUERY_PORT} -u root -p${DP_DB_PASSWORD} -e" - -# Save backup status info file for syncing progress. -# timeFormat: %Y-%m-%dT%H:%M:%SZ -DP_save_backup_status_info() { - local totalSize=$1 - local startTime=$2 - local stopTime=$3 - local timeZone=$4 - local extras=$5 - local timeZoneStr="" - if [ ! 
-z ${timeZone} ]; then - timeZoneStr=",\"timeZone\":\"${timeZone}\"" - fi - if [ -z "${stopTime}" ];then - echo "{\"totalSize\":\"${totalSize}\"}" > ${DP_BACKUP_INFO_FILE} - elif [ -z "${startTime}" ];then - echo "{\"totalSize\":\"${totalSize}\",\"extras\":[${extras}],\"timeRange\":{\"end\":\"${stopTime}\"${timeZoneStr}}}" > ${DP_BACKUP_INFO_FILE} - else - echo "{\"totalSize\":\"${totalSize}\",\"extras\":[${extras}],\"timeRange\":{\"start\":\"${startTime}\",\"end\":\"${stopTime}\"${timeZoneStr}}}" > ${DP_BACKUP_INFO_FILE} - fi -} - -do_backup_and_wait() { - all_dbs=$($SQL_CMD "SHOW DATABASES" | grep -v "information_schema" | grep -v "mysql") - # DP_BACKUP_NAME=brier-5f7695dcfb-20251023071631 - for db in $all_dbs; do - $SQL_CMD "BACKUP SNAPSHOT $db.\`${db}_${DP_BACKUP_ID}\` TO $DP_DORIS_REPOSITORY" -D "$db" - done - - wait_backup_complete "$all_dbs" -} - -wait_backup_complete() { - local max_wait_time=3600 # 1h - local wait_interval=10 # 10s - local elapsed_time=0 - local all_finished=false - - DP_log "Wait for backup tasks to complete in $max_wait_time seconds..." - local all_dbs=$1 - DP_log "Backup databases: $all_dbs" - - while [ $elapsed_time -lt $max_wait_time ]; do - all_finished=true - - for db in $all_dbs; do - local backup_status - DP_log "Check backup status for database $db" - backup_status=$($SQL_CMD "SHOW BACKUP" -D "$db" | grep "$db.$BACKUP_ID" | awk '{print $4}') - if [ -z "$backup_status" ]; then - DP_log "Warning: no backup status found for database $db" - all_finished=false - elif [ "$backup_status" != "FINISHED" ]; then - DP_log "Backup task for database $db is not finished, status: $backup_status" - all_finished=false - else - DP_log "Backup task for database $db is finished" - fi - done - - if $all_finished; then - DP_log "All backup tasks are finished" - return - fi - - DP_log "Wait $wait_interval seconds to check backup status again..." 
- sleep $wait_interval - elapsed_time=$((elapsed_time + wait_interval)) - done - - DP_error_log "Error: Backup tasks timeout after $elapsed_time seconds" -} - -backup_meta() { - tar -cvf - /opt/apache-doris/fe/doris-meta | datasafed push -z zstd-fastest - "${DP_BACKUP_NAME}.tar.zst" -} - -main() { - start_time=$(date -u '+%Y-%m-%dT%H:%M:%SZ') - parse_datasafed_conf - prepare_s3_repository - do_backup_and_wait - cleanup_repository - end_time=$(date -u '+%Y-%m-%dT%H:%M:%SZ') - total_size=0 - for _ in $(seq 1 5); do - output=$(datasafed stat / 2>&1) - DP_log "datasafed stat / output: ${output}" - if echo "${output}" | grep -q 'TotalSize:'; then - total_size=$(echo "${output}" | grep 'TotalSize:' | awk '{print $2}') - if [[ -n "${total_size}" && "${total_size}" -gt 0 ]]; then - break - fi - fi - DP_log "Failed to get a valid total size. Full output: ${output}" - sleep 2 - done - total_size=$(datasafed stat / | grep 'TotalSize:' | awk '{print $2}') - DP_save_backup_status_info "${total_size:-0}" "$start_time" "$end_time" "" "" -} - -main \ No newline at end of file diff --git a/addons/doris/dataprotection/common.sh b/addons/doris/dataprotection/common.sh deleted file mode 100644 index ee5a73378..000000000 --- a/addons/doris/dataprotection/common.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/bin/bash - -export PATH="$PATH:$DP_DATASAFED_BIN_PATH" -export DATASAFED_BACKEND_BASE_PATH="$DP_BACKUP_BASE_PATH" - -DP_log() { - msg=$1 - local curr_date=$(date -u '+%Y-%m-%d %H:%M:%S') - echo "${curr_date} INFO: $msg" -} - -# log error info -DP_error_log() { - msg=$1 - local curr_date=$(date -u '+%Y-%m-%d %H:%M:%S') - echo "${curr_date} ERROR: $msg" - exit 1 -} - -empty_check() { - var_name=$1 - if [ -z "${!var_name}" ]; then - DP_error_log "$var_name is empty" - fi -} - -parse_datasafed_conf() { - local conf_file="/etc/datasafed/datasafed.conf" - - if [ ! 
-f "$conf_file" ]; then - DP_error_log "s3 repository config file not found: $conf_file" - fi - - - local s3_type="" - local s3_provider="" - local s3_env_auth="" - local s3_access_key_id="" - local s3_secret_access_key="" - local s3_region="" - local s3_endpoint="" - local s3_root="" - local s3_no_check_certificate="" - local s3_no_check_bucket="" - local s3_chunk_size="" - - while IFS='=' read -r key value; do - key=$(echo "$key" | xargs) - value=$(echo "$value" | xargs) - - case "$key" in - "type") - s3_type="$value" - ;; - "provider") - s3_provider="$(echo "$value" | tr '[:upper:]' '[:lower:]')" - ;; - "env_auth") - s3_env_auth="$value" - ;; - "access_key_id") - s3_access_key_id="$value" - ;; - "secret_access_key") - s3_secret_access_key="$value" - ;; - "region") - s3_region="$value" - ;; - "endpoint") - s3_endpoint=$(echo "$value" | tr -d '`"') - ;; - "root") - s3_root="$value" - ;; - "no_check_certificate") - s3_no_check_certificate="$value" - ;; - "no_check_bucket") - s3_no_check_bucket="$value" - ;; - "chunk_size") - s3_chunk_size="$value" - ;; - esac - done < <(grep -v '^\[' "$conf_file" | grep '=') - - export DP_S3_TYPE="$s3_type" - export DP_S3_PROVIDER="$s3_provider" - export DP_S3_ENV_AUTH="$s3_env_auth" - export DP_S3_ACCESS_KEY_ID="$s3_access_key_id" - export DP_S3_SECRET_ACCESS_KEY="$s3_secret_access_key" - export DP_S3_REGION="$s3_region" - # - export DP_S3_ENDPOINT="$s3_endpoint" - export DP_S3_ROOT="$s3_root" - export DP_S3_NO_CHECK_CERTIFICATE="$s3_no_check_certificate" - export DP_S3_NO_CHECK_BUCKET="$s3_no_check_bucket" - export DP_S3_CHUNK_SIZE="$s3_chunk_size" - BACKUP_ID=$(echo "$DP_BACKUP_NAME" | awk -F'-' '{print $NF}') - export DP_BACKUP_ID="$BACKUP_ID" - empty_check "DP_S3_TYPE" - empty_check "DP_S3_PROVIDER" - empty_check "DP_S3_ACCESS_KEY_ID" - empty_check "DP_S3_SECRET_ACCESS_KEY" - empty_check "DP_S3_ENDPOINT" - empty_check "DP_S3_ROOT" - empty_check "DP_BACKUP_ID" - - # parse s3 repository name from endpoint - if [ -n 
"$s3_endpoint" ]; then - endpoint_without_protocol=${s3_endpoint#*://} - host_part=${endpoint_without_protocol%%:*} - host_part=${host_part%%/*} - cluster_name=${host_part%%.*} - # replace - with _ - cluster_name_fixed=${cluster_name//-/_} - export DP_DORIS_REPOSITORY="${cluster_name_fixed}_${DP_BACKUP_ID}" - fi - prepare_repository_location - - DP_log "S3 PROVIDER: $DP_S3_PROVIDER" - DP_log "S3 ENDPOINT: $DP_S3_ENDPOINT" - DP_log "S3 ROOT: $DP_S3_ROOT" - DP_log "DORIS REPOSITORY NAME: $DP_DORIS_REPOSITORY" - DP_log "S3 LOCATION: $DP_S3_LOCATION" - DP_log "BACKUP ID: $DP_BACKUP_ID" -} - -prepare_repository_location() { - local location="" - if [[ "$DP_S3_ROOT" == s3://* ]]; then - location="$DP_S3_ROOT" - else - location="s3://$DP_S3_ROOT" - fi - - location+="$DP_BACKUP_BASE_PATH" - export DP_S3_LOCATION="$location" -} - -prepare_s3_repository() { - empty_check "DP_DORIS_REPOSITORY" - empty_check "DP_S3_LOCATION" - local create_repo_sql="CREATE REPOSITORY \`$DP_DORIS_REPOSITORY\` " - create_repo_sql+="WITH S3 " - create_repo_sql+="ON LOCATION \"$DP_S3_LOCATION\" " - create_repo_sql+="PROPERTIES (" - create_repo_sql+="\"s3.endpoint\" = \"$DP_S3_ENDPOINT\", " - create_repo_sql+="\"s3.access_key\" = \"$DP_S3_ACCESS_KEY_ID\", " - create_repo_sql+="\"s3.secret_key\" = \"$DP_S3_SECRET_ACCESS_KEY\"" - - if [ -n "$DP_S3_REGION" ]; then - create_repo_sql+=", \"s3.region\" = \"$DP_S3_REGION\"" - fi - - # minio - if [ "$DP_S3_PROVIDER" = "minio" ]; then - create_repo_sql+=", \"s3.region\" = \"dummy-region\"" - create_repo_sql+=", \"use_path_style\" = \"true\"" - fi - - - create_repo_sql+=");" - - DP_log "Try to create repository: $create_repo_sql" - - if ! 
$SQL_CMD "$create_repo_sql"; then - DP_error_log "Failed to create repository: $DP_DORIS_REPOSITORY" - fi - - repo_exists=$($SQL_CMD "SHOW REPOSITORIES" | grep -c "$DP_DORIS_REPOSITORY") - if [ "$repo_exists" -gt 0 ]; then - DP_log "Repository '$DP_DORIS_REPOSITORY' created successfully" - else - DP_error_log "Error: Failed to create repository '$DP_DORIS_REPOSITORY'" - fi -} - -cleanup_repository() { - empty_check "DP_DORIS_REPOSITORY" - DP_log "Try to drop repository: $DP_DORIS_REPOSITORY" - if ! $SQL_CMD "DROP REPOSITORY \`$DP_DORIS_REPOSITORY\`"; then - DP_error_log "Failed to drop repository: $DP_DORIS_REPOSITORY" - fi - DP_log "Repository '$DP_DORIS_REPOSITORY' dropped successfully" -} \ No newline at end of file diff --git a/addons/doris/dataprotection/download-meta.sh b/addons/doris/dataprotection/download-meta.sh deleted file mode 100644 index 53d6007f8..000000000 --- a/addons/doris/dataprotection/download-meta.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -export PATH="$PATH:$DP_DATASAFED_BIN_PATH" -export DATASAFED_BACKEND_BASE_PATH="$DP_BACKUP_BASE_PATH" - -DP_log() { - msg=$1 - local curr_date=$(date -u '+%Y-%m-%d %H:%M:%S') - echo "${curr_date} INFO: $msg" -} - - -function download_backups() { - local backup_name=${1:-${DP_BACKUP_NAME}} - local target_path=${BACKUP_DIR}/INIT_BACKUPS/${backup_name} - mkdir -p ${target_path} - datasafed pull -d zstd-fastest "${backup_name}.tar.zst" - | tar -xvf - -C ${target_path} - - echo "" > ${BACKUP_DIR}/.restore -} - -# download_backups -# cd -# datasafed pull -d zstd-fastest "${backupFile}" - | tar -xvf - -C ${DATA_DIR} - -# CREATE REPOSITORY `APE_kb10-5cd4db689c-minio` WITH S3 ON LOCATION "s3://kb-backup" PROPERTIES ("s3.endpoint" = " http://kb10-5cd4db689c-minio.kb-system.svc.cluster.local:9000 ", "s3.access_key" = "root", "s3.secret_key" = "6Zn98rv0YOej9970", "s3.region" = "dummy-region", "use_path_style" = "true"); \ No newline at end of file diff --git a/addons/doris/dataprotection/restore.sh 
b/addons/doris/dataprotection/restore.sh deleted file mode 100644 index a56ee36a8..000000000 --- a/addons/doris/dataprotection/restore.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/bin/bash - -export PATH="$PATH:$DP_DATASAFED_BIN_PATH" -export DATASAFED_BACKEND_BASE_PATH="$DP_BACKUP_BASE_PATH" - -SQL_CMD="mysql -N -B -h ${DP_DB_HOST}.${POD_NAMESPACE}.svc.cluster.local -P ${FE_QUERY_PORT} -u root -p${DP_DB_PASSWORD} -e" - -do_snapshot_restore(){ - snapshot_name=$1 - db_name=$(echo "$snapshot_name" | awk -F'_' '{print $1}') -} - -restore_backups(){ - # show all snapshot - local all_dbs="" - local all_snapshots=$($SQL_CMD "SHOW SNAPSHOT ON \`$DP_DORIS_REPOSITORY\`") - - while read -r line; do - snapshot_name=$(echo "$line" | awk '{print $1}') - timestamp=$(echo "$line" | awk '{print $2}') - # ignore empty lines - if [ -z "$snapshot_name" ]; then - continue - fi - - if [[ "$snapshot_name" != *_${DP_BACKUP_ID} ]]; then - DP_log "Skipping snapshot '$snapshot_name' as it does not belong to this backup." - continue - fi - - if [[ "$snapshot_name" == "__internal_schema_"* ]]; then - DP_log "Skipping internal schema snapshot: $snapshot_name" - continue - fi - - # Correctly parse db_name by removing the backup ID suffix - db_name=${snapshot_name%_${DP_BACKUP_ID}} - DP_log "Creating database \`$db_name\` if not exists" - $SQL_CMD "CREATE DATABASE IF NOT EXISTS \`$db_name\`" - if [ $? -ne 0 ]; then - DP_log "Failed to create database \`$db_name\`, skip restore snapshot \`$snapshot_name\`" - continue - fi - local restore_sql - restore_sql="RESTORE SNAPSHOT \`$snapshot_name\` FROM \`$DP_DORIS_REPOSITORY\`" - restore_sql+=" PROPERTIES (" - restore_sql+="\"backup_timestamp\" = \"$timestamp\"" - restore_sql+=");" - DP_log "Restoring snapshot \`$snapshot_name\` for database \`$db_name\`: $restore_sql" - if ! 
$SQL_CMD "$restore_sql" -D "$db_name"; then - DP_error_log "Failed to restore snapshot \`$snapshot_name\` for database \`$db_name\`" - fi - all_dbs+="$db_name " - done <<< "$all_snapshots" - - all_dbs=$(echo "$all_dbs" | xargs) - wait_for_restore_complete "$all_dbs" -} - -wait_for_restore_complete() { - local max_wait_time=3600 # 1h - local wait_interval=10 # 10s - local elapsed_time=0 - local all_finished=false - local all_dbs=$1 - DP_log "Wait for restore tasks to complete in $max_wait_time seconds..." - DP_log "Restore databases: $all_dbs" - - while [ $elapsed_time -lt $max_wait_time ]; do - all_finished=true - for db in $all_dbs; do - local restore_status - DP_log "Check restore status for database $db" - restore_status=$($SQL_CMD "SHOW RESTORE" -D "$db" | grep "$db.$BACKUP_ID" | awk '{print $5}') - if [ -z "$restore_status" ]; then - DP_log "Warning: no restore status found for database $db" - all_finished=false - elif [ "$restore_status" != "FINISHED" ]; then - DP_log "Restore task for database $db is not finished, status: $restore_status" - all_finished=false - else - DP_log "Restore task for database $db is finished" - fi - done - - if $all_finished; then - DP_log "All restore tasks are finished" - return - fi - - DP_log "Wait $wait_interval seconds to check restore status again..." - sleep $wait_interval - elapsed_time=$((elapsed_time + wait_interval)) - done - - DP_error_log "Error: Restore tasks timeout after $elapsed_time seconds" -} - -main() { - parse_datasafed_conf - prepare_s3_repository - restore_backups - cleanup_repository -} - -main \ No newline at end of file diff --git a/addons/doris/scripts/be/be_prestop.sh b/addons/doris/scripts/be/be_prestop.sh deleted file mode 100755 index 3251b7e36..000000000 --- a/addons/doris/scripts/be/be_prestop.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -DORIS_ROOT=${DORIS_ROOT:-"/opt/apache-doris"} -DORIS_HOME=${DORIS_ROOT}/be -BE_CONFFILE=${DORIS_HOME}/conf/be.conf - -parse_confval_from_be_conf() -{ - local confkey=$1 - local confvalue=`grep "^\s*$confkey" $BE_CONFFILE | grep -v "^\s\#" | sed 's|^\s*'$confkey'\s*=\s*\(.*\)\s*$|\1|g'` - echo $confvalue -} - -log_dir=`parse_confval_from_be_conf "LOG_DIR"` - -if [[ "x$log_dir" == "x" ]]; then - log_dir="/opt/apache-doris/be/log" -fi - -log_replace_var_dir=`eval echo "$log_dir"` -kill_time=$(date "+%Y-%m-%d %H:%M:%S") -eval echo "[be_prestop.sh] ${kill_time} kubelet kill call the be_prestop.sh to stop be service." >> "$log_replace_var_dir/be.out" -eval echo "[be_prestop.sh] ${kill_time} kubelet kill call the be_prestop.sh to stop be service ." >> "/proc/1/fd/1" -$DORIS_HOME/bin/stop_be.sh --grace diff --git a/addons/doris/scripts/be/entry_point.sh b/addons/doris/scripts/be/entry_point.sh deleted file mode 100755 index a80252e61..000000000 --- a/addons/doris/scripts/be/entry_point.sh +++ /dev/null @@ -1,163 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -eo pipefail -shopt -s nullglob - -# Constant Definition -readonly DORIS_HOME="/opt/apache-doris" -readonly MAX_RETRY_TIMES=60 -readonly RETRY_INTERVAL=1 - -# Log Function -log_message() { - local level="$1" - shift - local message="$*" - if [ "$#" -eq 0 ]; then - message="$(cat)" - fi - local timestamp="$(date -Iseconds)" - printf '%s [%s] [Entrypoint]: %s\n' "${timestamp}" "${level}" "${message}" -} - -log_info() { - log_message "INFO" "$@" -} - -log_warn() { - log_message "WARN" "$@" >&2 -} - -log_error() { - log_message "ERROR" "$@" >&2 - exit 1 -} - -# Check whether it is a source file call -is_sourced() { - [ "${#FUNCNAME[@]}" -ge 2 ] && - [ "${FUNCNAME[0]}" = 'is_sourced' ] && - [ "${FUNCNAME[1]}" = 'source' ] -} - -# Parsing configuration parameters -parse_config() { - declare -g FE_SERVICE_ADDR CURRENT_BE_FQDN CURRENT_BE_PORT #PRIORITY_NETWORKS - - FE_SERVICE_ADDR="${FE_DISCOVERY_ADDR}" - BE_HEADLESS_SERVICE="${CLUSTER_NAME}-${COMPONENT_NAME}-headless" - CURRENT_BE_FQDN="${POD_NAME}.${BE_HEADLESS_SERVICE}.${CLUSTER_NAMESPACE}.svc.${CLUSTER_DOMAIN}" - CURRENT_BE_PORT="${HEARTBEAT_PORT:-9050}" - - # Exporting environment variables - export FE_SERVICE_ADDR CURRENT_BE_FQDN CURRENT_BE_PORT #PRIORITY_NETWORKS -} - -# Check BE status -check_be_status() { - local retry_count=0 - while [ $retry_count -lt $MAX_RETRY_TIMES ]; do - if [ "$1" = "true" ]; then - # Check FE status - if mysql 
-u"${DORIS_USER}" -P"${FE_QUERY_PORT}" -h"${FE_SERVICE_ADDR}" \ - -N -e "SHOW FRONTENDS" 2>/dev/null | grep -w "${FE_SERVICE_ADDR}" &>/dev/null; then - log_info "Master FE is ready" - return 0 - fi - else - # Check BE status - if mysql -u"${DORIS_USER}" -P"${FE_QUERY_PORT}" -h"${FE_SERVICE_ADDR}" \ - -N -e "SHOW BACKENDS" 2>/dev/null | grep -w "${CURRENT_BE_FQDN}" | grep -w "${CURRENT_BE_PORT}" | grep -w "true" &>/dev/null; then - log_info "BE node is ready" - return 0 - fi - fi - - retry_count=$((retry_count + 1)) - if [ $((retry_count % 20)) -eq 1 ]; then - if [ "$1" = "true" ]; then - log_info "Waiting for master FE... ($retry_count/$MAX_RETRY_TIMES)" - else - log_info "Waiting for BE node... ($retry_count/$MAX_RETRY_TIMES)" - fi - fi - sleep "$RETRY_INTERVAL" - done - - return 1 -} - -# Processing initialization files -process_init_files() { - local f - for f; do - case "$f" in - *.sh) - if [ -x "$f" ]; then - log_info "Executing $f" - "$f" - else - log_info "Sourcing $f" - . "$f" - fi - ;; - *.sql) - log_info "Executing SQL file $f" - mysql -u"${DORIS_USER}" -P"${FE_QUERY_PORT}" -h"${FE_SERVICE_ADDR}" < "$f" - ;; - *.sql.gz) - log_info "Executing compressed SQL file $f" - gunzip -c "$f" | mysql -u"${DORIS_USER}" -P"${FE_QUERY_PORT}" -h"${FE_SERVICE_ADDR}" - ;; - *) - log_warn "Ignoring $f" - ;; - esac - done -} - -# Main Function -main() { - # validate_environment - parse_config - - # Start BE Node - { - set +e - bash /opt/apache-doris/scripts/init_be.sh 2>/dev/null - } & - - # Waiting for BE node to be ready - if ! check_be_status false; then - log_error "BE node failed to start" - fi - - # Processing initialization files - if [ -d "/docker-entrypoint-initdb.d" ]; then - sleep 15 # Wait for the system to fully boot up - process_init_files /docker-entrypoint-initdb.d/* - fi - - # Waiting for BE process - wait -} - -if ! 
is_sourced; then - main "$@" -fi diff --git a/addons/doris/scripts/be/init_be.sh b/addons/doris/scripts/be/init_be.sh deleted file mode 100644 index 533705726..000000000 --- a/addons/doris/scripts/be/init_be.sh +++ /dev/null @@ -1,172 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -eo pipefail -shopt -s nullglob - -# Constant Definition -readonly DORIS_HOME="/opt/apache-doris" -readonly MAX_RETRY_TIMES=60 -readonly RETRY_INTERVAL=1 -readonly FE_QUERY_PORT=9030 -readonly BE_CONFIG_FILE="${DORIS_HOME}/be/conf/be.conf" -export DATE="$(date +%Y%m%d-%H%M%S)" -# Log Function -log_message() { - local level="$1" - shift - local message="$*" - if [ "$#" -eq 0 ]; then - message="$(cat)" - fi - local timestamp="$(date -Iseconds)" - printf '%s [%s] [INIT_BE]: %s\n' "${timestamp}" "${level}" "${message}" -} - -log_info() { - log_message "INFO" "$@" -} - -log_warn() { - log_message "WARN" "$@" >&2 -} - -log_error() { - log_message "ERROR" "$@" >&2 - exit 1 -} - -# Check whether it is a source file call -is_sourced() { - [ "${#FUNCNAME[@]}" -ge 2 ] && - [ "${FUNCNAME[0]}" = 'is_sourced' ] && - [ "${FUNCNAME[1]}" = 'source' ] -} - -# Initialize environment variables -init_environment() { - declare -g database_exists - if [ -d "${DORIS_HOME}/be/storage/data" ]; then - database_exists='true' - fi -} - -# Check if the BE node is registered -check_be_registered() { - # Check if BE is registered - local query_result - query_result=$(mysql -u"${DORIS_USER}" -P"${FE_QUERY_PORT}" -h"${FE_SERVICE_ADDR}" \ - -N -e "SHOW BACKENDS" 2>/dev/null | grep -w "${CURRENT_BE_FQDN}" | grep -w "${CURRENT_BE_PORT}" ) - - if [ -n "$query_result" ]; then - log_info "BE node ${CURRENT_BE_FQDN}:${CURRENT_BE_PORT} is already registered" - return 0 - fi - - return 1 -} - -# Register BE node to FE -register_be() { - # First check if the node is registered - if check_be_registered; then - return - fi - - # Try to register BE node - local retry_count=0 - while [ $retry_count -lt $MAX_RETRY_TIMES ]; do - if mysql -u"${DORIS_USER}" -P"${FE_QUERY_PORT}" -h"${FE_SERVICE_ADDR}" \ - -e "ALTER SYSTEM ADD BACKEND '${CURRENT_BE_FQDN}:${CURRENT_BE_PORT}'" 2>/dev/null; then - - # Wait for the BE node to become registered - local check_count=0 - while [ $check_count -lt 30 ]; do - if mysql 
-u"${DORIS_USER}" -P"${FE_QUERY_PORT}" -h"${FE_SERVICE_ADDR}" \ - -N -e "SHOW BACKENDS" 2>/dev/null | grep -w "${CURRENT_BE_FQDN}" | grep -w "${CURRENT_BE_PORT}" &>/dev/null; then - log_info "Successfully registered BE node" - return 0 - else - log_warn "BE node is not ready, retrying... ($check_count/30)" - fi - check_count=$((check_count + 1)) - sleep 1 - done - fi - - retry_count=$((retry_count + 1)) - if [ $((retry_count % 20)) -eq 1 ]; then - log_warn "Failed to register BE node or BE not ready, retrying... ($retry_count/$MAX_RETRY_TIMES)" - fi - sleep "$RETRY_INTERVAL" - done - - log_error "Failed to register BE node after ${MAX_RETRY_TIMES} attempts" -} - -# Configuring Node Roles -setup_node_role() { - if [[ ${NODE_ROLE} == 'computation' ]]; then - log_info "Setting up computation node role" - echo "be_node_role=computation" >> "$BE_CONFIG_FILE" - else - log_info "Setting up mixed node role" - fi -} - -# Print BE configuration information -show_be_config() { - log_info "==== BE Node Configuration ====" - log_info "Master FE Service: ${FE_SERVICE_ADDR}" - log_info "Current BE FQDN: ${CURRENT_BE_FQDN}" - log_info "Current BE Port: ${CURRENT_BE_PORT}" - log_info "Node Role: ${NODE_ROLE:-mixed}" - log_info "==========================" -} - -# Cleanup Function -cleanup() { - log_info "Stopping BE node" - ${DORIS_HOME}/be/bin/stop_be.sh -} - -# Main Function -main() { - trap cleanup SIGTERM SIGINT - init_environment - - # Check the storage directory - if [ -z "$database_exists" ]; then - log_info "Initializing BE configuration" - setup_node_role - show_be_config - register_be - else - log_info "Storage directory exists, skipping initialization" - fi - - log_info "Starting BE node" - export SKIP_CHECK_ULIMIT=true - ${DORIS_HOME}/be/bin/start_be.sh --console & - child_pid=$! - wait $child_pid -} - -if ! 
is_sourced; then - main "$@" -fi diff --git a/addons/doris/scripts/fe/fe_check_status.sh b/addons/doris/scripts/fe/fe_check_status.sh deleted file mode 100755 index ccabceecd..000000000 --- a/addons/doris/scripts/fe/fe_check_status.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -check_fe_status() -{ - local start_time=$(date +%s) - local expire_timeout=120 - # local helper=$1 - while true; do - output=$(timeout 15 mysql --connect-timeout 2 -h "127.0.0.1" -P "${FE_QUERY_PORT}" -u "${DORIS_USER}" -p"${DORIS_PASSWORD}" --skip-column-names --batch -e "SHOW FRONTENDS;") - if [[ "x$output" != "x" ]]; then - return 0 - fi - - let "expire=start_time+expire_timeout" - local now=$(date +%s) - if [[ $expire -le $now ]]; then - echo "[$(date)] the first container is not started" >& 2 - exit 1 - fi - - sleep 2 - done -} - -check_fe_status - diff --git a/addons/doris/scripts/fe/fe_member_leave.sh b/addons/doris/scripts/fe/fe_member_leave.sh deleted file mode 100644 index f5addd628..000000000 --- a/addons/doris/scripts/fe/fe_member_leave.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env bash - -set +x -set -o errexit - - -function info() { - echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" -} - -info "Start to leave FE member" - -leader_host="" -leave_member_host="" -leave_member_port="" -leave_role="" -# always use 0 pod FQDN as helper_endpoints -helper_endpoints=$(echo "$POD_FQDN_LIST" | cut -d, -f1) -helper_pod_name=$(echo "$helper_endpoints" | cut -d: -f1 | cut -d. 
-f1) -candidate_names="" - - -# root@x-fe-0:/opt/starrocks# mysql -h 127.0.0.1 -P 9030 -e "show frontends" -## +-----------------------------------------+------------------------------------------------------+-------------+----------+-----------+---------+--------------------+----------+----------+-----------+------+-------+-------------------+---------------------+---------------------+----------+--------+-----------------------------+------------------+ -# | Name | Host | EditLogPort | HttpPort | QueryPort | RpcPort | ArrowFlightSqlPort | Role | IsMaster | ClusterId | Join | Alive | ReplayedJournalId | LastStartTime | LastHeartbeat | IsHelper | ErrMsg | Version | CurrentConnected | -# +-----------------------------------------+------------------------------------------------------+-------------+----------+-----------+---------+--------------------+----------+----------+-----------+------+-------+-------------------+---------------------+---------------------+----------+--------+-----------------------------+------------------+ -# | fe_61708f2c_e1ea_4a21_8367_ea6d7103f065 | test-fe-2.test-fe-headless.default.svc.cluster.local | 9010 | 8030 | 9030 | 9020 | -1 | FOLLOWER | false | 606612320 | true | true | 57 | 2025-09-25 15:28:02 | 2025-09-25 15:31:03 | true | | doris-2.1.6-rc04-653e315ba5 | No | -# | fe_6ced9e5e_6a36_4b2d_83e6_e2b22b100123 | test-fe-0.test-fe-headless.default.svc.cluster.local | 9010 | 8030 | 9030 | 9020 | -1 | FOLLOWER | true | 606612320 | true | true | 58 | 2025-09-25 15:27:12 | 2025-09-25 15:31:03 | true | | doris-2.1.6-rc04-653e315ba5 | Yes | -# | fe_8caab8a5_a581_40ae_b9ce_934d9e135e32 | test-fe-1.test-fe-headless.default.svc.cluster.local | 9010 | 8030 | 9030 | 9020 | -1 | FOLLOWER | false | 606612320 | true | true | 57 | 2025-09-25 15:27:34 | 2025-09-25 15:31:03 | true | | doris-2.1.6-rc04-653e315ba5 | No | -# 
+-----------------------------------------+------------------------------------------------------+-------------+----------+-----------+---------+--------------------+----------+----------+-----------+------+-------+-------------------+---------------------+---------------------+----------+--------+-----------------------------+------------------+ - -function show_frontends() { - local retry_count=0 - local max_retries=20 - local retry_interval=6 - while (( retry_count < max_retries )); do - if mysql -N -B -h "${FE_DISCOVERY_ADDR}" -P 9030 -u"${DORIS_USER}" -p"${DORIS_PASSWORD}" -e "show frontends"; then - return 0 - fi - retry_count=$((retry_count + 1)) - info "Failed to execute 'show frontends', retrying in ${retry_interval} seconds... (${retry_count}/${max_retries})" >&2 - sleep ${retry_interval} - done - info "Failed to execute 'show frontends' after ${max_retries} retries." >&2 - exit 1 -} - -function switch_leader() { - info "switch leader from ${leader_host} to ${candidate_names}, address:${helper_endpoints}" - java -jar /opt/apache-doris/fe/lib/je-18.3.14-doris-SNAPSHOT.jar DbGroupAdmin -helperHosts "${helper_endpoints}" -groupName PALO_JOURNAL_GROUP -transferMaster -force "${candidate_names}" 5000 -} - -function wait_for_leader_switched() { - until [[ $(show_frontends | awk '$9 == "true" {print $2}') != ${KB_LEAVE_MEMBER_POD_NAME}* ]]; do - sleep 5 - info "waiting for leader to be switched" - done -} - -info "KB_LEAVE_MEMBER_POD_NAME: ${KB_LEAVE_MEMBER_POD_NAME}" -output=$(show_frontends) -info "frontends:" -info "${output}" - -# execute a mysql command and iterate the output line by line -while IFS= read -r line; do - name=$(echo "$line" | awk '{print $1}') - ip=$(echo "$line" | awk '{print $2}') - edit_log_port=$(echo "$line" | awk '{print $3}') - role=$(echo "$line" | awk '{print $8}') - is_master=$(echo "$line" | awk '{print $9}') - if [[ ${ip} == ${KB_LEAVE_MEMBER_POD_NAME}* ]]; then - leave_member_host=${ip} - leave_member_port=${edit_log_port} - 
leave_role=${role} - fi - if [ "${is_master}" == "true" ]; then - leader_host=${ip} - fi - - if [[ ${ip} == "${helper_endpoints}" ]]; then - candidate_names=${name} - helper_endpoints=${ip}:${edit_log_port} - fi -done <<< "$output" - -info "leave member: ${leave_member_host}:${leave_member_port}" -info "leave role: ${leave_role}" -info "leader: ${leader_host}" -info "helper hosts: ${helper_endpoints}" -info "candidate hosts: ${candidate_names}" - -if [ -z "${leave_member_host}" ] || [ -z "${leave_member_port}" ]; then - info "leave member ${KB_LEAVE_MEMBER_POD_NAME} not found, may be removed already" - exit 0 -fi - -if [[ ${KB_AGENT_POD_NAME} != ${helper_pod_name} ]]; then - switch_leader - wait_for_leader_switched -fi - -mysql -h "${leader_host}" -u"${DORIS_USER}" -p"${DORIS_PASSWORD}" -P 9030 -e "alter system drop ${leave_role} '${leave_member_host}:${leave_member_port}';" - -info "leave member ${leave_member_host}:${leave_member_port} success" diff --git a/addons/doris/scripts/fe/fe_post_start.sh b/addons/doris/scripts/fe/fe_post_start.sh deleted file mode 100644 index ae41b5945..000000000 --- a/addons/doris/scripts/fe/fe_post_start.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -idx=${POD_NAME##*-} -if [ $idx -ne 0 ]; then - exit 0 -fi - -while true; do - # we don't use `select 1` here, because the starrocks will return the following error: - # ERROR 1064 (HY000) at line 1: Backend node not found. 
Check if any backend node is down.backend - if mysql --connect-timeout=1 -h${FE_DISCOVERY_ADDR} -uroot -P9030 -p${DORIS_PASSWORD} -e "show databases" && \ - mysql --connect-timeout=1 -h${FE_DISCOVERY_ADDR} -uadmin -P9030 -p${DORIS_ADMIN_PASSWORD} -e "show databases"; then - break - fi - mysql --connect-timeout=1 -h${FE_DISCOVERY_ADDR} -uroot -P9030 -e "SET PASSWORD FOR root@'%' = PASSWORD('${DORIS_PASSWORD}')" - mysql --connect-timeout=1 -h${FE_DISCOVERY_ADDR} -uadmin -P9030 -e "SET PASSWORD FOR 'admin'@'%' = PASSWORD('${DORIS_ADMIN_PASSWORD}')" - sleep 1 -done diff --git a/addons/doris/scripts/fe/fe_prestop.sh b/addons/doris/scripts/fe/fe_prestop.sh deleted file mode 100755 index eb7c39662..000000000 --- a/addons/doris/scripts/fe/fe_prestop.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -DORIS_ROOT=${DORIS_ROOT:-"/opt/apache-doris"} -DORIS_HOME=${DORIS_ROOT}/fe -FE_CONFFILE=$DORIS_HOME/conf/fe.conf - -parse_confval_from_fe_conf() -{ - local confkey=$1 - local confvalue=`grep "^\s*$confkey" $FE_CONFFILE | grep -v "^\s\#" | sed 's|^\s*'$confkey'\s*=\s*\(.*\)\s*$|\1|g'` - echo $confvalue -} - -log_dir=`parse_confval_from_fe_conf "LOG_DIR"` - -if [[ "x$log_dir" == "x" ]]; then - log_dir="/opt/apache-doris/fe/log" -fi - -log_replace_var_dir=`eval echo "$log_dir"` -kill_time=$(date "+%Y-%m-%d %H:%M:%S") -eval echo "[fe_prestop.sh] ${kill_time} kubelet kill call the fe_prestop.sh to stop fe service." >> "$log_replace_var_dir/fe.log" -#eval echo "[fe_prestop.sh] ${kill_time} kubelet kill call the fe_prestop.sh to stop fe service." 2>&1 -eval echo "[fe_prestop.sh] ${kill_time} kubelet kill call the fe_prestop.sh to stop fe service ." >> "/proc/1/fd/1" -$DORIS_HOME/bin/stop_fe.sh diff --git a/addons/doris/scripts/fe/fe_role_probe.sh b/addons/doris/scripts/fe/fe_role_probe.sh deleted file mode 100644 index 77397a41e..000000000 --- a/addons/doris/scripts/fe/fe_role_probe.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -fe_role_probe() -{ - -# SHOW FRONTENDS output like: -# -# +-----------------------------------------+------------------------------------------------------+-------------+----------+-----------+---------+--------------------+----------+----------+-----------+------+-------+-------------------+---------------------+---------------------+----------+--------+-----------------------------+------------------+ -# | Name | Host | EditLogPort | HttpPort | QueryPort | RpcPort | ArrowFlightSqlPort | Role | IsMaster | ClusterId | Join | Alive | ReplayedJournalId | LastStartTime | LastHeartbeat | IsHelper | ErrMsg | Version | CurrentConnected | -# 
+-----------------------------------------+------------------------------------------------------+-------------+----------+-----------+---------+--------------------+----------+----------+-----------+------+-------+-------------------+---------------------+---------------------+----------+--------+-----------------------------+------------------+ -# | fe_61708f2c_e1ea_4a21_8367_ea6d7103f065 | test-fe-2.test-fe-headless.default.svc.cluster.local | 9010 | 8030 | 9030 | 9020 | -1 | FOLLOWER | false | 606612320 | true | true | 57 | 2025-09-25 15:28:02 | 2025-09-25 15:31:03 | true | | doris-2.1.6-rc04-653e315ba5 | No | -# | fe_6ced9e5e_6a36_4b2d_83e6_e2b22b100123 | test-fe-0.test-fe-headless.default.svc.cluster.local | 9010 | 8030 | 9030 | 9020 | -1 | FOLLOWER | true | 606612320 | true | true | 58 | 2025-09-25 15:27:12 | 2025-09-25 15:31:03 | true | | doris-2.1.6-rc04-653e315ba5 | Yes | -# | fe_8caab8a5_a581_40ae_b9ce_934d9e135e32 | test-fe-1.test-fe-headless.default.svc.cluster.local | 9010 | 8030 | 9030 | 9020 | -1 | FOLLOWER | false | 606612320 | true | true | 57 | 2025-09-25 15:27:34 | 2025-09-25 15:31:03 | true | | doris-2.1.6-rc04-653e315ba5 | No | -# +-----------------------------------------+------------------------------------------------------+-------------+----------+-----------+---------+--------------------+----------+----------+-----------+------+-------+-------------------+---------------------+---------------------+----------+--------+-----------------------------+------------------+ - - SELF_FE_FQDN="$(hostname -f)" - probe_output=$(mysql -h "${FE_DISCOVERY_ADDR}" -P "${FE_QUERY_PORT}" -u "${DORIS_USER}" -p"${DORIS_PASSWORD}" -e "SHOW FRONTENDS" 2>/dev/null || true) - is_master_value=$(echo "${probe_output}" | grep -w "${SELF_FE_FQDN}" | awk '{print $9}' || true) - role_value=$(echo "${probe_output}" | grep -w "${SELF_FE_FQDN}" | awk '{print $8}' || true) - - if [[ "x${is_master_value}" == "x" ]]; then - return 1 - fi - - if [[ "x${role_value}" != 
"xFOLLOWER" ]]; then - echo "${role_value}" - return 0 - fi - - if [[ "x${is_master_value}" == "xtrue" ]]; then - echo "master" - return 0 - fi - - if [[ "x${is_master_value}" == "xfalse" ]]; then - echo "follower" - return 0 - fi - -} - -fe_role_probe diff --git a/addons/doris/scripts/fe/init_fe.sh b/addons/doris/scripts/fe/init_fe.sh deleted file mode 100644 index 85b62ccdf..000000000 --- a/addons/doris/scripts/fe/init_fe.sh +++ /dev/null @@ -1,326 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -eo pipefail -shopt -s nullglob - -# Constant Definition -readonly DORIS_HOME="/opt/apache-doris" -readonly MAX_RETRY_TIMES=60 -readonly RETRY_INTERVAL=3 -readonly FE_CONFIG_FILE="${DORIS_HOME}/fe/conf/fe.conf" -readonly FOLLOWER_NUMBER=3 -readonly BACKUP_DIR="${DORIS_HOME}/fe/doris-meta/ape/backup" - -export DATE="$(date +%Y%m%d-%H%M%S)" - -cp /etc/config/fe.conf ${FE_CONFIG_FILE} - -# Log Function -log_message() { - local level="$1" - shift - local message="$*" - if [ "$#" -eq 0 ]; then - message="$(cat)" - fi - local timestamp="$(date -Iseconds)" - printf '%s [%s] [Entrypoint]: %s\n' "${timestamp}" "${level}" "${message}" -} - -log_info() { - log_message "INFO" "$@" -} - -log_warn() { - log_message "WARN" "$@" >&2 -} - -log_error() { - log_message "ERROR" "$@" >&2 - exit 1 -} - -# Check if the parameter is empty -check_required_param() { - local param_name="$1" - local param_value="$2" - if [ -z "$param_value" ]; then - log_error "${param_name} is required but not set" - fi -} - -# Check whether it is a source file call -is_sourced() { - [ "${#FUNCNAME[@]}" -ge 2 ] && - [ "${FUNCNAME[0]}" = 'is_sourced' ] && - [ "${FUNCNAME[1]}" = 'source' ] -} - - -# Parsing a comma-delimited string -parse_comma_separated() { - local input="$1" - local -n arr=$2 # 使用nameref来存储结果 - local IFS=',' - read -r -a arr <<< "$input" -} - -# Configuring the election mode -setup_election_mode() { - local pod_name_array - local fqdn_array - - - parse_comma_separated "$POD_NAME_LIST" pod_name_array - parse_comma_separated "$POD_FQDN_LIST" fqdn_array - - local fe_edit_log_port="${FE_EDIT_LOG_PORT:-9010}" - - master_fe_ip="${fqdn_array[0]}" - master_fe_port="${fe_edit_log_port}" - - local found=false - local pod_name="${HOSTNAME}" - for i in "${!pod_name_array[@]}"; do - if [[ $i -ge ${FOLLOWER_NUMBER:-3} ]]; then - is_observer_fe="true" - fi - - if [[ "${pod_name_array[i]}" == "${pod_name}" ]]; then - current_fe_ip="${fqdn_array[i]}" - current_fe_port="${fe_edit_log_port}" - 
found=true - break - fi - done - - if [ "$found" = "false" ]; then - log_info "Could not find configuration for pod '${pod_name}' in POD_FQDN_LIST" - log_info "The pod may be removed by scale-in Ops" - local retry_count=0 - while [ "$retry_count" -lt "$MAX_RETRY_TIMES" ]; do - sleep ${RETRY_INTERVAL} - retry_count=$((retry_count + 1)) - done - log_error "Pod should be removed by scale-in Ops after ${retry_count} retries" - fi - - is_master_fe=$([[ "$pod_name" == "${pod_name_array[0]}" ]] && echo "true" || echo "false") -} - - -# Configure the specified mode -setup_assign_mode() { - master_fe_ip="$FE_MASTER_IP" - master_fe_port="$FE_MASTER_PORT" - current_fe_ip="$FE_CURRENT_IP" - current_fe_port="$FE_CURRENT_PORT" - - is_master_fe=$([[ "$master_fe_ip" == "$current_fe_ip" ]] && echo "true" || echo "false") -} - -# Add RECOVERY mode configuration function -setup_recovery_mode() { - # In recovery mode, we need to read the configuration from the metadata - local meta_dir="${DORIS_HOME}/fe/doris-meta" - if [ ! 
-d "$meta_dir" ] || [ -z "$(ls -A "$meta_dir")" ]; then - log_error "Cannot start in recovery mode: meta directory is empty or does not exist" - fi - - log_info "Starting in recovery mode, using existing meta directory" - is_master_fe="true" # In recovery mode, it starts as the master node by default -} - -# Configuring FE Nodes -setup_fe_node() { - declare -g master_fe_ip master_fe_port current_fe_ip current_fe_port - declare -g is_master_fe - - case $run_mode in - "ELECTION") - setup_election_mode - ;; - "RECOVERY") - setup_recovery_mode - ;; - esac - - # Print key configuration information - log_info "==== FE Node Configuration ====" - log_info "Run Mode: ${run_mode}" - if [ "$run_mode" = "RECOVERY" ]; then - log_info "Recovery Mode: true" - log_info "Meta Directory: ${DORIS_HOME}/fe/doris-meta" - else - log_info "Is Master: ${is_master_fe}" - log_info "Is Observer: ${is_observer_fe}" - log_info "Master FE IP: ${master_fe_ip}" - log_info "Master FE Port: ${master_fe_port}" - log_info "Current FE IP: ${current_fe_ip}" - log_info "Current FE Port: ${current_fe_port}" - if [ "$run_mode" = "ELECTION" ]; then - log_info "FE HOST: ${HOSTNAME}" - log_info "FE Servers: ${POD_FQDN_LIST}" - fi - - fi - log_info "==========================" -} - -# Start FE node -start_fe() { - if [ "$run_mode" = "RECOVERY" ]; then - log_info "Starting FE node in recovery mode" - ${DORIS_HOME}/fe/bin/start_fe.sh --metadata_failure_recovery - return - fi - - if [ "$is_master_fe" = "true" ]; then - log_info "Starting master FE node" - ${DORIS_HOME}/fe/bin/start_fe.sh --console - else - log_info "Starting follower FE node" - ${DORIS_HOME}/fe/bin/start_fe.sh --helper "${master_fe_ip}:${master_fe_port}" --console - fi -} - -# Check whether the FE node is registered -check_fe_registered() { - local query_result - query_result=$(mysql -uroot -P"${FE_QUERY_PORT}" -h"${master_fe_ip}" -p"${DORIS_PASSWORD}" \ - -N -e "SHOW FRONTENDS" 2>/dev/null | grep -w "${current_fe_ip}" | grep -w 
"${current_fe_port}" || true) - - if [ -n "$query_result" ]; then - log_info "FE node ${current_fe_ip}:${current_fe_port} is already registered" - return 0 - fi - return 1 -} - -# Check the metadata directory -check_meta_dir() { - local meta_dir="${DORIS_HOME}/fe/doris-meta" - if [ -d "$meta_dir/image" ] && [ -n "$(ls -A "$meta_dir/image")" ]; then - log_info "Meta directory already exists and not empty" - return 0 - fi - return 1 -} - -# Register FE Node -register_fe() { - if [ "$is_master_fe" = "true" ]; then - log_info "Master FE node does not need registration" - return - fi - local fe_role=${1:-"FOLLOWER"} - # First check if the node is registered - if check_fe_registered; then - return - fi - - local retry_count=0 - while [ $retry_count -lt $MAX_RETRY_TIMES ]; do - if mysql -uroot -P"${FE_QUERY_PORT}" -h"${master_fe_ip}" -p"${DORIS_PASSWORD}" \ - -e "ALTER SYSTEM ADD ${fe_role} '${current_fe_ip}:${current_fe_port}'" 2>/dev/null; then - log_info "Successfully registered FE node" - return - fi - - retry_count=$((retry_count + 1)) - if [ $((retry_count % 20)) -eq 1 ]; then - log_warn "Failed to register FE node, retrying... ($retry_count/$MAX_RETRY_TIMES)" - fi - sleep "$RETRY_INTERVAL" - done - - log_error "Failed to register FE node after ${MAX_RETRY_TIMES} attempts" -} - -# Cleanup Function -cleanup() { - log_info "Stopping FE node" - ${DORIS_HOME}/fe/bin/stop_fe.sh -} - -# Config FE TLS -config_fe_tls() { - if [ -n "$TLS_ENABLED" ] && [ "$TLS_ENABLED" = "true" ]; then - openssl pkcs12 -inkey /certificates/ca-key.pem -in /etc/pki/tls/ca.pem -export -out /opt/apache-doris/fe/mysql_ssl_default_certificate/ca_certificate.p12 -passout pass:"doris" - if [ $? 
-ne 0 ]; then - log_error "Failed to create CA certificate.p12" - else - log_info "Successfully created CA certificate.p12" - fi - openssl pkcs12 -inkey /etc/pki/tls/key.pem -in /etc/pki/tls/cert.pem -export -out /opt/apache-doris/fe/mysql_ssl_default_certificate/server_certificate.p12 -passout pass:"doris" - if [ $? -ne 0 ]; then - log_error "Failed to create server certificate.p12" - else - log_info "Successfully created server certificate.p12" - fi - fi -} - - -# Main Function -main() { - # validate_environment - trap cleanup SIGTERM SIGINT - run_mode="${run_mode:-ELECTION}" - - # if [ -f "${BACKUP_DIR}/.restore" ]; then - # log_info "Found .restore file, run_mode set to RECOVERY" - # run_mode="RECOVERY" - # rm "${BACKUP_DIR}/.restore" - # fi - - # Config FE TLS - config_fe_tls - - if [ "$run_mode" = "RECOVERY" ]; then - setup_fe_node - start_fe & - wait $! - else - setup_fe_node - - # Check the metadata directory - if check_meta_dir; then - log_info "Meta directory exists, starting FE directly" - start_fe & - wait $! - return - fi - - # The metadata directory does not exist and needs to be initialized and registered. - log_info "Initializing meta directory" - if [ "$is_observer_fe" = "true" ]; then - log_info "Register FE Node as OBSERVER.." - register_fe "OBSERVER" - else - register_fe "FOLLOWER" - fi - start_fe & - wait $! - fi -} - -if ! is_sourced; then - main "$@" -fi diff --git a/addons/doris/templates/_helpers.tpl b/addons/doris/templates/_helpers.tpl deleted file mode 100644 index d10c433f8..000000000 --- a/addons/doris/templates/_helpers.tpl +++ /dev/null @@ -1,153 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "doris.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-If release name contains chart name it will be used as a full name. -*/}} -{{- define "doris.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "doris.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "doris.labels" -}} -helm.sh/chart: {{ include "doris.chart" . }} -{{ include "doris.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "doris.selectorLabels" -}} -app.kubernetes.io/name: {{ include "doris.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Common annotations -*/}} -{{- define "doris.annotations" -}} -{{ include "kblib.helm.resourcePolicy" . }} -{{ include "doris.apiVersion" . 
}} -{{- end }} - -{{/* -API version annotation -*/}} -{{- define "doris.apiVersion" -}} -kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1 -{{- end }} - -{{- define "fe.componentDefName" -}} -doris-fe-{{ .Chart.Version }} -{{- end -}} - -{{- define "be.componentDefName" -}} -doris-be-{{ .Chart.Version }} -{{- end -}} - - -{{- define "fe.componentVersionName" -}} -doris-fe -{{- end -}} - -{{- define "be.componentVersionName" -}} -doris-be -{{- end -}} - -{{/* -Define fe component definition regex pattern -*/}} -{{- define "fe.cmpdRegexPattern" -}} -^doris-fe- -{{- end -}} - -{{/* -Define be component definition regex pattern -*/}} -{{- define "be.cmpdRegexPattern" -}} -^doris-be- -{{- end -}} - -{{/* -Define fe component configuration template name -*/}} -{{- define "fe.configurationTemplate" -}} -doris-fe-configuration-template -{{- end -}} - -{{/* -Define be component configuration template name -*/}} -{{- define "be.configurationTemplate" -}} -doris-be-configuration-template -{{- end -}} - -{{/* -Define doris fe component scripts configMap template name -*/}} -{{- define "fe.scriptsTemplate" -}} -doris-fe-scripts-template -{{- end -}} - -{{/* -Define doris be component scripts configMap template name -*/}} -{{- define "be.scriptsTemplate" -}} -doris-be-scripts-template -{{- end -}} - -{{/* -Define doris fe parameters config render name -*/}} -{{- define "fe.pcrName" -}} -doris-fe-pcr -{{- end -}} - -{{/* -Define doris be parameters config render name -*/}} -{{- define "be.pcrName" -}} -doris-be-pcr -{{- end -}} - -{{/* -Define doris fe parameters definition name -*/}} -{{- define "fe.paramsDefName" -}} -doris-fe-pd -{{- end -}} - -{{/* -Define doris be parameters definition name -*/}} -{{- define "be.paramsDefName" -}} -doris-be-pd -{{- end -}} \ No newline at end of file diff --git a/addons/doris/templates/actionset.yaml b/addons/doris/templates/actionset.yaml deleted file mode 100644 index a9d7add41..000000000 --- a/addons/doris/templates/actionset.yaml +++ 
/dev/null @@ -1,43 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: ActionSet -metadata: - name: doris-full-backup -spec: - backupType: Full - env: - - name: BACKUP_DIR - value: /opt/apache-doris/fe/doris-meta/ape/backup - backup: - preBackup: [] - postBackup: [] - backupData: - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.fe.repository }}:2.1.6 - runOnTargetPodNode: true - command: - - bash - - -c - - | - {{- .Files.Get "dataprotection/common.sh" | nindent 10 }} - {{- .Files.Get "dataprotection/backup.sh" | nindent 10 }} - syncProgress: - enabled: true - intervalSeconds: 5 - restore: - prepareData: - runOnTargetPodNode: true - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.fe.repository }}:2.1.6 - command: - - bash - - -c - - | - {{- .Files.Get "dataprotection/download-meta.sh" | nindent 10 }} - postReady: - - job: - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.fe.repository }}:2.1.6 - runOnTargetPodNode: false - command: - - bash - - -c - - | - {{- .Files.Get "dataprotection/common.sh" | nindent 12 }} - {{- .Files.Get "dataprotection/restore.sh" | nindent 12 }} \ No newline at end of file diff --git a/addons/doris/templates/backuppolicytemplate.yaml b/addons/doris/templates/backuppolicytemplate.yaml deleted file mode 100644 index be7ee8451..000000000 --- a/addons/doris/templates/backuppolicytemplate.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: dataprotection.kubeblocks.io/v1alpha1 -kind: BackupPolicyTemplate -metadata: - name: doris-backup-policy-template - labels: - {{- include "doris.labels" . 
| nindent 4 }} - annotations: - dataprotection.kubeblocks.io/is-default-policy-template: "true" -spec: - serviceKind: doris - compDefs: - - ^doris-fe.* - target: - role: "master" - account: root - containerPort: - containerName: fe - portName: mysql - backupMethods: - - name: full - actionSetName: doris-full-backup - targetVolumes: - volumeMounts: - - name: metadata - mountPath: /opt/apache-doris/fe/doris-meta - - name: log - mountPath: /opt/apache-doris/fe/log - schedules: - - backupMethod: full - enabled: false - cronExpression: "0 18 * * *" - retentionPeriod: 8d diff --git a/addons/doris/templates/clusterdefinition.yaml b/addons/doris/templates/clusterdefinition.yaml deleted file mode 100644 index c3375d224..000000000 --- a/addons/doris/templates/clusterdefinition.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: ClusterDefinition -metadata: - name: {{ include "doris.name" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} - annotations: - {{- include "doris.apiVersion" . | nindent 4 }} -spec: - topologies: - - name: aggregated - components: - - name: fe - compDef: {{ include "fe.cmpdRegexPattern" . }} - - name: be - compDef: {{ include "be.cmpdRegexPattern" . }} - orders: - provision: - - fe - - be - terminate: - - be - - fe - diff --git a/addons/doris/templates/cmpd-be.yaml b/addons/doris/templates/cmpd-be.yaml deleted file mode 100644 index 336b48993..000000000 --- a/addons/doris/templates/cmpd-be.yaml +++ /dev/null @@ -1,205 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: ComponentDefinition -metadata: - name: {{ include "be.componentDefName" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} - annotations: - apps.kubeblocks.io/skip-immutable-check: "true" - {{- include "doris.annotations" . 
| nindent 4 }} -spec: - provider: kubeblocks - description: A Doris BE component definition for Kubernetes - serviceKind: doris-be - podManagementPolicy: Parallel - services: - - name: be - serviceName: be - spec: - ipFamilyPolicy: PreferDualStack - ipFamilies: - - IPv4 - ports: - - name: be - port: 9060 - targetPort: be - - name: webserver - port: 8040 - targetPort: webserver - - name: heartbeat - port: 9050 - targetPort: heartbeat - - name: brpc - port: 8060 - targetPort: brpc - configs: - - name: be-cm - template: {{ include "be.configurationTemplate" . }} - namespace: {{ .Release.Namespace }} - volumeName: be-cm - restartOnFileChange: true - externalManaged: true - volumes: - - name: data - needSnapshot: true - - name: log - scripts: - - name: scripts - template: {{ include "be.scriptsTemplate" . }} - namespace: {{ .Release.Namespace }} - volumeName: scripts - defaultMode: 0555 - vars: - - name: CLUSTER_NAME - valueFrom: - clusterVarRef: - clusterName: Required - - name: CLUSTER_NAMESPACE - valueFrom: - clusterVarRef: - namespace: Required - - name: COMPONENT_NAME - value: be - - name: CLUSTER_DOMAIN - value: {{ .Values.clusterDomain }} - - name: POD_NAME_LIST - valueFrom: - componentVarRef: - optional: false - podNames: Required - - name: POD_FQDN_LIST - valueFrom: - componentVarRef: - optional: false - podFQDNs: Required - - name: FE_DISCOVERY_SERVICE_NAME - valueFrom: - serviceVarRef: - compDef: {{ include "fe.componentDefName" . }} - name: fe - host: Required - - name: FE_DISCOVERY_ADDR - value: $(FE_DISCOVERY_SERVICE_NAME).$(CLUSTER_NAMESPACE).svc.{{ .Values.clusterDomain }} - - name: FE_QUERY_PORT - value: "9030" - - name: DORIS_USER - valueFrom: - credentialVarRef: - compDef: {{ include "fe.componentDefName" . }} - name: root - optional: false - username: Required - - name: DORIS_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "fe.componentDefName" . 
}} - name: root - optional: false - password: Required - - name: MYSQL_PWD - valueFrom: - credentialVarRef: - compDef: {{ include "fe.componentDefName" . }} - name: root - optional: false - password: Required - runtime: - # securityContext: - # sysctls: - # - name: vm.max_map_count - # value: "2000000" - initContainers: - - name: sysctl - securityContext: - privileged: true - command: ['sh', '-c', 'sysctl -w vm.max_map_count=2000000'] - containers: - - name: be - imagePullPolicy: {{ default .Values.image.pullPolicy "IfNotPresent" }} - command: - - bash - - -c - - | - /opt/apache-doris/scripts/entry_point.sh - env: - - name: HOST_TYPE - value: FQDN - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - - name: HOST_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP - - name: POD_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: COMPONENT_NAME - value: be - - name: FE_QUERY_PORT - value: "9030" - - name: CONFIGMAP_MOUNT_PATH - value: /etc/doris/be/conf - - name: WEBSERVER_PORT - value: "8040" - - name: HEARTBEAT_PORT - value: "9050" - lifecycle: - preStop: - exec: - command: - - /opt/apache-doris/scripts/be_prestop.sh - ports: - - containerPort: 9060 - name: be - protocol: TCP - - containerPort: 8040 - name: webserver - protocol: TCP - - containerPort: 9050 - name: heartbeat - protocol: TCP - - containerPort: 8060 - name: brpc - protocol: TCP - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: false - livenessProbe: - httpGet: - path: /api/health - port: 8040 - periodSeconds: 5 - failureThreshold: 60 - successThreshold: 1 - timeoutSeconds: 5 - initialDelaySeconds: 15 - readinessProbe: - httpGet: - path: /api/health - port: 8040 - periodSeconds: 5 - failureThreshold: 60 - successThreshold: 1 - timeoutSeconds: 5 - initialDelaySeconds: 15 - volumeMounts: - - 
mountPath: /opt/apache-doris/be/conf/be.conf - name: be-cm - subPath: be.conf - - mountPath: /opt/apache-doris/be/storage - name: data - - mountPath: /opt/apache-doris/be/log - name: log - - mountPath: /opt/apache-doris/scripts - name: scripts diff --git a/addons/doris/templates/cmpd-fe.yaml b/addons/doris/templates/cmpd-fe.yaml deleted file mode 100644 index f237b8026..000000000 --- a/addons/doris/templates/cmpd-fe.yaml +++ /dev/null @@ -1,270 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: ComponentDefinition -metadata: - name: {{ include "fe.componentDefName" . }} - labels: - {{ include "doris.labels" . | nindent 4 }} - annotations: - apps.kubeblocks.io/skip-immutable-check: "true" - {{- include "doris.annotations" . | nindent 4 }} -spec: - provider: kubeblocks - description: A Doris FE component definition for Kubernetes - # The FE can only perform leader election when the majority of members are active. - podManagementPolicy: Parallel - serviceKind: doris-fe - services: - - name: fe - serviceName: fe - spec: - ipFamilies: - - IPv4 - ipFamilyPolicy: PreferDualStack - ports: - - name: mysql - port: 9030 - targetPort: mysql - - name: http - port: 8030 - targetPort: http - - name: rpc - port: 9020 - targetPort: rpc - - name: edit-log - port: 9010 - targetPort: edit-log - scripts: - - name: scripts - template: {{ include "fe.scriptsTemplate" . }} - namespace: {{ .Release.Namespace }} - volumeName: scripts - defaultMode: 0555 - volumes: - - name: metadata - needSnapshot: true - - name: log - systemAccounts: - - name: root - initAccount: true - passwordGenerationPolicy: - length: 10 - numDigits: 5 - numSymbols: 0 - letterCase: MixedCases - - name: admin - initAccount: true - passwordGenerationPolicy: - length: 10 - numDigits: 5 - numSymbols: 0 - letterCase: MixedCases - configs: - - name: fe-cm - template: {{ include "fe.configurationTemplate" . 
}} - namespace: {{ .Release.Namespace }} - volumeName: fe-cm - restartOnFileChange: true - externalManaged: true - vars: - - name: CLUSTER_NAME - valueFrom: - clusterVarRef: - clusterName: Required - - name: CLUSTER_NAMESPACE - valueFrom: - clusterVarRef: - namespace: Required - - name: COMPONENT_NAME - value: fe - - name: POD_NAME_LIST - valueFrom: - componentVarRef: - optional: false - podNames: Required - - name: POD_FQDN_LIST - valueFrom: - componentVarRef: - optional: false - podFQDNs: Required - - name: FE_DISCOVERY_SERVICE_NAME - valueFrom: - serviceVarRef: - compDef: {{ include "fe.componentDefName" . }} - name: fe - host: Required - - name: FE_DISCOVERY_ADDR - value: $(FE_DISCOVERY_SERVICE_NAME).$(CLUSTER_NAMESPACE).svc.{{ .Values.clusterDomain }} - - name: CLUSTER_DOMAIN - value: {{ .Values.clusterDomain }} - - name: FE_QUERY_PORT - value: "9030" - - name: DORIS_USER - valueFrom: - credentialVarRef: - compDef: {{ include "fe.componentDefName" . }} - name: root - optional: false - username: Required - - name: DORIS_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "fe.componentDefName" . }} - name: root - optional: false - password: Required - - name: DORIS_ADMIN_PASSWORD - valueFrom: - credentialVarRef: - compDef: {{ include "fe.componentDefName" . 
}} - name: admin - optional: false - password: Required - - name: TLS_ENABLED - valueFrom: - tlsVarRef: - enabled: Optional - runtime: - containers: - - name: fe - imagePullPolicy: {{ default .Values.image.pullPolicy "IfNotPresent" }} - command: - - bash - - -c - - | - /opt/apache-doris/scripts/init_fe.sh - ports: - - containerPort: 8030 - name: http - protocol: TCP - - containerPort: 9020 - name: rpc - protocol: TCP - - containerPort: 9030 - name: mysql - protocol: TCP - - containerPort: 9010 - name: edit-log - protocol: TCP - env: - - name: HOST_TYPE - value: FQDN - - name: TZ - value: {{ .Values.timezone }} - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - - name: POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - - name: HOST_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP - - name: POD_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CONFIGMAP_MOUNT_PATH - value: /etc/doris/fe/conf - - name: HTTP_PORT - value: "8030" - - name: FE_EDIT_LOG_PORT - value: "9010" - - name: JAVA_HOME - value: /usr/lib/jvm/java - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: false - volumeMounts: - - mountPath: /opt/apache-doris/fe/doris-meta - name: metadata - - mountPath: /opt/apache-doris/fe/log - name: log - - mountPath: /etc/config - name: fe-cm - - mountPath: /opt/apache-doris/scripts - name: scripts - # volume certificates is defined in cluster.yaml and referenced here - # as the - - mountPath: /certificates - name: certificates - lifecycle: - postStart: - exec: - command: - - bash - - -c - - | - /opt/apache-doris/scripts/fe_post_start.sh > /tmp/post-start-hook.log 2>&1 & - preStop: - exec: - command: - - bash - - -c - - | - /opt/apache-doris/scripts/fe_prestop.sh > /tmp/pre-stop-hook.log 2>&1 & - livenessProbe: - httpGet: - path: /api/health - port: 8030 - periodSeconds: 5 - failureThreshold: 60 - 
successThreshold: 1 - timeoutSeconds: 5 - initialDelaySeconds: 15 - readinessProbe: - httpGet: - path: /api/health - port: 8030 - periodSeconds: 5 - failureThreshold: 60 - successThreshold: 1 - timeoutSeconds: 5 - initialDelaySeconds: 15 - lifecycleActions: - roleProbe: - initialDelaySeconds: 15 - periodSeconds: 5 - timeoutSeconds: 3 - exec: - container: fe - command: - - bash - - -c - - /opt/apache-doris/scripts/fe_role_probe.sh - memberLeave: - retryPolicy: - maxRetries: 3 - retryInterval: 5 - timeoutSeconds: 300 - exec: - command: - - /bin/bash - - -c - - | - /opt/apache-doris/scripts/fe_member_leave.sh >> /opt/apache-doris/fe/log/member-leave.log 2>&1 - targetPodSelector: Role - container: fe - matchingKey: master - roles: - - name: master - updatePriority: 3 - participatesInQuorum: true - - name: follower - updatePriority: 2 - participatesInQuorum: true - - name: observer - updatePriority: 1 - participatesInQuorum: false - tls: - volumeName: tls - mountPath: /etc/pki/tls - caFile: ca.pem - certFile: cert.pem - keyFile: key.pem \ No newline at end of file diff --git a/addons/doris/templates/cmpv-be.yaml b/addons/doris/templates/cmpv-be.yaml deleted file mode 100644 index d7cdc7719..000000000 --- a/addons/doris/templates/cmpv-be.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: ComponentVersion -metadata: - name: {{ include "be.componentVersionName" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} - annotations: - {{- include "doris.apiVersion" . | nindent 4 }} -spec: - compatibilityRules: - - compDefs: - - {{ include "be.cmpdRegexPattern" . 
}} - releases: - - 2.1.6 - releases: - - name: 2.1.6 - serviceVersion: 2.1.6 - images: - be: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.be.repository }}:2.1.6 - sysctl: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.be.repository }}:2.1.6 \ No newline at end of file diff --git a/addons/doris/templates/cmpv-fe.yaml b/addons/doris/templates/cmpv-fe.yaml deleted file mode 100644 index 4149cfa3a..000000000 --- a/addons/doris/templates/cmpv-fe.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apps.kubeblocks.io/v1 -kind: ComponentVersion -metadata: - name: {{ include "fe.componentVersionName" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} - annotations: - {{- include "doris.apiVersion" . | nindent 4 }} -spec: - compatibilityRules: - - compDefs: - - {{ include "fe.cmpdRegexPattern" . }} - releases: - - 2.1.6 - releases: - - name: 2.1.6 - serviceVersion: 2.1.6 - images: - fe: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.fe.repository }}:2.1.6 - roleProbe: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.fe.repository }}:2.1.6 - memberLeave: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.fe.repository }}:2.1.6 \ No newline at end of file diff --git a/addons/doris/templates/configuration-template.yaml b/addons/doris/templates/configuration-template.yaml deleted file mode 100644 index 1cbbc86ab..000000000 --- a/addons/doris/templates/configuration-template.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "fe.configurationTemplate" . }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "doris.labels" . | nindent 4 }} -data: - fe.conf: |- - {{- .Files.Get "config/fe-config.tpl" | nindent 4 }} - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "be.configurationTemplate" . }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "doris.labels" . 
| nindent 4 }} -data: - be.conf: |- - {{- .Files.Get "config/be-config.tpl" | nindent 4 }} \ No newline at end of file diff --git a/addons/doris/templates/pcr-be.yaml b/addons/doris/templates/pcr-be.yaml deleted file mode 100644 index d609a5566..000000000 --- a/addons/doris/templates/pcr-be.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: parameters.kubeblocks.io/v1alpha1 -kind: ParamConfigRenderer -metadata: - name: {{ include "be.pcrName" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} - annotations: - {{- include "doris.annotations" . | nindent 4 }} -spec: - componentDef: {{ include "be.componentDefName" . }} - parametersDefs: - - {{ include "be.paramsDefName" . }} - configs: - - name: be.conf - fileFormatConfig: - format: props-plus - reRenderResourceTypes: - - vscale \ No newline at end of file diff --git a/addons/doris/templates/pcr-fe.yaml b/addons/doris/templates/pcr-fe.yaml deleted file mode 100644 index 352945489..000000000 --- a/addons/doris/templates/pcr-fe.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: parameters.kubeblocks.io/v1alpha1 -kind: ParamConfigRenderer -metadata: - name: {{ include "fe.pcrName" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} - annotations: - {{- include "doris.annotations" . | nindent 4 }} -spec: - componentDef: {{ include "fe.componentDefName" . }} - parametersDefs: - - {{ include "fe.paramsDefName" . }} - configs: - - name: fe.conf - fileFormatConfig: - format: props-plus - reRenderResourceTypes: - - vscale - - tls \ No newline at end of file diff --git a/addons/doris/templates/pd-be.yaml b/addons/doris/templates/pd-be.yaml deleted file mode 100644 index ef27bea0c..000000000 --- a/addons/doris/templates/pd-be.yaml +++ /dev/null @@ -1,57 +0,0 @@ -{{- $cc := .Files.Get "config/be-config-effect-scope.yaml" | fromYaml }} -apiVersion: parameters.kubeblocks.io/v1alpha1 -kind: ParametersDefinition -metadata: - name: {{ include "be.paramsDefName" . }} - labels: - {{- include "doris.labels" . 
| nindent 4 }} - annotations: - {{- include "doris.annotations" . | nindent 4 }} -spec: - reloadAction: - shellTrigger: - sync: true - command: - - "be-update-dynamic-config.sh" - scriptConfig: - scriptConfigMapRef: {{ include "be.scriptsTemplate" . }} - namespace: {{ .Release.Namespace }} - - fileName: be.conf - - # ConfigurationSchema that impose restrictions on engine parameter's rule - parametersSchema: - # top level mysql configuration type - topLevelKey: BEParameter - - # schemaInJSON: auto generate from cue scripts - # example: ../../internal/configuration/testdata/mysql_openapi.json - cue: |- - {{- .Files.Get "config/be-config-constraint.cue" | nindent 6 }} - - ## define static parameter list - {{- if hasKey $cc "staticParameters" }} - staticParameters: - {{- $params := get $cc "staticParameters" }} - {{- range $params }} - - {{ . }} - {{- end }} - {{- end}} - - ## define dynamic parameter list - {{- if hasKey $cc "dynamicParameters" }} - dynamicParameters: - {{- $params := get $cc "dynamicParameters" }} - {{- range $params }} - - {{ . }} - {{- end }} - {{- end}} - - ## define immutable parameter list, this feature is not currently supported. - {{- if hasKey $cc "immutableParameters" }} - immutableParameters: - {{- $params := get $cc "immutableParameters" }} - {{- range $params }} - - {{ . }} - {{- end }} - {{- end}} diff --git a/addons/doris/templates/pd-fe.yaml b/addons/doris/templates/pd-fe.yaml deleted file mode 100644 index 3e6091bac..000000000 --- a/addons/doris/templates/pd-fe.yaml +++ /dev/null @@ -1,62 +0,0 @@ -{{- $cc := .Files.Get "config/fe-config-effect-scope.yaml" | fromYaml }} -apiVersion: parameters.kubeblocks.io/v1alpha1 -kind: ParametersDefinition -metadata: - name: {{ include "fe.paramsDefName" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} - annotations: - {{- include "doris.annotations" . 
| nindent 4 }} -spec: - reloadAction: - shellTrigger: - sync: true - command: - - "fe-update-dynamic-config.sh" - scriptConfig: - scriptConfigMapRef: {{ include "fe.scriptsTemplate" . }} - namespace: {{ .Release.Namespace }} - toolsSetup: - mountPoint: /kb_tools - toolConfigs: - - name: kb-tools - image: {{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.fe.repository }}:2.1.6 - asContainerImage: true - fileName: fe.conf - - # ConfigurationSchema that impose restrictions on engine parameter's rule - parametersSchema: - # top level mysql configuration type - topLevelKey: FEParameter - - # schemaInJSON: auto generate from cue scripts - # example: ../../internal/configuration/testdata/mysql_openapi.json - cue: |- - {{- .Files.Get "config/fe-config-constraint.cue" | nindent 6 }} - - ## define static parameter list - {{- if hasKey $cc "staticParameters" }} - staticParameters: - {{- $params := get $cc "staticParameters" }} - {{- range $params }} - - {{ . }} - {{- end }} - {{- end}} - - ## define dynamic parameter list - {{- if hasKey $cc "dynamicParameters" }} - dynamicParameters: - {{- $params := get $cc "dynamicParameters" }} - {{- range $params }} - - {{ . }} - {{- end }} - {{- end}} - - ## define immutable parameter list, this feature is not currently supported. - {{- if hasKey $cc "immutableParameters" }} - immutableParameters: - {{- $params := get $cc "immutableParameters" }} - {{- range $params }} - - {{ . }} - {{- end }} - {{- end}} diff --git a/addons/doris/templates/scripts-template-be.yaml b/addons/doris/templates/scripts-template-be.yaml deleted file mode 100644 index 0189f7154..000000000 --- a/addons/doris/templates/scripts-template-be.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "be.scriptsTemplate" . }} - labels: - {{- include "doris.labels" . 
| nindent 4 }} -data: - be_prestop.sh: |- - {{- .Files.Get "scripts/be/be_prestop.sh" | nindent 4 }} - entry_point.sh: |- - {{- .Files.Get "scripts/be/entry_point.sh" | nindent 4 }} - init_be.sh: |- - {{- .Files.Get "scripts/be/init_be.sh" | nindent 4 }} - be-update-dynamic-config.sh: | - #!/bin/sh - set -ex - key="${1:?missing parameterkey}" - value="${2:?missing parametervalue}" - echo "key: $key, value: $value" - curl -XPOST http://localhost:8040/api/update_config?$key=$value \ No newline at end of file diff --git a/addons/doris/templates/scripts-template-fe.yaml b/addons/doris/templates/scripts-template-fe.yaml deleted file mode 100644 index 2cf46b342..000000000 --- a/addons/doris/templates/scripts-template-fe.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "fe.scriptsTemplate" . }} - labels: - {{- include "doris.labels" . | nindent 4 }} -data: - fe_prestop.sh: |- - {{- .Files.Get "scripts/fe/fe_prestop.sh" | nindent 4 }} - fe_entry_point.sh: |- - {{- .Files.Get "scripts/fe/fe_entry_point.sh" | nindent 4 }} - init_fe.sh: |- - {{- .Files.Get "scripts/fe/init_fe.sh" | nindent 4 }} - fe_role_probe.sh: |- - {{- .Files.Get "scripts/fe/fe_role_probe.sh" | nindent 4 }} - fe_post_start.sh: |- - {{- .Files.Get "scripts/fe/fe_post_start.sh" | nindent 4 }} - fe_check_status.sh: |- - {{- .Files.Get "scripts/fe/fe_check_status.sh" | nindent 4 }} - fe_member_leave.sh: |- - {{- .Files.Get "scripts/fe/fe_member_leave.sh" | nindent 4 }} - fe-update-dynamic-config.sh: |- - #!/bin/sh - set -ex - key="${1:?missing parameterkey}" - value="${2:?missing parametervalue}" - echo "key: $key, value: $value" - sql="ADMIN SET FRONTEND CONFIG (\"$key\" = \"$value\")" - mysql -h 127.0.0.1 -P 9030 -e "$sql" diff --git a/addons/doris/values.yaml b/addons/doris/values.yaml deleted file mode 100644 index 30f12743e..000000000 --- a/addons/doris/values.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Default values for Doris. 
-# This is a YAML-formatted file. - -nameOverride: "" - -fullnameOverride: "" - -timezone: Asia/Shanghai - -image: - registry: docker.io - pullPolicy: IfNotPresent - fe: - repository: apecloud/doris-fe - be: - repository: apecloud/doris-be - -AVX2: false - -fe: - config: | - JAVA_OPTS="-Xmx8192m -XX:+UseMembar -XX:SurvivorRatio=8 -XX:MaxTenuringThreshold=7 -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSClassUnloadingEnabled -XX:-CMSParallelRemarkEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:SoftRefLRUPolicyMSPerMB=0 -Xloggc:/opt/apache-doris/fe/log/fe.gc.log.$DATE" - lower_case_table_names=1 - enable_fqdn_mode=true -be: - config: | - JAVA_OPTS="-Xmx2048m" - - -# cluster domain without . prefix -clusterDomain: "cluster.local"