You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
Allows a more general join planning algorithm that can handle more complex conditions, but only works with hash join. If hash join is not enabled, then the usual join planning algorithm is used regardless of the value of this setting.
Controls how data is split into tasks when executing a CLUSTER TABLE FUNCTION.
6795
+
6796
+
This setting defines the granularity of work distribution across the cluster:
6797
+
- `file` — each task processes an entire file.
6798
+
- `bucket` — tasks are created per internal data block within a file (for example, Parquet row groups).
6799
+
6800
+
Choosing finer granularity (like `bucket`) can improve parallelism when working with a small number of large files.
6801
+
For instance, if a Parquet file contains multiple row groups, enabling `bucket` granularity allows each group to be processed independently by different workers.
Defines the approximate size of a batch (in bytes) used in distributed processing of tasks in cluster table functions with `bucket` split granularity. The system accumulates data until at least this amount is reached. The actual size may be slightly larger to align with data boundaries.
When creating a `Merge` table without an explicit schema or when using the `merge` table function, infer schema as a union of not more than the specified number of matching tables.
{"allow_experimental_hybrid_table", false, false, "Added new setting to allow the Hybrid table engine."},
59
59
{"export_merge_tree_part_max_bytes_per_file", 0, 0, "New setting."},
60
60
{"export_merge_tree_part_max_rows_per_file", 0, 0, "New setting."},
61
+
{"allow_experimental_hybrid_table", false, false, "Added new setting to allow the Hybrid table engine."}
62
+
{"cluster_table_function_split_granularity", "file", "file", "New setting."},
63
+
{"cluster_table_function_buckets_batch_size", 0, 0, "New setting."},
64
+
{"arrow_flight_request_descriptor_type", "path", "path", "New setting. Type of descriptor to use for Arrow Flight requests: 'path' or 'command'. Dremio requires 'command'."},
65
+
{"send_profile_events", true, true, "New setting. Whether to send profile events to the clients."},
66
+
{"into_outfile_create_parent_directories", false, false, "New setting"},
67
+
{"correlated_subqueries_default_join_kind", "left", "right", "New setting. Default join kind for decorrelated query plan."},
{"max_projection_rows_to_use_projection_index", 1'000'000, 1'000'000, "New setting"},
71
+
{"min_table_rows_to_use_projection_index", 1'000'000, 1'000'000, "New setting"},
72
+
{"use_text_index_dictionary_cache", false, false, "New setting"},
73
+
{"use_text_index_header_cache", false, false, "New setting"},
74
+
{"use_text_index_postings_cache", false, false, "New setting"},
75
+
{"s3_retry_attempts", 500, 500, "Changed the value of the obsolete setting"},
76
+
{"http_write_exception_in_output_format", true, false, "Changed for consistency across formats"},
77
+
{"optimize_const_name_size", -1, 256, "Replace with scalar and use hash as a name for large constants (size is estimated by name length)"},
78
+
{"enable_lazy_columns_replication", false, true, "Enable lazy columns replication in JOIN and ARRAY JOIN by default"},
79
+
{"allow_special_serialization_kinds_in_output_formats", false, true, "Enable direct output of special columns representations like Sparse/Replicated in some output formats"},
80
+
{"allow_experimental_alias_table_engine", false, false, "New setting"},
81
+
{"input_format_parquet_local_time_as_utc", false, true, "Use more appropriate type DateTime64(..., 'UTC') for parquet 'local time without timezone' type."},
82
+
{"input_format_parquet_verify_checksums", true, true, "New setting."},
83
+
{"output_format_parquet_write_checksums", false, true, "New setting."},
{"allow_special_serialization_kinds_in_output_formats", false, false, "Add a setting to allow output of special columns representations like Sparse/Replicated without converting them to full columns"},
90
+
{"enable_lazy_columns_replication", false, false, "Add a setting to enable lazy columns replication in JOIN and ARRAY JOIN"},
91
+
{"correlated_subqueries_default_join_kind", "left", "right", "New setting. Default join kind for decorrelated query plan."},
92
+
{"show_data_lake_catalogs_in_system_tables", true, false, "Disable catalogs in system tables by default"},
93
+
{"optimize_rewrite_like_perfect_affix", false, true, "New setting"},
94
+
{"allow_dynamic_type_in_join_keys", true, false, "Disallow using Dynamic type in JOIN keys by default"},
95
+
{"s3queue_keeper_fault_injection_probability", 0, 0, "New setting."},
96
+
{"enable_join_runtime_filters", false, false, "New setting"},
97
+
{"join_runtime_filter_exact_values_limit", 10000, 10000, "New setting"},
98
+
{"join_runtime_bloom_filter_bytes", 512_KiB, 512_KiB, "New setting"},
99
+
{"join_runtime_bloom_filter_hash_functions", 3, 3, "New setting"},
100
+
{"use_join_disjunctions_push_down", false, false, "New setting."},
101
+
{"joined_block_split_single_row", false, false, "New setting"},
102
+
{"temporary_files_buffer_size", DBMS_DEFAULT_BUFFER_SIZE, DBMS_DEFAULT_BUFFER_SIZE, "New setting"},
103
+
{"rewrite_in_to_join", false, false, "New experimental setting"},
104
+
{"delta_lake_log_metadata", false, false, "New setting."},
105
+
{"distributed_cache_prefer_bigger_buffer_size", false, false, "New setting."},
106
+
{"allow_experimental_qbit_type", false, false, "New experimental setting"},
107
+
{"optimize_qbit_distance_function_reads", true, true, "New setting"},
108
+
{"read_from_distributed_cache_if_exists_otherwise_bypass_cache", false, false, "New setting"},
109
+
{"s3_slow_all_threads_after_retryable_error", false, false, "Disable the setting by default"},
110
+
{"backup_slow_all_threads_after_retryable_s3_error", false, false, "Disable the setting by default"},
111
+
{"enable_http_compression", false, true, "It should be beneficial in general"},
112
+
{"inject_random_order_for_select_without_order_by", false, false, "New setting"},
113
+
{"exclude_materialize_skip_indexes_on_insert", "", "", "New setting."},
114
+
{"optimize_empty_string_comparisons", false, true, "A new setting."},
{"schema_inference_make_columns_nullable", 1, 3, "Take nullability information from Parquet/ORC/Arrow metadata by default, instead of making everything nullable."},
117
+
{"materialized_views_squash_parallel_inserts", false, true, "Added setting to preserve old behavior if needed."},
118
+
{"distributed_cache_connect_timeout_ms", 50, 50, "New setting"},
119
+
{"distributed_cache_receive_timeout_ms", 3000, 3000, "New setting"},
120
+
{"distributed_cache_send_timeout_ms", 3000, 3000, "New setting"},
121
+
{"distributed_cache_tcp_keep_alive_timeout_ms", 2900, 2900, "New setting"},
0 commit comments