Commit 5a8aba7

Merge branch 'antalya-25.6.5' into feature/optimize_count_in_datalake
2 parents 5af7474 + 6a86303

112 files changed: 2528 additions, 292 deletions

.github/actions/create_workflow_report/create_workflow_report.py

Lines changed: 1 addition & 1 deletion
@@ -762,7 +762,7 @@ def create_workflow_report(
         "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC",
         "is_preview": mark_preview,
         "counts": {
-            "jobs_status": f"{sum(fail_results['job_statuses']['job_status'] != 'success')} fail/error",
+            "jobs_status": f"{sum(fail_results['job_statuses']['job_status'].value_counts().get(x, 0) for x in ('failure', 'error'))} fail/error",
             "checks_errors": len(fail_results["checks_errors"]),
             "checks_new_fails": len(fail_results["checks_fails"]),
             "regression_new_fails": len(fail_results["regression_fails"]),

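Why the old expression over-counted: `sum(series != 'success')` treats every non-success status (for example skipped or pending jobs) as a failure, while the new expression counts only explicit failure and error outcomes. A minimal sketch, assuming the `job_status` column is a pandas Series (which the `value_counts()` call implies); the sample statuses are illustrative:

```python
import pandas as pd

# Illustrative job statuses; only "failure" and "error" should count towards "fail/error".
statuses = pd.Series(["success", "failure", "error", "skipped", "pending"], name="job_status")

# Old expression: counts every non-"success" entry, so "skipped" and "pending" inflate the number.
old_count = sum(statuses != "success")  # -> 4

# New expression: counts only explicit "failure"/"error", defaulting to 0 for absent statuses.
new_count = sum(statuses.value_counts().get(x, 0) for x in ("failure", "error"))  # -> 2

print(f"old: {old_count} fail/error, new: {new_count} fail/error")
```
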
ci/jobs/functional_tests.py

Lines changed: 2 additions & 1 deletion
@@ -123,6 +123,7 @@ def run_specific_tests(tests, runs=1):
         "ParallelReplicas": "--no-zookeeper --no-shard --no-parallel-replicas",
         "AsyncInsert": " --no-async-insert",
         "DatabaseReplicated": " --no-stateful --replicated-database --jobs 3",
+        "amd_tsan": " --timeout 1200",  # NOTE (strtgbb): tsan is slow, increase the timeout to avoid timeout errors
     }
 
 
@@ -365,7 +366,7 @@ def start():
 
     # if not info.is_local_run:
     # CH.stop_log_exports()
-    results.append(FTResultsProcessor(wd=temp_dir).run())
+    results.append(FTResultsProcessor(wd=temp_dir, test_options=test_options).run())
     test_result = results[-1]
 
     # invert result status for bugfix validation

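The first hunk adds a tsan-specific timeout to the per-check extra-flags mapping; the second passes the job's `test_options` into `FTResultsProcessor`, which `functional_tests_results.py` below starts using. The helper in this sketch is hypothetical (the hunk does not show how the mapping is consumed), but it illustrates how a check name containing `amd_tsan` would pick up the extra `--timeout 1200` flag:

```python
# Hypothetical sketch: keys are substrings matched against the check name, matching
# entries contribute extra flags to the clickhouse-test invocation. The dictionary
# mirrors the diff above; build_command() itself is not part of the repo.
EXTRA_FLAGS = {
    "ParallelReplicas": "--no-zookeeper --no-shard --no-parallel-replicas",
    "AsyncInsert": " --no-async-insert",
    "DatabaseReplicated": " --no-stateful --replicated-database --jobs 3",
    "amd_tsan": " --timeout 1200",  # tsan builds are slow, so raise the per-test timeout
}

def build_command(check_name: str, base: str = "clickhouse-test") -> str:
    """Append the flags of every entry whose key occurs in the check name."""
    return base + "".join(" " + flags.strip() for key, flags in EXTRA_FLAGS.items() if key in check_name)

print(build_command("amd_tsan, functional tests"))  # -> clickhouse-test --timeout 1200
```
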
ci/jobs/scripts/functional_tests_results.py

Lines changed: 21 additions & 7 deletions
@@ -56,10 +56,11 @@ class Summary:
     success_finish: bool = False
     test_end: bool = True
 
-    def __init__(self, wd):
+    def __init__(self, wd, test_options):
         self.tests_output_file = f"{wd}/test_result.txt"
         # self.test_results_parsed_file = f"{wd}/test_result.tsv"
         # self.status_file = f"{wd}/check_status.tsv"
+        self.test_options = test_options
 
     def _process_test_output(self):
         total = 0
@@ -137,6 +138,8 @@ def _process_test_output(self):
         if DATABASE_SIGN in line:
             test_end = True
 
+        test_options_string = ", ".join(self.test_options)
+
         test_results_ = []
         for test in test_results:
             try:
@@ -153,15 +156,26 @@
                 if test[1] == "FAIL":
                     broken_message = None
                     if test[0] in known_broken_tests.keys():
-                        if known_broken_tests[test[0]].get("message"):
-                            if (
-                                known_broken_tests[test[0]]["message"]
-                                in test_results_[-1].info
-                            ):
-                                broken_message = f"\nMarked as broken, matched message: '{known_broken_tests[test[0]]['message']}'"
+                        message = known_broken_tests[test[0]].get("message")
+                        check_types = known_broken_tests[test[0]].get("check_types")
+                        if check_types and not any(
+                            check_type in test_options_string
+                            for check_type in check_types
+                        ):
+                            broken_message = None
+                        elif message:
+                            if message in test_results_[-1].info:
+                                broken_message = (
+                                    f"\nMarked as broken, matched message: '{message}'"
+                                )
                         else:
                             broken_message = f"\nMarked as broken, no message specified"
 
+                        if broken_message and check_types:
+                            broken_message += (
+                                f", matched one or more check types {check_types}"
+                            )
+
                     if broken_message:
                         broken += 1
                         failed -= 1

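The effect of the new `check_types` field: a known-broken entry now downgrades a FAIL to broken only when one of its check types appears in the job's test options; otherwise the failure stays a real failure. A standalone sketch of the same gating logic, with an illustrative `known_broken_tests` entry and test name (not taken from the commit):

```python
known_broken_tests = {
    "01234_example_test": {                    # illustrative entry, not from the repo
        "message": "Coordination::Exception",  # substring expected in the failure output
        "check_types": ["tsan", "msan"],       # only treat as broken on these check types
    },
}

def broken_reason(test_name, failure_info, test_options):
    """Return the 'Marked as broken' note, or None if the FAIL stays a real failure."""
    entry = known_broken_tests.get(test_name)
    if entry is None:
        return None
    test_options_string = ", ".join(test_options)
    message = entry.get("message")
    check_types = entry.get("check_types")
    # New gating: with check_types set, the entry only applies on matching check types.
    if check_types and not any(ct in test_options_string for ct in check_types):
        return None
    if message:
        if message not in failure_info:
            return None
        reason = f"\nMarked as broken, matched message: '{message}'"
    else:
        reason = "\nMarked as broken, no message specified"
    if check_types:
        reason += f", matched one or more check types {check_types}"
    return reason

# Downgraded to "broken" on a tsan run, kept as a real FAIL on a release run.
print(broken_reason("01234_example_test", "Coordination::Exception: session expired", ["amd_tsan"]))
print(broken_reason("01234_example_test", "Coordination::Exception: session expired", ["amd_release"]))
```
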
docs/en/sql-reference/statements/system.md

Lines changed: 6 additions & 0 deletions
@@ -206,6 +206,12 @@ SYSTEM RELOAD USERS [ON CLUSTER cluster_name]
 
 Normally shuts down ClickHouse (like `service clickhouse-server stop` / `kill {$pid_clickhouse-server}`)
 
+## PRESHUTDOWN {#preshutdown}
+
+<CloudNotSupportedBadge/>
+
+Prepare node for graceful shutdown. Unregister in autodiscovered clusters, stop accepting distributed requests to object storages (s3Cluster, icebergCluster, etc.).
+
 ## KILL {#kill}
 
 Aborts ClickHouse process (like `kill -9 {$ pid_clickhouse-server}`)

programs/server/Server.cpp

Lines changed: 4 additions & 0 deletions
@@ -2292,6 +2292,8 @@ try
 
     }
 
+    global_context->startSwarmMode();
+
     {
         std::lock_guard lock(servers_lock);
         /// We should start interserver communications before (and more important shutdown after) tables.
@@ -2701,6 +2703,8 @@ try
 
     is_cancelled = true;
 
+    global_context->stopSwarmMode();
+
     LOG_DEBUG(log, "Waiting for current connections to close.");
 
     size_t current_connections = 0;

src/Access/Common/AccessType.h

Lines changed: 2 additions & 0 deletions
@@ -74,6 +74,7 @@ enum class AccessType : uint8_t
        enabled implicitly by the grant ALTER_TABLE */\
     M(ALTER_SETTINGS, "ALTER SETTING, ALTER MODIFY SETTING, MODIFY SETTING, RESET SETTING", TABLE, ALTER_TABLE) /* allows to execute ALTER MODIFY SETTING */\
     M(ALTER_MOVE_PARTITION, "ALTER MOVE PART, MOVE PARTITION, MOVE PART", TABLE, ALTER_TABLE) \
+    M(ALTER_EXPORT_PART, "ALTER EXPORT PART, EXPORT PART", TABLE, ALTER_TABLE) \
     M(ALTER_FETCH_PARTITION, "ALTER FETCH PART, FETCH PARTITION", TABLE, ALTER_TABLE) \
     M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \
     M(ALTER_UNLOCK_SNAPSHOT, "UNLOCK SNAPSHOT", TABLE, ALTER_TABLE) \
@@ -200,6 +201,7 @@ enum class AccessType : uint8_t
     M(SYSTEM_TTL_MERGES, "SYSTEM STOP TTL MERGES, SYSTEM START TTL MERGES, STOP TTL MERGES, START TTL MERGES", TABLE, SYSTEM) \
     M(SYSTEM_FETCHES, "SYSTEM STOP FETCHES, SYSTEM START FETCHES, STOP FETCHES, START FETCHES", TABLE, SYSTEM) \
     M(SYSTEM_MOVES, "SYSTEM STOP MOVES, SYSTEM START MOVES, STOP MOVES, START MOVES", TABLE, SYSTEM) \
+    M(SYSTEM_SWARM, "SYSTEM STOP SWARM MODE, SYSTEM START SWARM MODE, STOP SWARM MODE, START SWARM MODE", GLOBAL, SYSTEM) \
     M(SYSTEM_PULLING_REPLICATION_LOG, "SYSTEM STOP PULLING REPLICATION LOG, SYSTEM START PULLING REPLICATION LOG", TABLE, SYSTEM) \
     M(SYSTEM_CLEANUP, "SYSTEM STOP CLEANUP, SYSTEM START CLEANUP", TABLE, SYSTEM) \
     M(SYSTEM_VIEWS, "SYSTEM REFRESH VIEW, SYSTEM START VIEWS, SYSTEM STOP VIEWS, SYSTEM START VIEW, SYSTEM STOP VIEW, SYSTEM CANCEL VIEW, REFRESH VIEW, START VIEWS, STOP VIEWS, START VIEW, STOP VIEW, CANCEL VIEW", VIEW, SYSTEM) \

src/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -138,6 +138,7 @@ add_headers_and_sources(dbms Storages/ObjectStorage/Azure)
 add_headers_and_sources(dbms Storages/ObjectStorage/S3)
 add_headers_and_sources(dbms Storages/ObjectStorage/HDFS)
 add_headers_and_sources(dbms Storages/ObjectStorage/Local)
+add_headers_and_sources(dbms Storages/ObjectStorage/MergeTree)
 add_headers_and_sources(dbms Storages/ObjectStorage/DataLakes)
 add_headers_and_sources(dbms Storages/ObjectStorage/DataLakes/Iceberg)
 add_headers_and_sources(dbms Storages/ObjectStorage/DataLakes/DeltaLake)

src/Common/CurrentMetrics.cpp

Lines changed: 1 addition & 0 deletions
@@ -430,6 +430,7 @@
     M(StartupScriptsExecutionState, "State of startup scripts execution: 0 = not finished, 1 = success, 2 = failure.") \
     \
     M(IsServerShuttingDown, "Indicates if the server is shutting down: 0 = no, 1 = yes") \
+    M(IsSwarmModeEnabled, "Indicates if the swarm mode enabled or not: 0 = disabled, 1 = enabled") \
     \
     M(TotalMergeFailures, "Number of all failed merges since startup, including the ones that were aborted") \
     M(NonAbortedMergeFailures, "Number of failed merges since startup, excluding the merges that were aborted") \

src/Core/ServerSettings.cpp

Lines changed: 1 addition & 0 deletions
@@ -99,6 +99,7 @@ namespace DB
     DECLARE(UInt64, max_unexpected_parts_loading_thread_pool_size, 8, R"(The number of threads to load inactive set of data parts (Unexpected ones) at startup.)", 0) \
     DECLARE(UInt64, max_parts_cleaning_thread_pool_size, 128, R"(The number of threads for concurrent removal of inactive data parts.)", 0) \
     DECLARE(UInt64, max_mutations_bandwidth_for_server, 0, R"(The maximum read speed of all mutations on server in bytes per second. Zero means unlimited.)", 0) \
+    DECLARE(UInt64, max_exports_bandwidth_for_server, 0, R"(The maximum read speed of all exports on server in bytes per second. Zero means unlimited.)", 0) \
     DECLARE(UInt64, max_merges_bandwidth_for_server, 0, R"(The maximum read speed of all merges on server in bytes per second. Zero means unlimited.)", 0) \
     DECLARE(UInt64, max_replicated_fetches_network_bandwidth_for_server, 0, R"(The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.)", 0) \
     DECLARE(UInt64, max_replicated_sends_network_bandwidth_for_server, 0, R"(The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.)", 0) \

src/Core/Settings.cpp

Lines changed: 19 additions & 0 deletions
@@ -1713,6 +1713,22 @@ Possible values:
 - `global` — Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.`
 - `allow` — Allows the use of these types of subqueries.
 )", IMPORTANT) \
+    DECLARE(ObjectStorageClusterJoinMode, object_storage_cluster_join_mode, ObjectStorageClusterJoinMode::ALLOW, R"(
+Changes the behaviour of object storage cluster function or table.
+
+ClickHouse applies this setting when the query contains the product of object storage cluster function ot table, i.e. when the query for a object storage cluster function ot table contains a non-GLOBAL subquery for the object storage cluster function ot table.
+
+Restrictions:
+
+- Only applied for JOIN subqueries.
+- Only if the FROM section uses a object storage cluster function ot table.
+
+Possible values:
+
+- `local` — Replaces the database and table in the subquery with local ones for the destination server (shard), leaving the normal `IN`/`JOIN.`
+- `global` — Unsupported for now. Replaces the `IN`/`JOIN` query with `GLOBAL IN`/`GLOBAL JOIN.`
+- `allow` — Default value. Allows the use of these types of subqueries.
+)", 0) \
 \
 DECLARE(UInt64, max_concurrent_queries_for_all_users, 0, R"(
 Throw exception if the value of this setting is less or equal than the current number of simultaneously processed queries.
@@ -6895,6 +6911,9 @@ Execute request to object storage as remote on one of object_storage_cluster nodes
 DECLARE_WITH_ALIAS(Bool, allow_experimental_time_series_aggregate_functions, false, R"(
 Experimental timeSeries* aggregate functions for Prometheus-like timeseries resampling, rate, delta calculation.
 )", EXPERIMENTAL, allow_experimental_ts_to_grid_aggregate_function) \
+    DECLARE_WITH_ALIAS(Bool, allow_experimental_export_merge_tree_part, false, R"(
+Experimental export merge tree part.
+)", EXPERIMENTAL, allow_experimental_export_merge_tree_part) \
 \
 
     /* ####################################################### */ \