Skip to content

Commit c554cb9

Browse files
Merge pull request ClickHouse#79113 from ClickHouse/settings-lost-replica
Move settings from engine to query level during `recoverLostReplica`
2 parents b086eae + ff493ca commit c554cb9

File tree

3 files changed

+90
-1
lines changed

3 files changed

+90
-1
lines changed

src/Databases/DatabaseReplicated.cpp

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include <Interpreters/DDLTask.h>
2424
#include <Interpreters/DatabaseCatalog.h>
2525
#include <Interpreters/InterpreterCreateQuery.h>
26+
#include <Interpreters/InterpreterSetQuery.h>
2627
#include <Interpreters/ReplicatedDatabaseQueryStatusSource.h>
2728
#include <Interpreters/evaluateConstantExpression.h>
2829
#include <Interpreters/executeDDLQueryOnCluster.h>
@@ -1462,8 +1463,13 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
14621463
}
14631464

14641465
auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_query_string);
1465-
LOG_INFO(log, "Executing {}", query_ast->formatForLogging());
1466+
14661467
auto create_query_context = make_query_context();
1468+
/// Check larger comment in DatabaseOnDisk::createTableFromAST
1469+
/// TL;DR applySettingsFromQuery will move the settings from engine to query level
1470+
/// making it possible to overcome a backward incompatible change.
1471+
InterpreterSetQuery::applySettingsFromQuery(query_ast, create_query_context);
1472+
LOG_INFO(log, "Executing {}", query_ast->formatForLogging());
14671473
InterpreterCreateQuery(query_ast, create_query_context).execute();
14681474
};
14691475

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
a,b
2+
1,2
Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
1+
import logging
2+
import os
3+
4+
import pytest
5+
6+
from helpers.cluster import ClickHouseCluster
7+
from helpers.config_cluster import minio_secret_key
8+
from helpers.mock_servers import start_mock_servers
9+
from helpers.test_tools import TSV
10+
11+
# Send test logs to stderr at INFO level so cluster startup/progress is visible
# when the integration test runs.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())

# Directory containing this test file; fixture paths below are resolved
# relative to it.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Local fixture files uploaded to the MinIO bucket before tests run
# (consumed by create_buckets_s3); paths double as the S3 object names.
S3_DATA = [
    "data/clickhouse/part1.csv",
]
18+
19+
20+
def create_buckets_s3(cluster):
    """Upload every fixture file listed in S3_DATA into the cluster's MinIO bucket.

    Each path in S3_DATA is relative to this test file's directory and is
    reused verbatim as the object name in the bucket. The bucket contents
    are printed afterwards as a debugging aid.
    """
    client = cluster.minio_client
    bucket = cluster.minio_bucket

    for relative_path in S3_DATA:
        client.fput_object(
            bucket_name=bucket,
            object_name=relative_path,
            file_path=os.path.join(SCRIPT_DIR, relative_path),
        )

    for uploaded in client.list_objects(bucket, recursive=True):
        print(uploaded.object_name)
31+
32+
33+
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped two-node cluster: one pre-24.10 server and one current build.

    The old node writes table metadata the way servers did before query-level
    settings were moved out of the stored .sql; the new node is later used to
    exercise the replica-recovery path against that metadata. MinIO is started
    and seeded with the fixture data before the cluster is handed to tests.
    """
    try:
        cluster = ClickHouseCluster(__file__)

        # Until 24.10, query level settings were specified in the .sql file
        cluster.add_instance(
            "old_node",
            image="clickhouse/clickhouse-server",
            tag="24.9.2.42",
            with_zookeeper=True,
            with_installed_binary=True,
            with_minio=True,
            stay_alive=True,
        )
        cluster.add_instance(
            "new_node",
            with_zookeeper=True,
        )

        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        # Seed MinIO so the S3 table created by the test has data to point at.
        create_buckets_s3(cluster)

        yield cluster
    finally:
        cluster.shutdown()
62+
63+
64+
def test_query_settings_in_create_recover_lost_replica(started_cluster):
    """Query-level settings stored in table metadata must survive replica recovery.

    An old-version node creates a Replicated database and an S3 table whose
    CREATE statement carries a query-level setting
    (s3_create_new_file_on_insert). A new-version node then attaches a second
    replica of the same database, which triggers `recoverLostReplica`; the
    CREATE replayed from the ZooKeeper metadata must execute without error.
    """
    old_node = started_cluster.instances["old_node"]
    new_node = started_cluster.instances["new_node"]

    # Replica 1, written by the pre-24.10 server.
    old_node.query("DROP DATABASE IF EXISTS replicated_lost_replica SYNC")
    old_node.query(
        "CREATE DATABASE replicated_lost_replica ENGINE = Replicated('/test/replicated_lost_replica', 'shard1', 'replica' || '1');"
    )
    old_node.query("DROP TABLE IF EXISTS replicated_lost_replica.b")
    old_node.query(
        f"""CREATE TABLE replicated_lost_replica.b Engine = S3('http://minio1:9001/root/data/clickhouse/part1.csv', 'minio', '{minio_secret_key}') SETTINGS s3_create_new_file_on_insert = 1;"""
    )

    new_node.query("DROP DATABASE IF EXISTS replicated_lost_replica SYNC")
    # Adding new replica will trigger the `recoverLostReplica` method.
    new_node.query(
        "CREATE DATABASE replicated_lost_replica ENGINE = Replicated('/test/replicated_lost_replica', 'shard1', 'replica' || '2');"
    )
    new_node.query("SYSTEM SYNC DATABASE REPLICA replicated_lost_replica")

0 commit comments

Comments
 (0)