Commit 27f1df7

Merge pull request ClickHouse#78637 from Algunenano/fix_attach

Remove query settings during attach

2 parents: 08051d6 + f8ab2f5
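In plain terms: before 24.10, a CREATE TABLE ... SETTINGS ... statement could be persisted in the table's metadata .sql with the query-level settings sitting next to the engine definition, and some engines silently ignored them. Once those engines started throwing on unknown settings, the old metadata could no longer be attached after an upgrade. A hypothetical sketch of the mismatch in Python (the string is an assumption for illustration, not verbatim ClickHouse metadata):

    # Hypothetical sketch, not verbatim ClickHouse metadata: roughly what a
    # pre-24.10 server could persist for the table created in the new test below.
    PERSISTED_BY_24_9 = (
        "ATTACH TABLE b (...) "
        "ENGINE = S3('http://minio1:9001/root/data/clickhouse/part1.csv', ...) "
        "SETTINGS s3_create_new_file_on_insert = 1"
    )
    # Newer storage code throws on the engine-level setting, so before this fix
    # the server could not re-attach such a table on startup; after the fix the
    # setting is treated as query-level and the definition loads again.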

File tree

5 files changed: +114, -12 lines

src/Databases/DatabaseOnDisk.cpp

Lines changed: 11 additions & 12 deletions
@@ -18,6 +18,7 @@
 #include <Interpreters/Context.h>
 #include <Interpreters/DatabaseCatalog.h>
 #include <Interpreters/InterpreterCreateQuery.h>
+#include <Interpreters/InterpreterSetQuery.h>
 #include <Parsers/ASTCreateQuery.h>
 #include <Parsers/ASTFunction.h>
 #include <Parsers/ParserCreateQuery.h>

@@ -131,18 +132,16 @@ std::pair<String, StoragePtr> createTableFromAST(
         }
     }

-    return
-    {
-        ast_create_query.getTable(),
-        StorageFactory::instance().get(
-            ast_create_query,
-            table_data_path_relative,
-            context,
-            context->getGlobalContext(),
-            columns,
-            constraints,
-            mode)
-    };
+    /// Before 24.10 it was possible for query settings to be stored with the .sql definition with some engines, which would ignore them.
+    /// Later (breaking) changes to table storages made the engines throw, which now prevents attaching old definitions that include
+    /// those query settings.
+    /// In order to ignore them now, we call `applySettingsFromQuery`, which moves the settings from engine level to query level.
+    auto ast = std::make_shared<ASTCreateQuery>(std::move(ast_create_query));
+    InterpreterSetQuery::applySettingsFromQuery(ast, context);
+
+    return {
+        ast->getTable(),
+        StorageFactory::instance().get(*ast, table_data_path_relative, context, context->getGlobalContext(), columns, constraints, mode)};
 }
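As a mental model of what `applySettingsFromQuery` achieves here, consider a schematic Python analogue (the dict layout and helper are illustrative assumptions, not ClickHouse's AST or API): settings found at engine level in the persisted definition are hoisted to query level before `StorageFactory::instance().get` runs, so the storage engine never sees them.

    # Schematic analogue only; ClickHouse operates on ASTCreateQuery, not dicts.
    def hoist_engine_settings(create_query: dict) -> dict:
        """Move settings from the engine clause to query level."""
        engine_settings = create_query.get("engine", {}).pop("settings", {})
        # Settings already written at query level keep precedence here (an
        # assumption of this toy model, not a documented ClickHouse guarantee).
        create_query["settings"] = {**engine_settings, **create_query.get("settings", {})}
        return create_query

    old_definition = {
        "table": "b",
        "engine": {"name": "S3", "settings": {"s3_create_new_file_on_insert": 1}},
    }
    print(hoist_engine_settings(old_definition))
    # -> {'table': 'b', 'engine': {'name': 'S3'},
    #     'settings': {'s3_create_new_file_on_insert': 1}}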

tests/integration/test_session_settings_table/__init__.py

Whitespace-only changes.
tests/integration/test_session_settings_table/data/clickhouse/part1.csv

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+a,b
+1,2

tests/integration/test_session_settings_table/test.py

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
+import logging
+import os
+
+import pytest
+
+from helpers.cluster import ClickHouseCluster
+from helpers.config_cluster import minio_secret_key
+from helpers.mock_servers import start_mock_servers
+from helpers.test_tools import TSV
+
+logging.getLogger().setLevel(logging.INFO)
+logging.getLogger().addHandler(logging.StreamHandler())
+
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+S3_DATA = [
+    "data/clickhouse/part1.csv",
+]
+
+
+def create_buckets_s3(cluster):
+    minio = cluster.minio_client
+
+    for file in S3_DATA:
+        minio.fput_object(
+            bucket_name=cluster.minio_bucket,
+            object_name=file,
+            file_path=os.path.join(SCRIPT_DIR, file),
+        )
+    for obj in minio.list_objects(cluster.minio_bucket, recursive=True):
+        print(obj.object_name)
+
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster = ClickHouseCluster(__file__)
+        # Until 24.10, query-level settings were specified in the .sql file
+        cluster.add_instance(
+            "old_node",
+            image="clickhouse/clickhouse-server",
+            tag="24.9.2.42",
+            with_installed_binary=True,
+            with_minio=True,
+            stay_alive=True,
+        )
+
+        logging.info("Starting cluster...")
+        cluster.start()
+        logging.info("Cluster started")
+
+        create_buckets_s3(cluster)
+
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_upgrade_with_query_setting_in_create(started_cluster):
+    node = started_cluster.instances["old_node"]
+    node.query(
+        f"""CREATE TABLE b Engine = S3('http://minio1:9001/root/data/clickhouse/part1.csv', 'minio', '{minio_secret_key}') SETTINGS s3_create_new_file_on_insert = 1;"""
+    )
+
+    try:
+        show_query = "SHOW CREATE TABLE b"
+        node.query(show_query)
+
+        node.restart_with_latest_version()
+        node.query(show_query)
+    finally:
+        node.query("DROP TABLE b")
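The test only asserts that SHOW CREATE TABLE keeps working across the upgrade, i.e. that the old metadata re-attaches at all. If one also wanted to check where the setting lands, a hypothetical extra assertion (not part of the commit, and assuming the upgraded server still names the setting in the normalized definition) might look like:

    def assert_setting_preserved(node):
        # Hypothetical check, not in the commit: assumes the normalized
        # CREATE statement still mentions the setting after the upgrade.
        create = node.query("SHOW CREATE TABLE b")
        assert "s3_create_new_file_on_insert" in create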

tests/integration/test_storage_iceberg/test.py

Lines changed: 30 additions & 0 deletions
@@ -1102,6 +1102,36 @@ def execute_spark_query(query: str):
         ],
     )

+    # Do a single check to verify that restarting CH maintains the setting (ATTACH)
+    # We are just interested in the setting working after restart, so no need to run it on all combinations
+    if format_version == "1" and storage_type == "s3" and not is_table_function:
+
+        instance.restart_clickhouse()
+
+        execute_spark_query(
+            f"""
+            ALTER TABLE {TABLE_NAME} RENAME COLUMN e TO z;
+        """
+        )
+
+        check_schema_and_data(
+            instance,
+            table_select_expression,
+            [
+                ["b", "Nullable(Float64)"],
+                ["f", "Nullable(Int64)"],
+                ["c", "Decimal(12, 2)"],
+                ["z", "Nullable(String)"],
+            ],
+            [
+                ["3", "-30", "7.12", "AAA"],
+                ["3", "12", "-9.13", "BBB"],
+                ["3.4", "\\N", "-9.13", "\\N"],
+                ["5", "7", "18.1", "\\N"],
+                ["\\N", "4", "7.12", "\\N"],
+            ],
+        )
+

 @pytest.mark.parametrize("format_version", ["1", "2"])
 @pytest.mark.parametrize("storage_type", ["s3", "azure", "local"])
