Commit 6384aee

Merge branch 'antalya-25.8' into backports/antalya-25.8/90490

2 parents 462ac2e + bb1c5a4

File tree

8 files changed: +602 / -19 lines changed


ci/docker/integration/runner/requirements.txt

Lines changed: 4 additions & 3 deletions

@@ -15,8 +15,8 @@ azure-core==1.30.1
 azure-storage-blob==12.19.0
 bcrypt==4.1.3
 beautifulsoup4==4.12.3
-boto3[all]==1.37.7
-botocore==1.37.7
+boto3==1.39.11
+botocore==1.39.11
 bs4==0.0.2
 cassandra-driver==3.29.0
 certifi==2025.4.26
@@ -97,7 +97,8 @@ redis==5.0.1
 requests-kerberos==0.14.0
 requests==2.32.4
 rich==13.9.4
-s3transfer==0.11.4
+s3fs==2024.12.0
+s3transfer==0.13.0
 setuptools==78.1.1
 simplejson==3.19.2
 sortedcontainers==2.4.0

src/Interpreters/DDLWorker.cpp

Lines changed: 21 additions & 16 deletions

@@ -1140,7 +1140,6 @@ bool DDLWorker::initializeMainThread()
             auto zookeeper = getAndSetZooKeeper();
             zookeeper->createAncestors(fs::path(queue_dir) / "");
             initializeReplication();
-            markReplicasActive(true);
             initialized = true;
             return true;
         }
@@ -1213,6 +1212,14 @@ void DDLWorker::runMainThread()
             }
 
             cleanup_event->set();
+            try
+            {
+                markReplicasActive(reinitialized);
+            }
+            catch (...)
+            {
+                tryLogCurrentException(log, "An error occurred when markReplicasActive: ");
+            }
             scheduleTasks(reinitialized);
             subsequent_errors_count = 0;
 
@@ -1291,20 +1298,23 @@ void DDLWorker::createReplicaDirs(const ZooKeeperPtr & zookeeper, const NameSet
         zookeeper->createAncestors(fs::path(replicas_dir) / host_id / "");
 }
 
-void DDLWorker::markReplicasActive(bool /*reinitialized*/)
+void DDLWorker::markReplicasActive(bool reinitialized)
 {
     auto zookeeper = getZooKeeper();
 
-    // Reset all active_node_holders
-    for (auto & it : active_node_holders)
+    if (reinitialized)
     {
-        auto & active_node_holder = it.second.second;
-        if (active_node_holder)
-            active_node_holder->setAlreadyRemoved();
-        active_node_holder.reset();
-    }
+        // Reset all active_node_holders
+        for (auto & it : active_node_holders)
+        {
+            auto & active_node_holder = it.second.second;
+            if (active_node_holder)
+                active_node_holder->setAlreadyRemoved();
+            active_node_holder.reset();
+        }
 
-    active_node_holders.clear();
+        active_node_holders.clear();
+    }
 
     for (auto it = active_node_holders.begin(); it != active_node_holders.end();)
     {
@@ -1385,12 +1395,7 @@ void DDLWorker::markReplicasActive(bool /*reinitialized*/)
         {
            zookeeper->deleteEphemeralNodeIfContentMatches(active_path, active_id);
        }
-        Coordination::Requests ops;
-        ops.emplace_back(zkutil::makeCreateRequest(active_path, active_id, zkutil::CreateMode::Ephemeral));
-        /// To bump node mtime
-        ops.emplace_back(zkutil::makeSetRequest(fs::path(replicas_dir) / host_id, "", -1));
-        zookeeper->multi(ops);
-
+        zookeeper->create(active_path, active_id, zkutil::CreateMode::Ephemeral);
         auto active_node_holder_zookeeper = zookeeper;
         auto active_node_holder = zkutil::EphemeralNodeHolder::existing(active_path, *active_node_holder_zookeeper);
         active_node_holders[host_id] = {active_node_holder_zookeeper, active_node_holder};
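
The change above moves markReplicasActive() out of initializeMainThread() and into the main loop, wrapped in try/catch so that a transient coordination failure is only logged and does not block task scheduling. Below is a minimal standalone sketch of that pattern; Worker, refreshPresence and runIteration are hypothetical names used for illustration, not the actual DDLWorker API:

#include <iostream>
#include <stdexcept>

struct Worker
{
    // Stand-in for DDLWorker::markReplicasActive: (re)registers this replica's
    // ephemeral "active" marker; here it only simulates a coordination failure.
    void refreshPresence(bool reinitialized)
    {
        if (reinitialized)
            throw std::runtime_error("keeper unavailable");
    }

    void scheduleTasks(bool /*reinitialized*/)
    {
        std::cout << "scheduling tasks\n";
    }

    // One iteration of the main loop: the presence refresh is guarded so that
    // a failure is logged and the loop still proceeds to schedule tasks.
    void runIteration(bool reinitialized)
    {
        try
        {
            refreshPresence(reinitialized);
        }
        catch (...)
        {
            std::cout << "An error occurred when refreshing presence\n";
        }
        scheduleTasks(reinitialized);
    }
};

int main()
{
    Worker worker;
    worker.runIteration(/*reinitialized=*/ true);  // refresh fails, tasks are still scheduled
    worker.runIteration(/*reinitialized=*/ false); // normal iteration
}

Keeping the guarded call inside the loop means the registration can be retried on the next iteration instead of failing worker initialization outright.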

src/Storages/ObjectStorage/DataLakes/Iceberg/IcebergMetadata.cpp

Lines changed: 4 additions & 0 deletions

@@ -990,12 +990,16 @@ std::optional<size_t> IcebergMetadata::totalBytes(ContextPtr local_context) cons
 std::optional<String> IcebergMetadata::partitionKey(ContextPtr) const
 {
     SharedLockGuard lock(mutex);
+    if (!relevant_snapshot)
+        return {};
     return relevant_snapshot->partition_key;
 }
 
 std::optional<String> IcebergMetadata::sortingKey(ContextPtr) const
 {
     SharedLockGuard lock(mutex);
+    if (!relevant_snapshot)
+        return {};
     return relevant_snapshot->sorting_key;
 }
 
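
The added checks guard against dereferencing relevant_snapshot when no snapshot has been resolved yet. A minimal standalone sketch of the same guard follows, using a hypothetical Metadata/Snapshot pair and std::shared_mutex in place of ClickHouse's SharedLockGuard:

#include <iostream>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <string>

struct Snapshot
{
    std::optional<std::string> partition_key;
};

class Metadata
{
public:
    std::optional<std::string> partitionKey() const
    {
        std::shared_lock lock(mutex);   // shared (read) lock over the snapshot state
        if (!relevant_snapshot)
            return {};                  // no snapshot resolved yet: report "none" instead of crashing
        return relevant_snapshot->partition_key;
    }

    void setSnapshot(Snapshot snapshot)
    {
        std::unique_lock lock(mutex);
        relevant_snapshot = std::move(snapshot);
    }

private:
    mutable std::shared_mutex mutex;
    std::optional<Snapshot> relevant_snapshot;
};

int main()
{
    Metadata metadata;
    std::cout << metadata.partitionKey().has_value() << '\n'; // 0: guarded, no snapshot yet

    Snapshot snapshot;
    snapshot.partition_key = "toYYYYMM(ts)";
    metadata.setSnapshot(snapshot);
    std::cout << *metadata.partitionKey() << '\n';            // toYYYYMM(ts)
}

Returning an empty optional matches the signature callers already handle, so a table without a resolved snapshot simply reports no partition or sorting key.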

Lines changed: 62 additions & 0 deletions

@@ -0,0 +1,62 @@
+services:
+  nessie:
+    image: ghcr.io/projectnessie/nessie:latest
+    depends_on:
+      minio:
+        condition: service_started
+    ports:
+      - "19120:19120"
+    environment:
+      - nessie.version.store.type=IN_MEMORY
+      - nessie.catalog.default-warehouse=warehouse
+      - nessie.catalog.warehouses.warehouse.location=s3://warehouse-rest/
+      - nessie.catalog.service.s3.default-options.endpoint=http://minio:9000/
+      - nessie.catalog.service.s3.default-options.external-endpoint=http://127.0.0.1:9002/
+      - nessie.catalog.service.s3.default-options.access-key=urn:nessie-secret:quarkus:nessie.catalog.secrets.access-key
+      - nessie.catalog.service.s3.default-options.path-style-access=true
+      - nessie.catalog.service.s3.default-options.auth-type=STATIC
+      - nessie.catalog.secrets.access-key.name=minio
+      - nessie.catalog.secrets.access-key.secret=ClickHouse_Minio_P@ssw0rd
+      - nessie.catalog.service.s3.default-options.region=us-east-1
+      - nessie.server.authentication.enabled=false
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:19120/api/v2/config"]
+      interval: 30s
+      timeout: 10s
+      retries: 5
+      start_period: 60s
+
+  # TODO: can we simply use with_minio=True instead?
+  minio:
+    image: minio/minio:RELEASE.2024-07-31T05-46-26Z
+    environment:
+      - MINIO_ROOT_USER=minio
+      - MINIO_ROOT_PASSWORD=ClickHouse_Minio_P@ssw0rd
+      - MINIO_DOMAIN=minio
+    networks:
+      default:
+        aliases:
+          - warehouse-rest.minio
+    ports:
+      - "9001:9001"
+      - "9002:9000"
+    command: ["server", "/data", "--console-address", ":9001"]
+
+  # TODO: move this code to cluster.py
+  mc:
+    depends_on:
+      - minio
+    # Stick to version with "mc config"
+    image: minio/mc:RELEASE.2025-04-16T18-13-26Z
+    environment:
+      - AWS_ACCESS_KEY_ID=minio
+      - AWS_SECRET_ACCESS_KEY=ClickHouse_Minio_P@ssw0rd
+      - AWS_REGION=us-east-1
+    entrypoint: >
+      /bin/sh -c "
+      until (/usr/bin/mc config host add minio http://minio:9000 minio ClickHouse_Minio_P@ssw0rd) do echo '...waiting...' && sleep 1; done;
+      /usr/bin/mc rm -r --force minio/warehouse-rest;
+      /usr/bin/mc mb minio/warehouse-rest --ignore-existing;
+      /usr/bin/mc policy set public minio/warehouse-rest;
+      tail -f /dev/null
+      "
Lines changed: 1 addition & 0 deletions

@@ -0,0 +1 @@
+#!/usr/bin/env python3
Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+<clickhouse>
+    <backups>
+        <allowed_path>/backups</allowed_path>
+    </backups>
+</clickhouse>
