|
| 1 | +import logging |
| 2 | +import os |
| 3 | + |
| 4 | +import pytest |
| 5 | + |
| 6 | +from helpers.cluster import ClickHouseCluster |
| 7 | +from helpers.config_cluster import minio_secret_key |
| 8 | +from helpers.mock_servers import start_mock_servers |
| 9 | +from helpers.test_tools import TSV |
| 10 | + |
# Route all test logging to stderr at INFO so cluster start/S3 upload
# progress is visible in the pytest output.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())

# Directory containing this test file; S3 fixture files are resolved
# relative to it.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Local fixture files uploaded to Minio before the tests run; the object
# name in the bucket mirrors the relative path.
S3_DATA = [
    "data/clickhouse/part1.csv",
]
| 18 | + |
| 19 | + |
def create_buckets_s3(cluster):
    """Upload the fixture files from S3_DATA into the cluster's Minio bucket.

    Each local file is stored under an object name equal to its path
    relative to SCRIPT_DIR, then the bucket contents are logged for
    debugging.
    """
    minio = cluster.minio_client

    for file in S3_DATA:
        minio.fput_object(
            bucket_name=cluster.minio_bucket,
            object_name=file,
            file_path=os.path.join(SCRIPT_DIR, file),
        )
    # Use the module-configured logger (not print) so the listing shows up
    # alongside the rest of the test log output.
    for obj in minio.list_objects(cluster.minio_bucket, recursive=True):
        logging.info(obj.object_name)
| 31 | + |
| 32 | + |
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped two-node cluster: an old (24.9) node and a current one.

    Until 24.10, query-level settings were specified in the .sql metadata
    file, so the old node is pinned to 24.9.2.42 to exercise the legacy
    format. The Minio bucket is populated before yielding.
    """
    # Create the cluster object outside the try block: if construction
    # itself fails, there is nothing to shut down, and referencing an
    # unbound `cluster` in `finally` would raise NameError and mask the
    # original exception.
    cluster = ClickHouseCluster(__file__)
    try:
        cluster.add_instance(
            "old_node",
            image="clickhouse/clickhouse-server",
            tag="24.9.2.42",
            with_zookeeper=True,
            with_installed_binary=True,
            with_minio=True,
            stay_alive=True,
        )

        cluster.add_instance(
            "new_node",
            with_zookeeper=True,
        )

        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        create_buckets_s3(cluster)

        yield cluster
    finally:
        cluster.shutdown()
| 62 | + |
| 63 | + |
def test_query_settings_in_create_recover_lost_replica(started_cluster):
    """Check that a table created with query-level SETTINGS on an old node
    is replayed correctly when a new replica recovers the database.
    """
    old = started_cluster.instances["old_node"]
    new = started_cluster.instances["new_node"]

    # Start from a clean slate on the old (pre-24.10) node and create a
    # table whose DDL carries a SETTINGS clause.
    old.query("DROP DATABASE IF EXISTS replicated_lost_replica SYNC")
    old.query(
        "CREATE DATABASE replicated_lost_replica ENGINE = Replicated('/test/replicated_lost_replica', 'shard1', 'replica' || '1');"
    )
    old.query("DROP TABLE IF EXISTS replicated_lost_replica.b")
    old.query(
        f"""CREATE TABLE replicated_lost_replica.b Engine = S3('http://minio1:9001/root/data/clickhouse/part1.csv', 'minio', '{minio_secret_key}') SETTINGS s3_create_new_file_on_insert = 1;"""
    )

    new.query("DROP DATABASE IF EXISTS replicated_lost_replica SYNC")
    # Adding a second replica triggers the `recoverLostReplica` code path
    # on the new node.
    new.query(
        "CREATE DATABASE replicated_lost_replica ENGINE = Replicated('/test/replicated_lost_replica', 'shard1', 'replica' || '2');"
    )
    new.query("SYSTEM SYNC DATABASE REPLICA replicated_lost_replica")
0 commit comments