
Commit 147d21c

rmv super flaky test
1 parent 522cfdb commit 147d21c

File tree

1 file changed: +0 -54 lines changed

  • tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py


tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py

Lines changed: 0 additions & 54 deletions
@@ -241,60 +241,6 @@ def test_drop_source_table_during_export(cluster):
     assert node.query(f"SELECT count() FROM s3(s3_conn, filename='{s3_table}/commit_*', format=LineAsString)") == '0\n', "Background operations completed even with the table dropped"
 
 
-def test_drop_destination_table_during_export(cluster):
-    node = cluster.instances["replica1"]
-    # node2 = cluster.instances["replica2"]
-    watcher_node = cluster.instances["watcher_node"]
-
-    mt_table = "drop_destination_table_during_export_mt_table"
-    s3_table = "drop_destination_table_during_export_s3_table"
-
-    create_tables_and_insert_data(node, mt_table, s3_table, "replica1")
-    create_s3_table(watcher_node, s3_table)
-
-    # Block S3/MinIO requests to keep exports alive via retry mechanism
-    # This allows ZooKeeper operations (KILL) to proceed quickly
-    minio_ip = cluster.minio_ip
-    minio_port = cluster.minio_port
-
-    with PartitionManager() as pm:
-        # Block responses from MinIO (source_port matches MinIO service)
-        pm_rule_reject_responses = {
-            "destination": node.ip_address,
-            "source_port": minio_port,
-            "action": "REJECT --reject-with tcp-reset",
-        }
-        pm._add_rule(pm_rule_reject_responses)
-
-        # Block requests to MinIO (destination: MinIO, destination_port: minio_port)
-        pm_rule_reject_requests = {
-            "destination": minio_ip,
-            "destination_port": minio_port,
-            "action": "REJECT --reject-with tcp-reset",
-        }
-        pm._add_rule(pm_rule_reject_requests)
-
-        export_queries = f"""
-            ALTER TABLE {mt_table}
-            EXPORT PARTITION ID '2020' TO TABLE {s3_table}
-            SETTINGS allow_experimental_export_merge_tree_part=1;
-            ALTER TABLE {mt_table}
-            EXPORT PARTITION ID '2021' TO TABLE {s3_table}
-            SETTINGS allow_experimental_export_merge_tree_part=1;
-        """
-
-        node.query(export_queries)
-
-        # The pointer to the destination table is still valid, so the write will continue
-        node.query(f"DROP TABLE {s3_table}")
-
-        # give some time for the export to finish
-        time.sleep(10)
-
-        # not sure this is the expected behavior, but adding until we make a decision
-        assert node.query(f"SELECT count() FROM s3(s3_conn, filename='{s3_table}/commit_*', format=LineAsString)") != '0\n', "Background operations did not complete after dropping the destination table"
-
-
 def test_concurrent_exports_to_different_targets(cluster):
     node = cluster.instances["replica1"]
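
A note on the technique the removed test relied on: rejecting MinIO traffic in both directions with TCP resets keeps the export stuck in its S3 retry loop (leaving a window to race other operations against it) without hanging on half-open connections, while ZooKeeper traffic proceeds normally. A minimal sketch of that pattern as a reusable helper, assuming the integration-test PartitionManager from helpers.network and the same rule-dict shape the test itself passed to _add_rule; block_minio is a hypothetical wrapper, not part of the test suite:

    from contextlib import contextmanager

    from helpers.network import PartitionManager


    @contextmanager
    def block_minio(node, minio_ip, minio_port):
        # Hypothetical helper: reject MinIO traffic in both directions with
        # TCP resets so S3 requests fail fast and stay in their retry loop,
        # while ZooKeeper and interserver traffic continue normally.
        with PartitionManager() as pm:
            # Responses coming back from MinIO, matched by source port.
            pm._add_rule({
                "destination": node.ip_address,
                "source_port": minio_port,
                "action": "REJECT --reject-with tcp-reset",
            })
            # Requests going out to MinIO, matched by destination ip:port.
            pm._add_rule({
                "destination": minio_ip,
                "destination_port": minio_port,
                "action": "REJECT --reject-with tcp-reset",
            })
            yield pm  # rules are torn down when the manager exits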

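If a variant of this test is ever reinstated, the fixed time.sleep(10) followed by a single assertion is the classic flaky pattern: the export may need more or less than ten seconds, and the assertion encodes behavior the in-test comment itself calls undecided. A polling wait is usually more robust. A minimal sketch, assuming only the node.query API and the s3_conn named collection already used above; wait_for_export_commit is a hypothetical helper:

    import time


    def wait_for_export_commit(node, s3_table, timeout_s=60, poll_s=0.5):
        # Hypothetical helper: poll for commit_* files in the destination
        # instead of sleeping a fixed 10 seconds before asserting.
        deadline = time.monotonic() + timeout_s
        while time.monotonic() < deadline:
            count = node.query(
                f"SELECT count() FROM s3(s3_conn, "
                f"filename='{s3_table}/commit_*', format=LineAsString)"
            )
            if count != "0\n":
                return count  # at least one commit file has appeared
            time.sleep(poll_s)
        raise TimeoutError(
            f"no commit_* files appeared for {s3_table} within {timeout_s}s"
        )
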
0 commit comments
