 
 import pytest
 
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, QueryRuntimeException
 from helpers.network import PartitionManager
 from helpers.test_tools import assert_eq_with_retry, assert_logs_contain
 
@@ -354,8 +354,13 @@ def test_pause(started_cluster, cleanup):
     assert node2.query("select * from re.a") == "1\n"
     node2.query("system stop replicated view re.a")
     node1.restart_clickhouse() # just to guarantee that it notices the new znode
+    try:
+        node2.query("system wait view re.a")
+    except QueryRuntimeException as ex:
+        # If the node1.restart_clickhouse() interrupted a refresh, the error message (with substring
+        # "cancelled") is written to keeper, then thrown by "system wait view". That's normal.
+        assert "cancelled" in str(ex)
     node2.query(
-        "system wait view re.a;"
         "truncate table re.src;"
         "insert into re.src values (2);")
     time.sleep(3)
@@ -415,6 +420,8 @@ def do_test_backup(to_table):
 
     assert node1.query(tables_exist_query) == "2\n"
     assert node2.query(tables_exist_query) == "2\n"
+    node1.query(f'SYSTEM SYNC REPLICA re.{target}')
+    node2.query(f'SYSTEM SYNC REPLICA re.{target}')
     assert node1.query(f'SELECT * FROM re.{target}') == '1\n'
     assert node2.query(f'SELECT * FROM re.{target}') == '1\n'
 
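
The try/except added in test_pause handles one legitimate race: "system wait view" rethrows the last refresh error recorded in Keeper, and a refresh interrupted by node1.restart_clickhouse() ends with a "cancelled" error. If other tests need the same tolerance, it could be factored into a small helper along these lines; this is only a sketch, the helper name is hypothetical, and the import is the same harness class used in the diff above.

from helpers.cluster import QueryRuntimeException


def wait_view_tolerating_cancel(node, view):
    # Wait for the view's current refresh to finish. A refresh interrupted by a
    # server restart surfaces as a QueryRuntimeException whose message contains
    # "cancelled"; treat that as expected and rethrow anything else.
    try:
        node.query(f"system wait view {view}")
    except QueryRuntimeException as ex:
        if "cancelled" not in str(ex):
            raise


# In the test above this would read: wait_view_tolerating_cancel(node2, "re.a")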
|
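The SYSTEM SYNC REPLICA calls added to do_test_backup make both replicas fetch any outstanding parts before the SELECT assertions, so the test does not race against replication after the restore. A minimal sketch of how that could be factored out, assuming the same node1/node2 objects used throughout this test file (the helper name is hypothetical):

def sync_all_replicas(nodes, table):
    # Block on each replica until it has fetched all parts currently listed in
    # Keeper for the table, so a following SELECT sees the row on every node.
    for node in nodes:
        node.query(f"SYSTEM SYNC REPLICA {table}")


# e.g. sync_all_replicas([node1, node2], f"re.{target}")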
|