
Commit 99de6ae

Merge branch 'master' into no-sentry
2 parents: fc1eefb + ce890ea

4 files changed: +56 -5 lines


tests/integration/test_refreshable_mat_view_replicated/test.py

Lines changed: 5 additions & 3 deletions
@@ -412,10 +412,12 @@ def test_real_wait_refresh(
     expected_rows += 2
     expect_rows(expected_rows, table=tgt)
 
+    is_close = lambda x, y: x is not None and y is not None and abs(x.timestamp() - y.timestamp()) <= 3
+
     rmv2 = get_rmv_info(
         node,
         "test_rmv",
-        condition=lambda x: x["last_refresh_time"] == rmv["next_refresh_time"],
+        condition=lambda x: is_close(x["last_refresh_time"], rmv["next_refresh_time"]),
         # wait for refresh a little bit more than 10 seconds
         max_attempts=30,
         delay=0.5,
@@ -438,8 +440,8 @@ def test_real_wait_refresh(
 
     assert rmv2["exception"] is None
     assert rmv2["status"] in ["Scheduled", "Running"]
-    assert rmv2["last_success_time"] == rmv["next_refresh_time"]
-    assert rmv2["last_refresh_time"] == rmv["next_refresh_time"]
+    assert is_close(rmv2["last_success_time"], rmv["next_refresh_time"])
+    assert is_close(rmv2["last_refresh_time"], rmv["next_refresh_time"])
     assert rmv2["retry"] == 0 and rmv22["retry"] == 0
 
     for n in nodes:
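
For context, a self-contained sketch of the tolerance check introduced above; the example timestamps are illustrative only, not values from the test:

from datetime import datetime, timedelta

# Same shape as the is_close lambda in the diff: two timestamps count as equal
# when both are present and differ by at most 3 seconds, absorbing scheduling
# jitter between the expected and the actual refresh time.
is_close = lambda x, y: x is not None and y is not None and abs(x.timestamp() - y.timestamp()) <= 3

t0 = datetime(2024, 1, 1, 12, 0, 0)
assert is_close(t0, t0 + timedelta(seconds=2))       # within tolerance
assert not is_close(t0, t0 + timedelta(seconds=10))  # too far apart
assert not is_close(t0, None)                        # a missing value never matches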

tests/integration/test_refreshable_mv/test.py

Lines changed: 9 additions & 2 deletions
@@ -4,7 +4,7 @@
 
 import pytest
 
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, QueryRuntimeException
 from helpers.network import PartitionManager
 from helpers.test_tools import assert_eq_with_retry, assert_logs_contain
 
@@ -354,8 +354,13 @@ def test_pause(started_cluster, cleanup):
     assert node2.query("select * from re.a") == "1\n"
     node2.query("system stop replicated view re.a")
     node1.restart_clickhouse()  # just to guarantee that it notices the new znode
+    try:
+        node2.query("system wait view re.a")
+    except QueryRuntimeException as ex:
+        # If the node1.restart_clickhouse() interrupted a refresh, the error message (with substring
+        # "cancelled") is written to keeper, then thrown by "system wait view". That's normal.
+        assert "cancelled" in str(ex)
     node2.query(
-        "system wait view re.a;"
         "truncate table re.src;"
         "insert into re.src values (2);")
     time.sleep(3)
@@ -415,6 +420,8 @@ def do_test_backup(to_table):
 
     assert node1.query(tables_exist_query) == "2\n"
     assert node2.query(tables_exist_query) == "2\n"
+    node1.query(f'SYSTEM SYNC REPLICA re.{target}')
+    node2.query(f'SYSTEM SYNC REPLICA re.{target}')
     assert node1.query(f'SELECT * FROM re.{target}') == '1\n'
     assert node2.query(f'SELECT * FROM re.{target}') == '1\n'
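
As a side note, a minimal runnable sketch of the wait-and-tolerate-cancellation pattern used in test_pause above; wait_view and its error message are hypothetical stand-ins, only the QueryRuntimeException name mirrors the real helper:

# Hypothetical stand-in for helpers.cluster.QueryRuntimeException, so the
# sketch runs outside the integration-test harness.
class QueryRuntimeException(Exception):
    pass

def wait_view(interrupted=True):
    # Stand-in for node2.query("system wait view re.a"): when a concurrent
    # restart interrupts a refresh, the "cancelled" error recorded in Keeper
    # is rethrown by SYSTEM WAIT VIEW.
    if interrupted:
        raise QueryRuntimeException("refresh was cancelled")

try:
    wait_view()
except QueryRuntimeException as ex:
    # As in the diff: a cancellation caused by the restart is expected and
    # tolerated; any other error would still surface here.
    assert "cancelled" in str(ex)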

tests/queries/0_stateless/03460_alter_materialized_view_on_cluster.reference

Whitespace-only changes.

tests/queries/0_stateless/03460_alter_materialized_view_on_cluster.sql
Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+-- Tags: no-replicated-database
+-- ^ due to the usage of ON CLUSTER queries
+
+SET distributed_ddl_output_mode = 'none', enable_analyzer = true;
+
+drop table if exists source, mview;
+
+CREATE TABLE source
+(
+    timestamp DateTime,
+    card_id UInt64,
+    _id String
+)
+ENGINE = MergeTree Partition by toYYYYMM(timestamp)
+ORDER BY _id TTL toDateTime(timestamp + toIntervalDay(7));
+
+CREATE MATERIALIZED VIEW mview on cluster test_shard_localhost
+ENGINE = SummingMergeTree ORDER BY (day, card_id)
+as SELECT
+    toDate(timestamp) AS day,
+    card_id,
+    count(*) AS card_view
+FROM source GROUP BY (day, card_id);
+
+DROP TABLE mview;
+
+CREATE MATERIALIZED VIEW mview on cluster test_shard_localhost
+(
+    day Date,
+    card_id UInt64,
+    card_view Int64
+)
+ENGINE = SummingMergeTree ORDER BY (day, card_id)
+as SELECT
+    toDate(timestamp) AS day,
+    card_id,
+    count(*) AS card_view
+FROM source GROUP BY (day, card_id);
+
+alter table source on cluster test_shard_localhost MODIFY SETTING ttl_only_drop_parts = 1;
+
+drop table if exists mview, source;
