Commit bb5a592

Merge pull request ClickHouse#79362 from azat/tests/03393_max_merge_delayed_streams_for_parallel_write
tests: fix 03393_max_merge_delayed_streams_for_parallel_write flakiness (disable for MSan)
2 parents 9d5c05f + cd3ece2 commit bb5a592
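
Background (a short sketch, not text from the commit itself): ClickHouse stateless tests declare their constraints in a tags comment on the first line of the .sql file, which the clickhouse-test runner uses to decide whether a test runs for a given build. This fix extends that line with no-msan so the test is skipped under MemorySanitizer builds, presumably because MSan instrumentation inflates merge memory enough to make the test's peak_memory_usage check flaky. After the change the tag line reads:

    -- Tags: no-fasttest, long, no-parallel, no-flaky-check, no-msan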

1 file changed (+5 -6 lines)

tests/queries/0_stateless/03393_max_merge_delayed_streams_for_parallel_write.sql

Lines changed: 5 additions & 6 deletions
@@ -1,4 +1,4 @@
--- Tags: no-fasttest, long, no-parallel, no-flaky-check
+-- Tags: no-fasttest, long, no-parallel, no-flaky-check, no-msan
 -- - no-fasttest -- S3 is required
 -- - no-flaky-check -- not compatible with ThreadFuzzer
 
@@ -10,26 +10,25 @@ engine = MergeTree
 partition by ()
 order by ()
 settings
--- cache has it's own problem (see filesystem_cache_prefer_bigger_buffer_size)
+-- cache has it's own problems (see filesystem_cache_prefer_bigger_buffer_size)
 storage_policy = 's3_no_cache',
--- horizontal merges does opens all stream at once, so will still huge chunk of memory
+-- horizontal merges does opens all stream at once, so will still use huge amount of memory
 min_rows_for_wide_part = 0,
 min_bytes_for_wide_part = 0,
 vertical_merge_algorithm_min_rows_to_activate = 0,
 vertical_merge_algorithm_min_columns_to_activate = 1,
 min_bytes_for_full_part_storage = 0,
 -- avoid excessive memory usage (due to default buffer size of 1MiB that is created for each column)
-max_merge_delayed_streams_for_parallel_write = 1,
+max_merge_delayed_streams_for_parallel_write = 100,
 -- avoid superfluous merges
 merge_selector_base = 1000
 ;
 
-SET max_execution_time = 300;
 insert into metric_log select * from generateRandom() limit 10;
 
 optimize table metric_log final;
 system flush logs part_log;
-select 'max_merge_delayed_streams_for_parallel_write=1' as test, * from system.part_log where table = 'metric_log' and database = currentDatabase() and event_date >= yesterday() and event_type = 'MergeParts' and peak_memory_usage > 100_000_000 format Vertical;
+select 'max_merge_delayed_streams_for_parallel_write=100' as test, * from system.part_log where table = 'metric_log' and database = currentDatabase() and event_date >= yesterday() and event_type = 'MergeParts' and peak_memory_usage > 1_000_000_000 format Vertical;
 
 alter table metric_log modify setting max_merge_delayed_streams_for_parallel_write = 10000;
