Skip to content

Commit c104ad2

Browse files
authored
Merge pull request ClickHouse#78980 from ClickHouse/divanik/remove_test_async_insert_adaptive_busy_timeout_flaky_tests
tests test_async_insert_adaptive_busy_timeout/test.py::test_change_queries_frequency and test_async_insert_adaptive_busy_timeout/test.py::test_compare_sequential_inserts_durations_for_adaptive_and_fixed_async_timeouts are flaky
2 parents f94cd4e + 5a33757 commit c104ad2

File tree

1 file changed

+0
-123
lines changed
  • tests/integration/test_async_insert_adaptive_busy_timeout

1 file changed

+0
-123
lines changed

tests/integration/test_async_insert_adaptive_busy_timeout/test.py

Lines changed: 0 additions & 123 deletions
Original file line numberDiff line numberDiff line change
@@ -186,126 +186,3 @@ def test_with_replicated_merge_tree_multithread():
186186
)
187187

188188
node.query("DROP TABLE {} SYNC".format(table_name))
189-
190-
191-
# Ensure that the combined duration of inserts with adaptive timeouts is less than
192-
# the combined duration for fixed timeouts.
193-
def test_compare_sequential_inserts_durations_for_adaptive_and_fixed_async_timeouts():
194-
fixed_tm_table_name = "async_insert_mt_fixed_async_timeout"
195-
node.query(
196-
"CREATE TABLE {} (a UInt64, b Array(UInt64)) ENGINE=MergeTree() ORDER BY a".format(
197-
fixed_tm_table_name
198-
)
199-
)
200-
201-
fixed_tm_settings = copy.copy(_query_settings)
202-
fixed_tm_settings["async_insert_use_adaptive_busy_timeout"] = 0
203-
fixed_tm_settings["async_insert_busy_timeout_ms"] = 100
204-
205-
fixed_tm_run_duration = timeit.timeit(
206-
lambda: _insert_queries_sequentially(
207-
fixed_tm_table_name,
208-
fixed_tm_settings,
209-
iterations=50,
210-
max_values_size=1000,
211-
array_size_range=[10, 50],
212-
),
213-
setup="pass",
214-
number=3,
215-
)
216-
217-
node.query("DROP TABLE IF EXISTS {}".format(fixed_tm_table_name))
218-
219-
logging.debug(
220-
"Run duration with fixed asynchronous timeout is {} seconds".format(
221-
fixed_tm_run_duration
222-
)
223-
)
224-
225-
adaptive_tm_table_name = "async_insert_mt_adaptive_async_timeout"
226-
node.query(
227-
"CREATE TABLE {} (a UInt64, b Array(UInt64)) ENGINE=MergeTree() ORDER BY a".format(
228-
adaptive_tm_table_name
229-
)
230-
)
231-
232-
adaptive_tm_settings = copy.copy(_query_settings)
233-
adaptive_tm_settings["async_insert_busy_timeout_min_ms"] = 10
234-
adaptive_tm_settings["async_insert_busy_timeout_max_ms"] = 500
235-
236-
adaptive_tm_run_duration = timeit.timeit(
237-
lambda: _insert_queries_sequentially(
238-
adaptive_tm_table_name,
239-
adaptive_tm_settings,
240-
iterations=50,
241-
max_values_size=1000,
242-
array_size_range=[10, 50],
243-
),
244-
setup="pass",
245-
number=3,
246-
)
247-
248-
logging.debug(
249-
"Run duration with adaptive asynchronous timeout is {} seconds.".format(
250-
adaptive_tm_run_duration
251-
)
252-
)
253-
254-
node.query("DROP TABLE IF EXISTS {}".format(adaptive_tm_table_name))
255-
256-
assert adaptive_tm_run_duration <= fixed_tm_run_duration
257-
258-
259-
# Ensure that the delay converges to a minimum for sequential inserts and wait_for_async_insert=1.
260-
def test_change_queries_frequency():
261-
table_name = "async_insert_mt_change_queries_frequencies"
262-
263-
create_query = " ".join(
264-
(
265-
"CREATE TABLE {} (a UInt64, b Array(UInt64))".format(table_name),
266-
"ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_frequencies/{}', 'node')".format(
267-
table_name
268-
),
269-
"ORDER BY a",
270-
)
271-
)
272-
273-
node.query(create_query)
274-
275-
settings = copy.copy(_query_settings)
276-
min_ms = 50
277-
max_ms = 200
278-
279-
settings["async_insert_busy_timeout_min_ms"] = min_ms
280-
settings["async_insert_busy_timeout_max_ms"] = max_ms
281-
282-
# When we do sequential queries, the timeout converges to the minimum
283-
284-
_insert_queries_sequentially(
285-
table_name,
286-
settings,
287-
iterations=50,
288-
max_values_size=1000,
289-
array_size_range=[10, 50],
290-
)
291-
node.query("SYSTEM FLUSH LOGS")
292-
select_log_query = f"SELECT countIf(timeout_milliseconds - {min_ms} < 25) FROM (SELECT timeout_milliseconds FROM system.asynchronous_insert_log ORDER BY event_time DESC LIMIT 10)"
293-
res = node.query(select_log_query)
294-
assert int(res) >= 5
295-
296-
# When we do many parallel queries, the timeout converges to the maximum
297-
298-
_insert_queries_in_parallel(
299-
table_name,
300-
settings,
301-
thread_num=10,
302-
tasks=1000,
303-
max_values_size=1000,
304-
array_size_range=[10, 15],
305-
)
306-
node.query("SYSTEM FLUSH LOGS")
307-
select_log_query = f"SELECT countIf({max_ms} - timeout_milliseconds < 100) FROM (SELECT timeout_milliseconds FROM system.asynchronous_insert_log ORDER BY event_time DESC LIMIT 10)"
308-
res = node.query(select_log_query)
309-
assert int(res) >= 5
310-
311-
node.query("DROP TABLE IF EXISTS {} SYNC".format(table_name))

0 commit comments

Comments
 (0)