Skip to content

Commit d2d1685

Browse files
committed
feat testsuite: more robust waiting
commit_hash:c1934115b6550dfbe0f5dc06b70fd79504294087
1 parent b7f382f commit d2d1685

File tree

12 files changed

+200
-112
lines changed

12 files changed

+200
-112
lines changed

core/functional_tests/basic_chaos/tests-restart/test_restart.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
1-
import asyncio
2-
import datetime
1+
import pytest_userver.utils.sync as sync
32

43

54
async def wait_for_daemon_stop(_global_daemon_store):
6-
deadline = datetime.datetime.now() + datetime.timedelta(seconds=10)
7-
while datetime.datetime.now() < deadline and _global_daemon_store.has_running_daemons():
8-
await asyncio.sleep(0.05)
5+
def is_ready():
6+
return not _global_daemon_store.has_running_daemons()
7+
8+
await sync.wait(is_ready)
99

1010
assert not _global_daemon_store.has_running_daemons(), 'Daemon has not stopped'
1111
await _global_daemon_store.aclose()

core/functional_tests/early_monitor_port_open/tests/test_boot.py

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
import aiohttp
44
import pytest
5+
import pytest_userver.utils.sync as sync
56

67

78
@pytest.mark.skip(reason='the feature is not yet enabled')
@@ -15,16 +16,12 @@ async def test_monitor_port_is_open_before_all_components_are_ready(
1516

1617
# call /monitor
1718
async with aiohttp.ClientSession() as session:
18-
for i in range(120):
19-
try:
20-
response = await session.get(monitor_baseurl + 'monitor')
21-
if response.status == 200:
22-
break
23-
await asyncio.sleep(0.5)
24-
except aiohttp.ClientConnectorError:
25-
await asyncio.sleep(0.5)
26-
else:
27-
assert False, 'Service has not started'
19+
20+
async def is_ready():
21+
response = await session.get(monitor_baseurl + 'monitor')
22+
return response.status == 200
23+
24+
await sync.wait(is_ready, catch=aiohttp.ClientConnectorError)
2825

2926
# make sure /ping is now ready
3027
await daemon_task

core/functional_tests/graceful_shutdown/tests/test_graceful_shutdown.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
11
import asyncio
2-
import datetime
2+
3+
import pytest_userver.utils.sync as sync
34

45

56
async def wait_for_daemon_stop(_global_daemon_store):
6-
deadline = datetime.datetime.now() + datetime.timedelta(seconds=10)
7-
while datetime.datetime.now() < deadline and _global_daemon_store.has_running_daemons():
8-
await asyncio.sleep(0.05)
7+
def is_ready():
8+
return not _global_daemon_store.has_running_daemons()
9+
10+
await sync.wait(is_ready)
911

1012
assert not _global_daemon_store.has_running_daemons(), 'Daemon has not stopped'
1113
await _global_daemon_store.aclose()

core/functional_tests/http2server/tests/test_http2server.py

Lines changed: 20 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
import h2.events
1010
import h2.settings
1111
import pytest
12+
import pytest_userver.utils.sync as sync
1213

1314
DEFAULT_PATH = '/http2server'
1415
DEFAULT_DATA = {'hello': 'world'}
@@ -134,19 +135,26 @@ async def test_concurrent_requests(
134135

135136
await service_client.update_server_state()
136137

137-
await asyncio.sleep(1.0)
138+
def expect_eq(a, b):
139+
if a == b:
140+
return
141+
raise sync.NotReady
142+
143+
async def is_ready():
144+
metrics = await monitor_client.metrics(prefix='server.requests.http2')
145+
expect_eq(len(metrics), 5)
146+
total_requests = clients_count * req_per_client + current_streams
147+
expect_eq(total_requests, await _get_metric(monitor_client, 'streams-count'))
148+
expect_eq(total_requests, await _get_metric(monitor_client, 'streams-close'))
149+
expect_eq(0, await _get_metric(monitor_client, 'reset-streams'))
150+
expect_eq(0, await _get_metric(monitor_client, 'goaway'))
151+
expect_eq(
152+
streams_parse_error,
153+
await _get_metric(monitor_client, 'streams-parse-error'),
154+
)
155+
return True
138156

139-
metrics = await monitor_client.metrics(prefix='server.requests.http2')
140-
assert len(metrics) == 5
141-
total_requests = clients_count * req_per_client + current_streams
142-
assert total_requests == await _get_metric(monitor_client, 'streams-count')
143-
assert total_requests == await _get_metric(monitor_client, 'streams-close')
144-
assert 0 == await _get_metric(monitor_client, 'reset-streams')
145-
assert 0 == await _get_metric(monitor_client, 'goaway')
146-
assert streams_parse_error == await _get_metric(
147-
monitor_client,
148-
'streams-parse-error',
149-
)
157+
await sync.wait(is_ready)
150158

151159

152160
async def test_concurrent_requests_with_big_body(

core/functional_tests/signal_during_boot/tests/test_rotate_logs.py

Lines changed: 10 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,20 @@
1-
import asyncio
21
import os
32
import signal
43

4+
import pytest_userver.utils.sync as sync
5+
56
TIMEOUT = 60
67

78

89
async def wait_for_logfile(fname: str):
9-
for i in range(TIMEOUT):
10+
async def check_ready():
1011
with open(fname) as ifile:
1112
for line in ifile.readlines():
1213
if 'Starting to catch signals' in line:
13-
# Yes, sleeping for sync. Yes, that's bad.
14-
# No, it is not possible to wait for a specific event.
15-
await asyncio.sleep(1)
1614
return
15+
raise sync.NotReady()
1716

18-
# not yet started, waiting
19-
await asyncio.sleep(1)
20-
21-
assert False, f'log file {fname} does not contain start log entry, has the service started?'
17+
await sync.wait_until(check_ready)
2218

2319

2420
async def test_boot_logs(
@@ -50,9 +46,8 @@ async def _checker(*, session, process) -> bool:
5046

5147
daemon.process.send_signal(signal.SIGUSR1)
5248

53-
for i in range(TIMEOUT):
54-
if os.path.exists(_service_logfile_path):
55-
break
56-
await asyncio.sleep(1)
57-
else:
58-
assert False, f'log file {_service_logfile_path} is not reopened'
49+
async def check_ready():
50+
if not os.path.exists(_service_logfile_path):
51+
raise sync.NotReady()
52+
53+
await sync.wait_until(check_ready)

core/functional_tests/websocket/tests/test_websocket.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
import logging
33

44
import pytest
5+
import pytest_userver.utils.sync as sync
56
import websockets
67

78
# Disabling redundant logs from third party library
@@ -146,15 +147,14 @@ async def test_ping_pong_close(websocket_client):
146147
websocket_client.ping_interval = None
147148
websocket_client.ping_timeout = None
148149

149-
for _ in range(20):
150-
try:
151-
await chat.recv()
152-
await asyncio.sleep(1)
153-
except websockets.exceptions.ConnectionClosed:
154-
connection_closed_by_ping = True
155-
break
150+
async def check_ready():
151+
await chat.recv()
152+
raise sync.NotReady()
156153

157-
assert connection_closed_by_ping
154+
try:
155+
await sync.wait_until(check_ready)
156+
except websockets.exceptions.ConnectionClosed:
157+
pass
158158

159159

160160
async def test_upgrade_header_with_tab_then_reconnect(service_port):

redis/functional_tests/basic_chaos/tests/test_redis.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import asyncio
22

33
import pytest
4+
import pytest_userver.utils.sync as sync
45

56

67
async def _check_that_restores(client, gate):
@@ -11,13 +12,11 @@ async def _check_that_restores(client, gate):
1112

1213
assert gate.connections_count() >= 1
1314

14-
for _ in range(10):
15+
async def is_ready():
1516
res = await client.delete('/chaos?key=foo')
16-
if res.status == 200:
17-
return
18-
await asyncio.sleep(1)
17+
return res.status == 200
1918

20-
assert False, 'Bad results after connection restore'
19+
await sync.wait(is_ready)
2120

2221

2322
async def _check_crud(client):

redis/functional_tests/cluster_auto_topology_pubsub/tests/test_redis_pubsub.py

Lines changed: 24 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import asyncio
22

33
import pytest
4+
import pytest_userver.utils.sync as sync
45
import redis
56

67
# Some messages may be lost (it's a Redis limitation). *_failover tests require more than 100 retries on slow CI
@@ -38,7 +39,8 @@ async def _validate_pubsub(
3839
for i in range(INPUT_CHANNELS_COUNT):
3940
channel_name = channel_prefix + str(i)
4041
message = msg + str(i)
41-
for _ in range(REQUESTS_RETRIES):
42+
43+
async def check_ready():
4244
publish_method(redis_db, channel_name, message)
4345

4446
response = await service_client.get(
@@ -52,11 +54,11 @@ async def _validate_pubsub(
5254
if data:
5355
assert message in data
5456
await service_client.delete(url)
55-
break
57+
return
5658

57-
await asyncio.sleep(REQUESTS_RELAX_TIME)
58-
else:
59-
assert False, f'Retries exceeded trying to read from {channel_name}'
59+
raise sync.NotReady()
60+
61+
await sync.wait_until(check_ready)
6062

6163

6264
async def _test_service_subscription(service_client, node, prefix):
@@ -93,13 +95,13 @@ async def _validate_service_publish(service_client, nodes, shards_count=0):
9395
redis_clients = [(node, node.get_client()) for node in nodes]
9496

9597
async def _get_message(pubsub, retries=5, delay=0.5):
96-
ret = None
97-
for _ in range(retries):
98+
async def check_ready():
9899
ret = pubsub.get_message()
99100
if ret is not None:
100101
return ret
101-
await asyncio.sleep(delay)
102-
return ret
102+
raise sync.NotReady()
103+
104+
return await sync.wait_until(check_ready)
103105

104106
async def _ensure_published(
105107
pubsub,
@@ -120,8 +122,13 @@ async def _ensure_published(
120122
assert False, 'Retries exceeded'
121123

122124
async def _validate(service_client, pubsub, prefix):
123-
for index in range(REQUESTS_RETRIES):
125+
index = 1
126+
127+
async def check_ready():
128+
nonlocal index
129+
index = index + 1
124130
msg = prefix + str(index)
131+
125132
try:
126133
response = await service_client.get(url, params={'publish': msg})
127134
assert response.status == 200
@@ -130,8 +137,9 @@ async def _validate(service_client, pubsub, prefix):
130137
return
131138
except Exception as exc: # pylint: disable=broad-except
132139
print(f'Pubsub validation failed for shard zero: {exc}')
133-
await asyncio.sleep(REQUESTS_RELAX_TIME)
134-
assert False, 'Retries exceeded for shard zero'
140+
raise sync.NotReady()
141+
142+
await sync.wait_until(check_ready)
135143

136144
async def _validate_round_robin(service_client, pubsub, prefix):
137145
successes = list()
@@ -180,13 +188,13 @@ async def _validate_service_spublish(service_client, nodes):
180188
"""
181189

182190
async def _get_message(pubsub, retries=5, delay=0.5):
183-
ret = None
184-
for _ in range(retries):
191+
async def check_ready():
185192
ret = pubsub.get_sharded_message()
186193
if ret is not None:
187194
return ret
188-
await asyncio.sleep(delay)
189-
return ret
195+
raise sync.NotReady()
196+
197+
return await sync.wait_until(check_ready)
190198

191199
async def _ensure_published(
192200
pubsub,

redis/functional_tests/integration_tests/tests/test_redis_cluster.py

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
import asyncio
22

3+
import pytest_userver.utils.sync as sync
34
import redis
45

56
KEYS_SEQ_LEN = 10 # enough sequential keys to test all shards
6-
FAILOVER_DEADLINE_SEC = 30 # maximum time allowed to finish failover
77

88

99
async def test_happy_path(service_client):
@@ -32,12 +32,11 @@ async def _check_write_all_shards(service_client, key_prefix, value):
3232

3333

3434
async def _wait_for_replicas_and_masters_negotiation(service_client, key, value):
35-
for _ in range(FAILOVER_DEADLINE_SEC):
36-
write_ok = await _check_write_all_shards(service_client, key, value)
37-
if write_ok:
38-
break
39-
await asyncio.sleep(1)
40-
assert write_ok
35+
async def check_ready():
36+
if not await _check_write_all_shards(service_client, key, value):
37+
raise sync.NotReady()
38+
39+
await sync.wait_until(check_ready)
4140

4241

4342
async def _check_read_all_shards(service_client, key_prefix, value):
@@ -75,17 +74,17 @@ async def test_failover(service_client, redis_cluster_store):
7574
await _assert_read_all_shards(service_client, 'hf_key1', 'abc')
7675

7776
# Replica may be syncing, use some retries
78-
for _ in range(FAILOVER_DEADLINE_SEC):
79-
read_ok = await _check_read_all_shards(service_client, 'hf_key2', 'cde')
80-
if read_ok:
81-
break
82-
await asyncio.sleep(1)
83-
assert read_ok
77+
async def check_ready():
78+
if not await _check_read_all_shards(service_client, 'hf_key2', 'cde'):
79+
raise sync.NotReady()
80+
81+
await sync.wait_until(check_ready)
8482

8583
# Failover master back where it was and make sure it gets there
8684
assert redis_cluster_store.cluster_failover(target_node=primary)
8785
await _wait_for_replicas_and_masters_negotiation(service_client, 'hf_key3', 'xyz')
88-
for _ in range(FAILOVER_DEADLINE_SEC):
86+
87+
async def check_ready():
8988
try:
9089
redis_cluster_store.flushall(target_nodes=redis.cluster.RedisCluster.PRIMARIES)
9190
redis_cluster_store.wait(1, 10, target_nodes=redis.cluster.RedisCluster.PRIMARIES)
@@ -94,4 +93,6 @@ async def test_failover(service_client, redis_cluster_store):
9493
redis_cluster_store.close()
9594
redis_cluster_store.nodes_manager.reset()
9695
redis_cluster_store.nodes_manager.initialize()
97-
await asyncio.sleep(1)
96+
raise sync.NotReady()
97+
98+
await sync.wait_until(check_ready)

redis/functional_tests/integration_tests/tests/test_redis_sentinel.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
import asyncio
22

3+
import pytest_userver.utils.sync as sync
4+
35
KEYS_SEQ_LEN = 10 # enough sequential keys to test all shards
4-
FAILOVER_DEADLINE_SEC = 30 # maximum time allowed to finish failover
56

67

78
async def test_happy_path(service_client):
@@ -47,12 +48,10 @@ async def test_failover(service_client, redis_sentinel):
4748
redis_sentinel.sentinel_failover('test_master1')
4849

4950
# Wait for failover to happen
50-
for i in range(FAILOVER_DEADLINE_SEC):
51-
write_ok = await _check_write_all_shards(service_client, 'failover_{i}_', 'xyz')
52-
if write_ok:
53-
break
54-
await asyncio.sleep(1)
55-
assert write_ok
51+
async def is_ready():
52+
return await _check_write_all_shards(service_client, 'failover_{i}_', 'xyz')
53+
54+
await sync.wait(is_ready)
5655

5756
# Now that one of the replicas has become the master,
5857
# check reading from the remaining replica

0 commit comments

Comments (0)