Commit bca1a42

Author: Lucas Alencar Xisto
Commit message: tests: improve timeouts and retries coverage with async + detailed assertions
1 parent: 3154a78

File tree

4 files changed (+443, -0 lines changed)


.vscode/settings.json

Lines changed: 9 additions & 0 deletions
@@ -1,3 +1,12 @@
 {
   "python.analysis.importFormat": "relative",
+  "python.testing.unittestArgs": [
+    "-v",
+    "-s",
+    "./tests",
+    "-p",
+    "*test.py"
+  ],
+  "python.testing.pytestEnabled": false,
+  "python.testing.unittestEnabled": true,
 }
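
For reference, the unittest settings above correspond roughly to the following discovery command (the exact invocation VS Code builds from these arguments may differ slightly):

    python -m unittest discover -s ./tests -p "*test.py" -v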

tests/test_retries.py

Lines changed: 251 additions & 0 deletions
import time
import anyio
import httpx
import pytest


def test_respects_retry_after_seconds(monkeypatch):
    sleeps = []

    # Avoid actually sleeping
    monkeypatch.setattr(time, "sleep", lambda s: sleeps.append(s))

    attempts = {"n": 0}

    def handler(request: httpx.Request):
        attempts["n"] += 1
        if attempts["n"] == 1:
            # 1st attempt fails with 429 and Retry-After: 2
            return httpx.Response(429, headers={"Retry-After": "2"}, json={"err": "rate"})
        # 2nd attempt succeeds
        return httpx.Response(200, json={"ok": True})

    transport = httpx.MockTransport(handler)

    from openai import OpenAI, DefaultHttpxClient

    client = OpenAI(
        api_key="test-key",  # dummy key so the test does not depend on the environment
        max_retries=1,
        http_client=DefaultHttpxClient(transport=transport),
    )

    client.responses.create(model="gpt-4o-mini", input="hi")

    # Should have "slept" ~2s before retrying
    assert sleeps, "expected a sleep before retry"
    assert sleeps[0] >= 2.0  # may be greater than 2.0 if extra jitter is applied
@pytest.mark.asyncio
39+
async def test_respects_retry_after_seconds_async(monkeypatch):
40+
sleeps = []
41+
42+
async def fake_sleep(s):
43+
sleeps.append(s)
44+
45+
monkeypatch.setattr(anyio, "sleep", fake_sleep)
46+
47+
attempts = {"n": 0}
48+
49+
async def handler(request: httpx.Request):
50+
attempts["n"] += 1
51+
if attempts["n"] == 1:
52+
return httpx.Response(429, headers={"Retry-After": "2"}, json={"err": "rate"})
53+
return httpx.Response(200, json={"ok": True})
54+
import time
55+
import anyio
56+
import httpx
57+
import pytest
58+
59+
def test_respects_retry_after_seconds(monkeypatch):
60+
sleeps = []
61+
monkeypatch.setattr(time, "sleep", lambda s: sleeps.append(s))
62+
attempts = {"n": 0}
63+
64+
def handler(request: httpx.Request):
65+
attempts["n"] += 1
66+
if attempts["n"] == 1:
67+
return httpx.Response(429, headers={"Retry-After": "2"}, json={"err": "rate"})
68+
return httpx.Response(200, json={"ok": True})
69+
70+
transport = httpx.MockTransport(handler)
71+
from openai import OpenAI, DefaultHttpxClient
72+
73+
client = OpenAI(
74+
max_retries=1,
75+
http_client=DefaultHttpxClient(transport=transport),
76+
)
77+
78+
client.responses.create(model="gpt-4o-mini", input="hi")
79+
assert sleeps, "expected a sleep before retry"
80+
assert sleeps[0] >= 2.0
81+
82+
def test_no_retry_on_success(monkeypatch):
    sleeps = []
    monkeypatch.setattr(time, "sleep", lambda s: sleeps.append(s))
    attempts = {"n": 0}

    def handler(request: httpx.Request):
        attempts["n"] += 1
        return httpx.Response(200, json={"ok": True})

    transport = httpx.MockTransport(handler)
    from openai import OpenAI, DefaultHttpxClient

    client = OpenAI(
        api_key="test-key",
        max_retries=3,
        http_client=DefaultHttpxClient(transport=transport),
    )

    client.responses.create(model="gpt-4o-mini", input="hi")
    assert attempts["n"] == 1
    assert not sleeps


def test_max_retries_exceeded(monkeypatch):
    sleeps = []
    monkeypatch.setattr(time, "sleep", lambda s: sleeps.append(s))
    attempts = {"n": 0}

    def handler(request: httpx.Request):
        attempts["n"] += 1
        return httpx.Response(429, headers={"Retry-After": "1"}, json={"err": "rate"})

    transport = httpx.MockTransport(handler)
    from openai import OpenAI, DefaultHttpxClient, RateLimitError

    client = OpenAI(
        api_key="test-key",
        max_retries=2,
        http_client=DefaultHttpxClient(transport=transport),
    )

    # Once retries are exhausted, the SDK surfaces the 429 as its own RateLimitError
    with pytest.raises(RateLimitError):
        client.responses.create(model="gpt-4o-mini", input="hi")
    assert attempts["n"] == 3  # 1 original + 2 retries
    assert len(sleeps) == 2
    assert all(s >= 1.0 for s in sleeps)


@pytest.mark.asyncio
async def test_respects_retry_after_seconds_async(monkeypatch):
    sleeps = []

    async def fake_sleep(s):
        sleeps.append(s)

    monkeypatch.setattr(anyio, "sleep", fake_sleep)
    attempts = {"n": 0}

    async def handler(request: httpx.Request):
        attempts["n"] += 1
        if attempts["n"] == 1:
            return httpx.Response(429, headers={"Retry-After": "2"}, json={"err": "rate"})
        return httpx.Response(200, json={"ok": True})

    transport = httpx.MockTransport(handler)
    from openai import AsyncOpenAI, DefaultAsyncHttpxClient

    client = AsyncOpenAI(
        api_key="test-key",
        max_retries=1,
        http_client=DefaultAsyncHttpxClient(transport=transport),
    )

    await client.responses.create(model="gpt-4o-mini", input="hi")
    assert sleeps
    assert sleeps[0] >= 2.0


@pytest.mark.asyncio
async def test_no_retry_on_success_async(monkeypatch):
    sleeps = []

    async def fake_sleep(s):
        sleeps.append(s)

    monkeypatch.setattr(anyio, "sleep", fake_sleep)
    attempts = {"n": 0}

    async def handler(request: httpx.Request):
        attempts["n"] += 1
        return httpx.Response(200, json={"ok": True})

    transport = httpx.MockTransport(handler)
    from openai import AsyncOpenAI, DefaultAsyncHttpxClient

    client = AsyncOpenAI(
        api_key="test-key",
        max_retries=3,
        http_client=DefaultAsyncHttpxClient(transport=transport),
    )

    await client.responses.create(model="gpt-4o-mini", input="hi")
    assert attempts["n"] == 1
    assert not sleeps


@pytest.mark.asyncio
async def test_max_retries_exceeded_async(monkeypatch):
    sleeps = []

    async def fake_sleep(s):
        sleeps.append(s)

    monkeypatch.setattr(anyio, "sleep", fake_sleep)
    attempts = {"n": 0}

    async def handler(request: httpx.Request):
        attempts["n"] += 1
        return httpx.Response(429, headers={"Retry-After": "1"}, json={"err": "rate"})

    transport = httpx.MockTransport(handler)
    from openai import AsyncOpenAI, DefaultAsyncHttpxClient, RateLimitError

    client = AsyncOpenAI(
        api_key="test-key",
        max_retries=2,
        http_client=DefaultAsyncHttpxClient(transport=transport),
    )

    with pytest.raises(RateLimitError):
        await client.responses.create(model="gpt-4o-mini", input="hi")
    assert attempts["n"] == 3
    assert len(sleeps) == 2
    assert all(s >= 1.0 for s in sleeps)


def test_retry_with_jitter(monkeypatch):
    # Tests that retries with backoff/jitter still resolve (if jitter is implemented)
    sleeps = []
    monkeypatch.setattr(time, "sleep", lambda s: sleeps.append(s))
    attempts = {"n": 0}

    def handler(request: httpx.Request):
        attempts["n"] += 1
        if attempts["n"] < 3:
            return httpx.Response(429, headers={"Retry-After": "1"}, json={"err": "rate"})
        return httpx.Response(200, json={"ok": True})

    transport = httpx.MockTransport(handler)
    from openai import OpenAI, DefaultHttpxClient

    client = OpenAI(
        api_key="test-key",
        max_retries=2,
        http_client=DefaultHttpxClient(transport=transport),
    )

    # two 429s, then success on the third attempt, so no exception is expected
    client.responses.create(model="gpt-4o-mini", input="hi")
    assert attempts["n"] == 3
    assert len(sleeps) == 2


def test_retry_on_different_status(monkeypatch):
    # Tests whether retries also happen for statuses other than 429 (e.g. 500), if supported
    sleeps = []
    monkeypatch.setattr(time, "sleep", lambda s: sleeps.append(s))
    attempts = {"n": 0}

    def handler(request: httpx.Request):
        attempts["n"] += 1
        if attempts["n"] == 1:
            return httpx.Response(500, json={"err": "server"})
        return httpx.Response(200, json={"ok": True})

    transport = httpx.MockTransport(handler)
    from openai import OpenAI, DefaultHttpxClient

    client = OpenAI(
        api_key="test-key",
        max_retries=1,
        http_client=DefaultHttpxClient(transport=transport),
    )

    client.responses.create(model="gpt-4o-mini", input="hi")
    assert attempts["n"] == 2


# A structure like this works well: each test covers one scenario and is clearly separated.
# If you want, you can group by theme in subfolders under tests/ (e.g. tests/retries/, tests/timeouts/),
# but for small/medium projects keeping separate files is already sufficient and clear.
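
A note on running these tests: they rely on pytest features (the monkeypatch fixture, pytest.raises, and the @pytest.mark.asyncio marker provided by the pytest-asyncio plugin), so a typical invocation, assuming pytest and pytest-asyncio are installed, would be:

    pip install pytest pytest-asyncio
    pytest tests/test_retries.py -v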

0 commit comments
