
Commit cf38df1

haasonsaas and claude committed
Fix CI test failures - streaming, rule objects, and port conflicts
- Fix StreamResponse.prepare() missing request argument in server.py:252
- Update tests to use Rule objects instead of dicts for type safety
- Use dynamic port allocation (port=0) to avoid binding conflicts
- All 14 tests now pass successfully

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 40f8824 commit cf38df1

File tree

2 files changed: +47 -39 lines changed


src/mocktopus/server.py

Lines changed: 5 additions & 5 deletions
@@ -88,13 +88,13 @@ async def handle_openai_chat(self, request: Request) -> Union[Response, StreamResponse]:
 
         # Mode-specific handling
         if self.mode == ServerMode.MOCK:
-            return await self._handle_mock_openai(model, messages, stream, data)
+            return await self._handle_mock_openai(request, model, messages, stream, data)
         elif self.mode == ServerMode.RECORD:
             return await self._handle_record_openai(data, stream)
         elif self.mode == ServerMode.REPLAY:
             return await self._handle_replay_openai(data, stream)
 
-    async def _handle_mock_openai(self, model: str, messages: List[Dict],
+    async def _handle_mock_openai(self, request: Request, model: str, messages: List[Dict],
                                   stream: bool, full_request: Dict) -> Union[Response, StreamResponse]:
         """Handle mocked OpenAI responses using scenarios"""
 
@@ -136,7 +136,7 @@ async def _handle_mock_openai(self, model: str, messages: List[Dict],
 
         # Stream response
         if stream:
-            return await self._stream_openai_response(content, model, tool_calls)
+            return await self._stream_openai_response(request, content, model, tool_calls)
 
         # Regular response
         response_data = {
@@ -240,7 +240,7 @@ async def _handle_error_response(self, error_config: Dict[str, Any]) -> Response:
             status=status_code
         )
 
-    async def _stream_openai_response(self, content: str, model: str,
+    async def _stream_openai_response(self, request: Request, content: str, model: str,
                                       tool_calls: List[Dict] = None) -> StreamResponse:
         """Stream OpenAI response using Server-Sent Events"""
 
@@ -249,7 +249,7 @@ async def _stream_openai_response(self, content: str, model: str,
         response.headers['Cache-Control'] = 'no-cache'
         response.headers['X-Accel-Buffering'] = 'no'
 
-        await response.prepare()
+        await response.prepare(request)
 
         # Stream ID
         stream_id = f"chatcmpl-{uuid.uuid4().hex[:8]}"

tests/test_integration.py

Lines changed: 42 additions & 34 deletions
@@ -13,26 +13,31 @@
 from mocktopus import Scenario, load_yaml
 from mocktopus.server import MockServer, ServerMode
 from mocktopus.cost_tracker import CostTracker
+from mocktopus.core import Rule
 
 
 @pytest.fixture
 async def mock_server():
     """Start a mock server for testing"""
     scenario = Scenario()
-    scenario.rules.append({
-        "type": "llm.openai",
-        "when": {"messages_contains": "hello"},
-        "respond": {"content": "Hello from mock!", "usage": {"input_tokens": 5, "output_tokens": 4}}
-    })
-
-    server = MockServer(scenario=scenario, port=8089, host="127.0.0.1")
+    scenario.rules.append(Rule(
+        type="llm.openai",
+        when={"messages_contains": "hello"},
+        respond={"content": "Hello from mock!", "usage": {"input_tokens": 5, "output_tokens": 4}}
+    ))
+
+    # Use port 0 to get a random available port
+    server = MockServer(scenario=scenario, port=0, host="127.0.0.1")
     app = server.create_app()
 
     runner = aiohttp.web.AppRunner(app)
     await runner.setup()
-    site = aiohttp.web.TCPSite(runner, "127.0.0.1", 8089)
+    site = aiohttp.web.TCPSite(runner, "127.0.0.1", 0)
     await site.start()
 
+    # Update server port with the actual port that was bound
+    server.port = site._server.sockets[0].getsockname()[1]
+
     yield server
 
     await runner.cleanup()
@@ -44,7 +49,7 @@ async def test_basic_chat_completion(mock_server):
     async with aiohttp.ClientSession() as session:
         # Test successful request
         async with session.post(
-            "http://localhost:8089/v1/chat/completions",
+            f"http://localhost:{mock_server.port}/v1/chat/completions",
             json={
                 "model": "gpt-4",
                 "messages": [{"role": "user", "content": "hello"}]
@@ -57,7 +62,7 @@ async def test_basic_chat_completion(mock_server):
 
         # Test no match
         async with session.post(
-            "http://localhost:8089/v1/chat/completions",
+            f"http://localhost:{mock_server.port}/v1/chat/completions",
             json={
                 "model": "gpt-4",
                 "messages": [{"role": "user", "content": "unknown"}]
@@ -70,18 +75,21 @@
 async def test_streaming_response():
     """Test SSE streaming response"""
     scenario = load_yaml("examples/chat-basic.yaml")
-    server = MockServer(scenario=scenario, port=8090)
+    server = MockServer(scenario=scenario, port=0)
     app = server.create_app()
 
     runner = aiohttp.web.AppRunner(app)
     await runner.setup()
-    site = aiohttp.web.TCPSite(runner, "127.0.0.1", 8090)
+    site = aiohttp.web.TCPSite(runner, "127.0.0.1", 0)
     await site.start()
 
+    # Update server port with the actual port that was bound
+    server.port = site._server.sockets[0].getsockname()[1]
+
     try:
         async with aiohttp.ClientSession() as session:
             async with session.post(
-                "http://localhost:8090/v1/chat/completions",
+                f"http://localhost:{server.port}/v1/chat/completions",
                 json={
                     "model": "gpt-4",
                     "messages": [{"role": "user", "content": "hello"}],
@@ -274,11 +282,11 @@ def test_rule_matching():
     scenario = Scenario()
 
     # Test exact match
-    scenario.rules.append({
-        "type": "llm.openai",
-        "when": {"messages_contains": "weather"},
-        "respond": {"content": "It's sunny!"}
-    })
+    scenario.rules.append(Rule(
+        type="llm.openai",
+        when={"messages_contains": "weather"},
+        respond={"content": "It's sunny!"}
+    ))
 
     rule, response = scenario.find_llm(
         model="gpt-4",
@@ -289,11 +297,11 @@ def test_rule_matching():
 
     # Test regex match
     scenario.rules = []
-    scenario.rules.append({
-        "type": "llm.openai",
-        "when": {"messages_regex": r"\d+ \+ \d+"},
-        "respond": {"content": "Math detected"}
-    })
+    scenario.rules.append(Rule(
+        type="llm.openai",
+        when={"messages_regex": r"\d+ \+ \d+"},
+        respond={"content": "Math detected"}
+    ))
 
     rule, response = scenario.find_llm(
         model="gpt-4",
@@ -304,11 +312,11 @@ def test_rule_matching():
 
     # Test model glob match
     scenario.rules = []
-    scenario.rules.append({
-        "type": "llm.openai",
-        "when": {"model": "gpt-3.5*"},
-        "respond": {"content": "GPT-3.5 response"}
-    })
+    scenario.rules.append(Rule(
+        type="llm.openai",
+        when={"model": "gpt-3.5*"},
+        respond={"content": "GPT-3.5 response"}
+    ))
 
     rule, response = scenario.find_llm(
         model="gpt-3.5-turbo",
@@ -329,12 +337,12 @@ def test_usage_limits():
     """Test rule usage limits"""
     scenario = Scenario()
 
-    rule = {
-        "type": "llm.openai",
-        "when": {"messages_contains": "test"},
-        "respond": {"content": "Limited"},
-        "times": 2
-    }
+    rule = Rule(
+        type="llm.openai",
+        when={"messages_contains": "test"},
+        respond={"content": "Limited"},
+        times=2
+    )
     scenario.rules.append(rule)
 
     # Should work twice