|
5 | 5 | from mcp.server.fastmcp import FastMCP |
6 | 6 | from mcp.shared.memory import create_connected_server_and_client_session as create_session |
7 | 7 |
|
8 | | -_sleep_time_seconds = 0.01 |
9 | 8 | _resource_name = "slow://slow_resource" |
10 | 9 |
|
11 | 10 |
|
12 | | -@pytest.mark.filterwarnings( |
13 | | - "ignore:coroutine 'test_messages_are_executed_concurrently.<locals>.slow_resource' was never awaited:RuntimeWarning" |
14 | | -) |
@pytest.mark.anyio
async def test_messages_are_executed_concurrently():
    """Prove the server services multiple requests concurrently.

    The tool handler parks on an event that only the resource handler sets,
    so the test can complete only when both requests are in flight at once;
    strictly sequential handling would deadlock instead of finishing.
    """
    server = FastMCP("test")
    gate = anyio.Event()
    observed = []

    @server.tool("sleep")
    async def sleep_tool():
        # Record that we are parked, then block until the resource
        # handler opens the gate.
        observed.append("waiting_for_event")
        await gate.wait()
        observed.append("tool_end")
        return "done"

    @server.resource(_resource_name)
    async def slow_resource():
        # Runs while the tool request is still pending; opening the gate
        # here lets the blocked tool handler resume.
        gate.set()
        observed.append("resource_end")
        return "slow"

    async with create_session(server._mcp_server) as session:
        async with anyio.create_task_group() as tg:
            # Tool first (it parks on the gate), then the resource
            # (it opens the gate).
            tg.start_soon(session.call_tool, "sleep")
            tg.start_soon(session.read_resource, AnyUrl(_resource_name))

        # This exact interleaving is only possible if both handlers
        # overlapped in time.
        assert observed == [
            "waiting_for_event",
            "resource_end",
            "tool_end",
        ], f"Expected concurrent execution, but got: {observed}"
0 commit comments