Skip to content

Commit 658bdea

Browse files
committed
Fix conflicts
2 parents ae22d0e + f99fa5f commit 658bdea

File tree

13 files changed

+199
-14
lines changed

13 files changed

+199
-14
lines changed

docs/assets/images/mcp-tracing.jpg

398 KB
Loading

docs/mcp.md

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Model context protocol
1+
# Model context protocol (MCP)
22

33
The [Model context protocol](https://modelcontextprotocol.io/introduction) (aka MCP) is a way to provide tools and context to the LLM. From the MCP docs:
44

@@ -46,6 +46,15 @@ Every time an Agent runs, it calls `list_tools()` on the MCP server. This can be
4646

4747
If you want to invalidate the cache, you can call `invalidate_tools_cache()` on the servers.
4848

49-
## End-to-end example
49+
## End-to-end examples
5050

5151
View complete working examples at [examples/mcp](https://github.com/openai/openai-agents-python/tree/main/examples/mcp).
52+
53+
## Tracing
54+
55+
[Tracing](./tracing.md) automatically captures MCP operations, including:
56+
57+
1. Calls to the MCP server to list tools
58+
2. MCP-related info on function calls
59+
60+
![MCP Tracing Screenshot](./assets/images/mcp-tracing.jpg)

examples/mcp/filesystem_example/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ uv run python examples/mcp/filesystem_example/main.py
1010

1111
## Details
1212

13-
The example uses the `MCPServerStdio` class from `agents`, with the command:
13+
The example uses the `MCPServerStdio` class from `agents.mcp`, with the command:
1414

1515
```bash
1616
npx -y "@modelcontextprotocol/server-filesystem" <samples_directory>

examples/mcp/git_example/README.md

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,16 +10,17 @@ uv run python examples/mcp/git_example/main.py
1010

1111
## Details
1212

13-
The example uses the `MCPServerStdio` class from `agents`, with the command:
13+
The example uses the `MCPServerStdio` class from `agents.mcp`, with the command:
1414

1515
```bash
1616
uvx mcp-server-git
1717
```
18+
1819
Prior to running the agent, the user is prompted to provide a local directory path to their git repo. Using that, the Agent can invoke Git MCP tools like `git_log` to inspect the git commit log.
1920

2021
Under the hood:
2122

2223
1. The server is spun up in a subprocess, and exposes a bunch of tools like `git_log()`
2324
2. We add the server instance to the Agent via `mcp_servers`.
24-
3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`. The result is cached.
25+
3. Each time the agent runs, we call out to the MCP server to fetch the list of tools via `server.list_tools()`. The result is cached.
2526
4. If the LLM chooses to use an MCP tool, we call the MCP server to run the tool via `server.run_tool()`.

examples/mcp/git_example/main.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,10 @@ async def main():
2929
# Ask the user for the directory path
3030
directory_path = input("Please enter the path to the git repository: ")
3131

32-
async with MCPServerStdio(params={"command": "uvx", "args": ["mcp-server-git"]}) as server:
32+
async with MCPServerStdio(
33+
cache_tools_list=True, # Cache the tools list, for demonstration
34+
params={"command": "uvx", "args": ["mcp-server-git"]},
35+
) as server:
3336
with trace(workflow_name="MCP Git Example"):
3437
await run(server, directory_path)
3538

examples/mcp/sse_example/README.md

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
# MCP SSE Example
2+
3+
This example uses a local SSE server in [server.py](server.py).
4+
5+
Run the example via:
6+
7+
```
8+
uv run python examples/mcp/sse_example/main.py
9+
```
10+
11+
## Details
12+
13+
The example uses the `MCPServerSse` class from `agents.mcp`. The server runs in a sub-process at `http://localhost:8000/sse`.

examples/mcp/sse_example/main.py

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
import asyncio
2+
import os
3+
import shutil
4+
import subprocess
5+
import time
6+
from typing import Any
7+
8+
from agents import Agent, Runner, gen_trace_id, trace
9+
from agents.mcp import MCPServer, MCPServerSse
10+
from agents.model_settings import ModelSettings
11+
12+
13+
async def run(mcp_server: MCPServer):
    """Run a tool-using demo agent against the given MCP server.

    Sends three prompts, one aimed at each of the server's tools, and prints
    the agent's final output for each.
    """
    agent = Agent(
        name="Assistant",
        instructions="Use the tools to answer the questions.",
        mcp_servers=[mcp_server],
        model_settings=ModelSettings(tool_choice="required"),
    )

    # Prompts chosen to exercise the server's add, weather, and secret-word tools.
    prompts = [
        "Add these numbers: 7 and 22.",
        "What's the weather in Tokyo?",
        "What's the secret word?",
    ]
    for index, message in enumerate(prompts):
        # The first prompt prints flush; later ones are set off by a blank gap,
        # matching the original per-stanza output exactly.
        separator = "" if index == 0 else "\n\n"
        print(f"{separator}Running: {message}")
        result = await Runner.run(starting_agent=agent, input=message)
        print(result.final_output)
39+
40+
async def main():
    """Connect to the local SSE MCP server and run the demo agent inside a trace."""
    connection_params = {
        "url": "http://localhost:8000/sse",
    }
    async with MCPServerSse(name="SSE Python Server", params=connection_params) as server:
        trace_id = gen_trace_id()
        with trace(workflow_name="SSE Example", trace_id=trace_id):
            # Print the trace URL up front so the run can be inspected while it executes.
            print(f"View trace: https://platform.openai.com/traces/{trace_id}\n")
            await run(server)
53+
if __name__ == "__main__":
    # Let's make sure the user has uv installed
    if not shutil.which("uv"):
        raise RuntimeError(
            "uv is not installed. Please install it: https://docs.astral.sh/uv/getting-started/installation/"
        )

    # We'll run the SSE server in a subprocess. Usually this would be a remote server, but for this
    # demo, we'll run it locally at http://localhost:8000/sse
    #
    # NOTE: the annotation is quoted because module-level variable annotations are
    # evaluated at runtime, and the `X | Y` union syntax only exists on Python 3.10+
    # while this project declares requires-python >= 3.9.
    process: "subprocess.Popen[Any] | None" = None
    try:
        this_dir = os.path.dirname(os.path.abspath(__file__))
        server_file = os.path.join(this_dir, "server.py")

        print("Starting SSE server at http://localhost:8000/sse ...")

        # Run `uv run server.py` to start the SSE server
        process = subprocess.Popen(["uv", "run", server_file])
        # Give it 3 seconds to start. (Fixed sleep is a demo shortcut; a robust
        # version would poll the /sse endpoint until it answers.)
        time.sleep(3)

        print("SSE server started. Running example...\n\n")
    except Exception as e:
        print(f"Error starting SSE server: {e}")
        # Use SystemExit instead of the `exit()` site builtin, which is not
        # guaranteed to be available (e.g. under `python -S`).
        raise SystemExit(1) from e

    try:
        asyncio.run(main())
    finally:
        # Always tear down the server subprocess, even if the example errors out.
        if process:
            process.terminate()

examples/mcp/sse_example/server.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
import random
2+
3+
import requests
4+
from mcp.server.fastmcp import FastMCP
5+
6+
# Create server
7+
mcp = FastMCP("Echo Server")
8+
9+
10+
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two numbers"""
    # Log the invocation so tool calls are visible in the server's console.
    print(f"[debug-server] add({a}, {b})")
    total = a + b
    return total
@mcp.tool()
def get_secret_word() -> str:
    # NOTE(review): no docstring, so this tool is advertised to clients without a
    # description; adding one would change the published tool schema.
    # Log the invocation so tool calls are visible in the server's console.
    print("[debug-server] get_secret_word()")
    words = ["apple", "banana", "cherry"]
    return random.choice(words)
@mcp.tool()
def get_current_weather(city: str) -> str:
    """Return the current weather for `city` as reported by wttr.in."""
    # Log the invocation so tool calls are visible in the server's console.
    print(f"[debug-server] get_current_weather({city})")

    endpoint = "https://wttr.in"
    # A timeout keeps a slow or unreachable weather service from hanging the
    # tool call (and the agent waiting on it) indefinitely.
    response = requests.get(f"{endpoint}/{city}", timeout=10)
    return response.text
32+
if __name__ == "__main__":
    # Serve the registered tools over the SSE (Server-Sent Events) transport.
    # The client example connects at http://localhost:8000/sse — presumably
    # FastMCP's default bind; confirm before changing ports.
    mcp.run(transport="sse")

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai-agents"
3-
version = "0.0.6"
3+
version = "0.0.7"
44
description = "OpenAI Agents SDK"
55
readme = "README.md"
66
requires-python = ">=3.9"

src/agents/_run_impl.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -536,7 +536,8 @@ async def execute_handoffs(
536536
run_config: RunConfig,
537537
) -> SingleStepResult:
538538
# If there is more than one handoff, add tool responses that reject those handoffs
539-
if len(run_handoffs) > 1:
539+
multiple_handoffs = len(run_handoffs) > 1
540+
if multiple_handoffs:
540541
output_message = "Multiple handoffs detected, ignoring this one."
541542
new_step_items.extend(
542543
[
@@ -558,6 +559,16 @@ async def execute_handoffs(
558559
context_wrapper, actual_handoff.tool_call.arguments
559560
)
560561
span_handoff.span_data.to_agent = new_agent.name
562+
if multiple_handoffs:
563+
requested_agents = [handoff.handoff.agent_name for handoff in run_handoffs]
564+
span_handoff.set_error(
565+
SpanError(
566+
message="Multiple handoffs requested",
567+
data={
568+
"requested_agents": requested_agents,
569+
},
570+
)
571+
)
561572

562573
# Append a tool output item for the handoff
563574
new_step_items.append(

0 commit comments

Comments
 (0)