Skip to content

Commit 01b12b5

Browse files
committed
feat: Multi-tool use with limits
1 parent dc8cd45 commit 01b12b5

File tree

2 files changed

+196
-0
lines changed

2 files changed

+196
-0
lines changed

multi_mcp_use/mermaid_diagrams.py

Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
invalid_mermaid_diagram = """
2+
```mermaid
3+
graph LR
4+
User((User)) --> |"Run script<br>(e.g., pydantic_mcp.py)"| Agent
5+
6+
# Agent Frameworks
7+
subgraph "Agent"
8+
Agent[Agent]
9+
ADK["Google ADK<br>(adk_mcp.py)"]
10+
LG["LangGraph<br>(langgraph_mcp.py)"]
11+
OAI["OpenAI Agents<br>(oai-agent_mcp.py)"]
12+
PYD["Pydantic-AI<br>(pydantic_mcp.py)"]
13+
14+
Agent --> ADK
15+
Agent --> LG
16+
Agent --> OAI
17+
Agent --> PYD
18+
end
19+
20+
# MCP Server
21+
subgraph "MCP"
22+
direction TD
23+
MCP["Model Context Protocol Server<br>(run_server.py)"]
24+
Tools["Tools<br>- add(a, b)<br>- get_current_time() e.g. {current_time}"]
25+
Resources["Resources<br>- greeting://{{name}}"]
26+
MCP --- Tools
27+
MCP --- Resources
28+
end
29+
30+
# LLM Providers
31+
subgraph "LLM Providers"
32+
direction TD
33+
OAI_LLM["OpenAI Models"]
34+
GEM["Google Gemini Models"]
35+
OTHER["Other LLM Providers..."]
36+
end
37+
38+
Logfire[("Logfire<br>Tracing")]
39+
40+
ADK --> MCP
41+
LG --> MCP
42+
OAI --> MCP
43+
PYD --> MCP
44+
45+
MCP --> OAI_LLM
46+
MCP --> GEM
47+
MCP --> OTHER
48+
49+
ADK --> Logfire
50+
LG --> Logfire
51+
OAI --> Logfire
52+
PYD --> Logfire
53+
54+
LLM_Response[("Response")] --> User
55+
OAI_LLM --> LLM_Response
56+
GEM --> LLM_Response
57+
OTHER --> LLM_Response
58+
59+
style MCP fill:#f9f,stroke:#333,stroke-width:2px
60+
style User fill:#bbf,stroke:#338,stroke-width:2px
61+
style Logfire fill:#bfb,stroke:#383,stroke-width:2px
62+
style LLM_Response fill:#fbb,stroke:#833,stroke-width:2px
63+
```
64+
"""
65+
66+
valid_mermaid_diagram = """
67+
```mermaid
68+
graph LR
69+
User((User)) --> |"Run script<br>(e.g., pydantic_mcp.py)"| Agent
70+
71+
%% Agent Frameworks
72+
subgraph "Agent Frameworks"
73+
direction TB
74+
Agent[Agent]
75+
ADK["Google ADK<br>(adk_mcp.py)"]
76+
LG["LangGraph<br>(langgraph_mcp.py)"]
77+
OAI["OpenAI Agents<br>(oai-agent_mcp.py)"]
78+
PYD["Pydantic-AI<br>(pydantic_mcp.py)"]
79+
80+
Agent --> ADK
81+
Agent --> LG
82+
Agent --> OAI
83+
Agent --> PYD
84+
end
85+
86+
%% MCP Server
87+
subgraph "MCP Server"
88+
direction TB
89+
MCP["Model Context Protocol Server<br>(run_server.py)"]
90+
Tools["Tools<br>- add(a, b)<br>- get_current_time() e.g. {current_time}"]
91+
Resources["Resources<br>- greeting://{{name}}"]
92+
MCP --- Tools
93+
MCP --- Resources
94+
end
95+
96+
subgraph "LLM Providers"
97+
OAI_LLM["OpenAI Models"]
98+
GEM["Google Gemini Models"]
99+
OTHER["Other LLM Providers..."]
100+
end
101+
102+
Logfire[("Logfire<br>Tracing")]
103+
104+
ADK --> MCP
105+
LG --> MCP
106+
OAI --> MCP
107+
PYD --> MCP
108+
109+
MCP --> OAI_LLM
110+
MCP --> GEM
111+
MCP --> OTHER
112+
113+
ADK --> Logfire
114+
LG --> Logfire
115+
OAI --> Logfire
116+
PYD --> Logfire
117+
118+
LLM_Response[("Response")] --> User
119+
OAI_LLM --> LLM_Response
120+
GEM --> LLM_Response
121+
OTHER --> LLM_Response
122+
123+
style MCP fill:#f9f,stroke:#333,stroke-width:2px
124+
style User fill:#bbf,stroke:#338,stroke-width:2px
125+
style Logfire fill:#bfb,stroke:#383,stroke-width:2px
126+
style LLM_Response fill:#fbb,stroke:#833,stroke-width:2px
127+
```
128+
"""

multi_mcp_use/pydantic_mcp.py

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
import asyncio
2+
3+
import logfire
4+
from dotenv import load_dotenv
5+
from pydantic_ai import Agent
6+
from pydantic_ai.mcp import MCPServerStdio
7+
from pydantic_ai.usage import UsageLimits
8+
9+
from mermaid_diagrams import invalid_mermaid_diagram, valid_mermaid_diagram
10+
11+
# Load API keys / LOGFIRE_TOKEN from a local .env file, if present.
load_dotenv()

# Configure logging to logfire if LOGFIRE_TOKEN is set in environment
logfire.configure(send_to_logfire="if-token-present", service_name="pydantic-multi-mcp")
# Trace both MCP protocol traffic and pydantic-ai agent activity.
logfire.instrument_mcp()
logfire.instrument_pydantic_ai()

# Configure MCP servers
# Local demo server launched as a subprocess over stdio via `uv run`.
# Presumably exposes add(a, b) / get_current_time() tools and the
# greeting://{name} resource, per the diagrams — confirm in run_server.py.
local_server = MCPServerStdio(
    command="uv",
    args=[
        "run",
        "run_server.py",
        "stdio",
    ],
)
# Third-party Mermaid validator MCP server, fetched and run via npx
# (requires Node.js on PATH).
mermaid_server = MCPServerStdio(
    command="npx",
    args=[
        "-y",
        "@rtuin/mcp-mermaid-validator@latest",
    ],
)
# Create Agent with MCP servers
agent = Agent(
    "gemini-2.5-pro-preview-03-25",
    # "openai:o4-mini",
    mcp_servers=[local_server, mermaid_server],
)
Agent.instrument_all()
41+
42+
43+
async def main(query: str = "Hi!", request_limit: int = 5):
    """
    Run the agent once against the configured MCP servers.

    Args:
        query (str): The prompt to run the agent with.
        request_limit (int): Maximum number of LLM requests the agent may
            make during this run (enforced via pydantic-ai UsageLimits).

    Returns:
        The agent run result; presumably exposes the model's final text via
        ``.output`` — confirm against the pydantic-ai version in use.
    """
    # Cap model requests so a tool-use loop cannot run away.
    usage_limits = UsageLimits(request_limit=request_limit)

    # Start the stdio MCP server subprocesses for the duration of the run.
    async with agent.run_mcp_servers():
        result = await agent.run(query, usage_limits=usage_limits)
        return result
59+
60+
# Evals
61+
# 1. Did it use both MCP tools?
62+
# 2. Is the mermaid diagram valid?
63+
# 3. Is the mermaid diagram close to the ground truth?
64+
65+
66+
if __name__ == "__main__":
    # Exercise both MCP servers in one run: the local time tool and the
    # Mermaid validator. The embedded diagram is intentionally broken, so
    # the agent must call the validator to repair it.
    prompt = f"Add the current time and fix the mermaid diagram syntax using the validator: {invalid_mermaid_diagram}. Return only the fixed mermaid diagram between backticks."
    asyncio.run(main(prompt))

0 commit comments

Comments
 (0)