|
21 | 21 | # "openai-agents", |
22 | 22 | # "httpx", |
23 | 23 | # "mcp", |
| 24 | +# "elastic-opentelemetry", |
24 | 25 | # "openinference-instrumentation-openai-agents", |
25 | 26 | # "opentelemetry-instrumentation-httpx", |
26 | 27 | # "openinference-instrumentation-mcp", |
|
42 | 43 | OpenAIProvider, |
43 | 44 | RunConfig, |
44 | 45 | Runner, |
45 | | - Tool, |
46 | 46 | ) |
47 | | -from agents.mcp import MCPServerStreamableHttp, MCPUtil |
| 47 | +from agents.mcp import MCPServer, MCPServerStreamableHttp, MCPUtil |
48 | 48 |
|
49 | | - |
50 | | -async def run_agent(prompt: str, model_name: str, tools: list[Tool]): |
51 | | - model = OpenAIProvider(use_responses=False).get_model(model_name) |
52 | | - agent = Agent(name="Assistant", model=model, tools=tools) |
53 | | - result = await Runner.run( |
54 | | - starting_agent=agent, |
55 | | - input=prompt, |
56 | | - run_config=RunConfig(workflow_name="envoy-ai-gateway"), |
57 | | - ) |
58 | | - print(result.final_output) |
| 49 | +# Uncomment the following lines to enable agent verbose logging |
| 50 | +# from agents import enable_verbose_stdout_logging |
| 51 | +# enable_verbose_stdout_logging() |
59 | 52 |
|
60 | 53 |
|
async def main(prompt: str, model_name: str, mcp_url: str):
    """Run a one-shot agent conversation backed by an MCP tool server.

    Opens a Streamable HTTP MCP session against *mcp_url*, builds an Agent
    whose tools come from that server, runs *prompt* through *model_name*,
    and prints the run's final output.
    """
    # NOTE(review): timeout appears to be seconds per the MCP client API — confirm.
    mcp_params = {"url": mcp_url, "timeout": 300}
    async with MCPServerStreamableHttp(
        name="Envoy AI Gateway MCP",
        params=mcp_params,
        cache_tools_list=True,  # avoid re-listing tools on every run
    ) as mcp_server:
        # Chat-completions style model (use_responses=False), resolved by name.
        provider = OpenAIProvider(use_responses=False)
        assistant = Agent(
            name="Assistant",
            model=provider.get_model(model_name),
            mcp_servers=[mcp_server],
        )
        run_result = await Runner.run(
            starting_agent=assistant,
            input=prompt,
            run_config=RunConfig(workflow_name="Envoy AI Gateway Example"),
        )
        print(run_result.final_output)
71 | 68 |
|
72 | 69 |
|
73 | 70 | if __name__ == "__main__": |
|
0 commit comments