From e584f8d170c9a8a97d41c55b5006168fed9a6a91 Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Mon, 15 Sep 2025 13:51:21 -0400 Subject: [PATCH 1/3] Improve init command flow --- src/mcp_agent/cli/commands/init.py | 61 ++- src/mcp_agent/cli/commands/quickstart.py | 47 +++ .../mcp_agent_server/elicitation/README.md | 22 ++ .../mcp_agent_server/elicitation/client.py | 76 ++++ .../mcp_agent_server/elicitation/server.py | 66 ++++ .../mcp_agent_server/notifications/README.md | 23 ++ .../mcp_agent_server/notifications/client.py | 69 ++++ .../mcp_agent_server/notifications/server.py | 73 ++++ .../mcp_agent_server/reference/README.md | 50 +++ .../mcp_agent_server/reference/client.py | 109 +++++ .../mcp_agent_server/reference/server.py | 168 ++++++++ .../mcp_agent_server/sampling/README.md | 22 ++ .../mcp_agent_server/sampling/client.py | 68 ++++ .../mcp_agent_server/sampling/server.py | 59 +++ src/mcp_agent/data/templates/README_init.md | 83 ++++ src/mcp_agent/data/templates/basic_agent.py | 372 ++++++++++++------ .../data/templates/mcp_agent.config.yaml | 16 +- 17 files changed, 1249 insertions(+), 135 deletions(-) create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/reference/README.md create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/reference/client.py create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/reference/server.py create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py create mode 100644 src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py create mode 100644 src/mcp_agent/data/templates/README_init.md diff --git a/src/mcp_agent/cli/commands/init.py b/src/mcp_agent/cli/commands/init.py index a7bd07c7c..5fc92252d 100644 --- a/src/mcp_agent/cli/commands/init.py +++ b/src/mcp_agent/cli/commands/init.py @@ -45,6 +45,33 @@ def _write(path: Path, content: str, force: bool) -> bool: return False +def _write_readme(dir_path: Path, content: str, force: bool) -> str | None: + """Create a README file with fallback naming if a README already exists. + + Returns the filename created, or None if it could not be written (in which case + the content is printed to console as a fallback). 
+ """ + candidates = [ + "README.md", + "README.mcp-agent.md", + "README.mcp.md", + ] + # Add numeric fallbacks + candidates += [f"README.{i}.md" for i in range(1, 6)] + + for name in candidates: + path = dir_path / name + if not path.exists() or force: + ok = _write(path, content, force) + if ok: + return name + # Fallback: print content to console if we couldn't write any variant + console.print("\n[yellow]A README already exists and could not be overwritten.[/yellow]") + console.print("[bold]Suggested README contents:[/bold]\n") + console.print(content) + return None + + @app.callback(invoke_without_command=True) def init( ctx: typer.Context, @@ -153,6 +180,15 @@ def init( except Exception: pass + # No separate agents.yaml needed; agent definitions live in mcp_agent.config.yaml + + # Create README for the basic template + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + elif template == "server": server_path = dir / "server.py" server_content = _load_template("basic_agent_server.py") @@ -164,6 +200,13 @@ def init( except Exception: pass + # README for server template + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + elif template == "token": token_path = dir / "token_example.py" token_content = _load_template("token_counter.py") @@ -175,6 +218,12 @@ def init( except Exception: pass + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + elif template == "factory": factory_path = dir / "factory.py" factory_content = _load_template("agent_factory.py") @@ -192,6 +241,12 @@ def init( if agents_content and _write(agents_path, agents_content, force): files_created.append("agents.yaml") + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + # Display results if files_created: console.print("\n[green]✅ Successfully initialized project![/green]") @@ -229,9 +284,9 @@ def init( elif template == "factory": console.print("3. Customize agents in [cyan]agents.yaml[/cyan]") console.print("4. Run the factory: [cyan]uv run factory.py[/cyan]") - elif template == "minimal": - console.print("3. Create your agent script") - console.print(" See examples: [cyan]mcp-agent quickstart[/cyan]") + elif template == "minimal": + console.print("3. 
Create your agent script") + console.print(" See examples: [cyan]mcp-agent quickstart[/cyan]") console.print( "\n[dim]Run [cyan]mcp-agent doctor[/cyan] to check your configuration[/dim]" diff --git a/src/mcp_agent/cli/commands/quickstart.py b/src/mcp_agent/cli/commands/quickstart.py index f79144b86..854f68c0a 100644 --- a/src/mcp_agent/cli/commands/quickstart.py +++ b/src/mcp_agent/cli/commands/quickstart.py @@ -86,6 +86,10 @@ def overview() -> None: ("token-counter", "data/examples/basic/token_counter"), ("agent-factory", "data/examples/basic/agent_factory"), ("basic-agent-server", "data/examples/mcp_agent_server/asyncio"), + ("reference-agent-server", "data/examples/mcp_agent_server/reference"), + ("elicitation", "data/examples/mcp_agent_server/elicitation"), + ("sampling", "data/examples/mcp_agent_server/sampling"), + ("notifications", "data/examples/mcp_agent_server/notifications"), ] for n, p in rows: table.add_row(n, p) @@ -199,3 +203,46 @@ def basic_agent_server( src = EXAMPLE_ROOT / "mcp_agent_server" / "asyncio" copied = _copy_tree(src, dst, force) console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("reference-agent-server") +def reference_agent_server( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "reference_agent_server" + copied = _copy_pkg_tree("mcp_agent_server/reference", dst, force) + if not copied: + src = EXAMPLE_ROOT / "mcp_agent_server" / "reference" + copied = _copy_tree(src, dst, force) + console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("elicitation") +def elicitation( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "elicitation" + copied = _copy_pkg_tree("mcp_agent_server/elicitation", dst, force) + console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("sampling") +def sampling( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "sampling" + copied = _copy_pkg_tree("mcp_agent_server/sampling", dst, force) + console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("notifications") +def notifications( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "notifications" + copied = _copy_pkg_tree("mcp_agent_server/notifications", dst, force) + console.print(f"Copied {copied} set(s) to {dst}") diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md new file mode 100644 index 000000000..9720bee8d --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md @@ -0,0 +1,22 @@ +# Elicitation Server + +Minimal server demonstrating user confirmation via elicitation. + +## Run + +```bash +uv run server.py +``` + +Connect with the minimal client: + +```bash +uv run client.py +``` + +Tools: + +- `confirm_action(action: str)` — prompts the user (via upstream client) to accept or decline. + +This example uses console handlers for local testing. In an MCP client UI, the prompt will be displayed to the user. 
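+
+Under the hood, `confirm_action` asks the connected client to fill a small boolean schema; this is the dict the server builds in `server.py`:
+
+```python
+schema = {
+    "type": "object",
+    "title": "Confirmation",
+    "properties": {"confirm": {"type": "boolean", "title": "Confirm"}},
+    "required": ["confirm"],
+}
+```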
+ diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py new file mode 100644 index 000000000..e5cb2bc05 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py @@ -0,0 +1,76 @@ +""" +Minimal client for the Elicitation Server. + +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp_agent.mcp.gen_client import gen_client +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + settings = Settings(execution_engine="asyncio") + app = MCPApp( + name="elicitation_client", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, + settings=settings, + ) + + async with app.run() as client_app: + # Configure server entry + cfg = type("Cfg", (), {})() + cfg.name = "elicitation_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + client_app.context.server_registry.registry["elicitation_server"] = cfg + + async with gen_client( + "elicitation_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + await server.set_logging_level("info") + res = await server.call_tool("confirm_action", {"action": "proceed"}) + print("confirm_action:", res.content[0].text if res.content else None) + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py new file mode 100644 index 000000000..6128b5116 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py @@ -0,0 +1,66 @@ +""" +Elicitation Server (asyncio) + +Demonstrates user confirmation via elicitation. 
+ +Run: + uv run server.py +""" + +from __future__ import annotations + +import asyncio +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback +from mcp.types import ElicitRequestedSchema + + +app = MCPApp( + name="elicitation_server", + description="Minimal server showing elicitation (user confirmation)", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, +) + + +@app.tool(name="confirm_action") +async def confirm_action(action: str, app_ctx: Optional[AppContext] = None) -> str: + """Ask the user to confirm an action.""" + _app = app_ctx.app if app_ctx else app + upstream = getattr(_app.context, "upstream_session", None) + schema: ElicitRequestedSchema = { + "type": "object", + "title": "Confirmation", + "properties": {"confirm": {"type": "boolean", "title": "Confirm"}}, + "required": ["confirm"], + } + if upstream is not None: + result = await upstream.elicit( + message=f"Do you want to {action}?", requestedSchema=schema + ) + accepted = getattr(result, "action", "") in ("accept", "accepted") + return f"Action '{action}' {'confirmed' if accepted else 'declined'} by user" + # Fallback to console handler + if _app.context.elicitation_handler: + resp = await _app.context.elicitation_handler( + {"message": f"Do you want to {action}?", "requestedSchema": schema} + ) + accepted = getattr(resp, "action", "") in ("accept", "accepted") + return f"Action '{action}' {'confirmed' if accepted else 'declined'}" + return f"Action '{action}' confirmed by default" + + +async def main() -> None: + async with app.run() as agent_app: + mcp_server = create_mcp_server_for_app(agent_app) + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md b/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md new file mode 100644 index 000000000..0f3aeb2fe --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md @@ -0,0 +1,23 @@ +# Notifications Server + +Minimal server demonstrating logging and non-logging notifications. + +## Run + +```bash +uv run server.py +``` + +Connect with the minimal client: + +```bash +uv run client.py +``` + +Tools: + +- `notify(message: str, level: str='info')` — forwards logs to the upstream client. +- `notify_progress(progress: float, message: Optional[str])` — sends a progress notification. + +These are best-effort and non-blocking for the server. + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py b/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py new file mode 100644 index 000000000..c21780eac --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py @@ -0,0 +1,69 @@ +""" +Minimal client for the Notifications Server. 
+ +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp_agent.mcp.gen_client import gen_client +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + settings = Settings(execution_engine="asyncio") + app = MCPApp(name="notifications_client", settings=settings) + + async with app.run() as client_app: + cfg = type("Cfg", (), {})() + cfg.name = "notifications_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + client_app.context.server_registry.registry["notifications_server"] = cfg + + async with gen_client( + "notifications_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + await server.set_logging_level("info") + await server.call_tool("notify", {"message": "Hello from client"}) + await server.call_tool("notify_progress", {"progress": 0.25, "message": "Quarter"}) + print("Sent notify + notify_progress") + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py b/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py new file mode 100644 index 000000000..622d4b2f3 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py @@ -0,0 +1,73 @@ +""" +Notifications Server (asyncio) + +Demonstrates logging and non-logging notifications. 
+ +Run: + uv run server.py +""" + +from __future__ import annotations + +import asyncio +from typing import Optional, Literal + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app + + +app = MCPApp( + name="notifications_server", + description="Minimal server showing notifications and logging", +) + + +@app.tool(name="notify") +def notify( + message: str, + level: Literal["debug", "info", "warning", "error"] = "info", + app_ctx: Optional[AppContext] = None, +) -> str: + """Send an upstream log/notification at the requested level.""" + _app = app_ctx.app if app_ctx else app + logger = _app.logger + if level == "debug": + logger.debug(message) + elif level == "warning": + logger.warning(message) + elif level == "error": + logger.error(message) + else: + logger.info(message) + return "ok" + + +@app.tool(name="notify_progress") +async def notify_progress( + progress: float = 0.5, + message: str | None = "Demo progress", + app_ctx: Optional[AppContext] = None, +) -> str: + """Send a progress notification via upstream session (best-effort).""" + _app = app_ctx.app if app_ctx else app + upstream = getattr(_app.context, "upstream_session", None) + if upstream is None: + _app.logger.warning("No upstream session to notify") + return "no-upstream" + await upstream.send_progress_notification( + progress_token="notifications-demo", progress=progress, message=message + ) + _app.logger.info("Sent notifications/progress") + return "ok" + + +async def main() -> None: + async with app.run() as agent_app: + mcp_server = create_mcp_server_for_app(agent_app) + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md b/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md new file mode 100644 index 000000000..27843ca73 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md @@ -0,0 +1,50 @@ +# Reference Agent Server + +This is a clean, strongly-typed example of an MCP Agent server showcasing: + +- Agent behavior with MCP servers (fetch + filesystem) and an LLM +- Tools implemented with `@app.tool` and `@app.async_tool` +- Notifications and logging via `app.logger` +- Elicitation (user confirmation) proxied to the upstream client +- Sampling (LLM call) with simple `RequestParams` +- Prompts and Resources registered on the FastMCP server + +## Run the server + +```bash +uv run server.py +``` + +This starts an SSE server at `http://127.0.0.1:8000/sse`. + +## Try it with the minimal client + +```bash +uv run client.py +``` + +The client connects over SSE, sets logging level, and exercises tools: + +- `finder_tool` — Agent + LLM + MCP servers +- `notify` — logging/notifications +- `sample_haiku` — LLM sampling +- `confirm_action` — elicitation prompt + +## Prompts & Resources + +The server registers a couple of demo resources and a simple prompt: + +- Resources: + - `demo://docs/readme` — sample README content + - `demo://{city}/weather` — simple weather string +- Prompt: + - `echo(message: str)` — returns `Prompt: {message}` + +You can use any MCP client capable of listing resources/prompts to explore these. + +## Configuration + +Put your API keys in `mcp_agent.secrets.yaml` or environment variables +(`OPENAI_API_KEY`, etc.). The server uses the MCP app configuration +(`mcp_agent.config.yaml`) for MCP servers and provider defaults. 
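+
+For local runs, a minimal `mcp_agent.config.yaml` sketch (the `fetch` and `filesystem` commands mirror the packaged template; the default model is an assumption — adjust to your setup):
+
+```yaml
+mcp:
+  servers:
+    fetch:
+      command: uvx
+      args: ["mcp-server-fetch"]
+    filesystem:
+      command: npx
+      args: ["-y", "@modelcontextprotocol/server-filesystem"]
+openai:
+  default_model: gpt-4o-mini
+```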
+ diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py b/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py new file mode 100644 index 000000000..11d1439fc --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py @@ -0,0 +1,109 @@ +""" +Minimal client for the Reference Agent Server. + +Connects to the server over SSE and exercises tools: + - finder_tool, notify, sample_haiku, confirm_action + - list tools and fetch demo prompt/resource + +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.gen_client import gen_client +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + # Force asyncio executor locally for client-side flows (sampling/elicitation callbacks) + settings = Settings(execution_engine="asyncio") + app = MCPApp( + name="reference_client", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, + settings=settings, + ) + + async with app.run() as client_app: + client_app.logger.info("Connecting to reference server...") + + # Server definition provided inline + client_app.context.server_registry.registry["reference_agent_server"] = ( + client_app.context.server_registry.registry.get("reference_agent_server") + or type("_Cfg", (), {})() + ) + cfg = client_app.context.server_registry.registry["reference_agent_server"] + cfg.name = "reference_agent_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + + async with gen_client( + "reference_agent_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + # Ask server to set logging level + await server.set_logging_level("info") + + # List tools + tools = await server.list_tools() + print("Tools:", [t.name for t in tools.tools]) + + # Run finder_tool + res = await server.call_tool( + "finder_tool", + {"request": "List files in current directory and summarize"}, + ) + print("finder_tool:", res.content[0].text if res.content else None) + + # Notify + await server.call_tool("notify", {"message": "Hello from client"}) + + # Sampling + res = await server.call_tool("sample_haiku", {"topic": "clouds"}) + print("sample_haiku:", res.content[0].text if res.content else None) + + # Elicitation demo + res = await server.call_tool("confirm_action", {"action": "proceed"}) + 
print("confirm_action:", res.content[0].text if res.content else None) + + # Exercise FastMCP prompt/resource via list_tools isn't enough; show resource URIs in README + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py b/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py new file mode 100644 index 000000000..9b49100e4 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py @@ -0,0 +1,168 @@ +""" +Reference Agent Server (asyncio) + +Demonstrates: + - Agent behavior with MCP servers (fetch + filesystem) and an LLM + - Tools using @app.tool and @app.async_tool + - Notifications and logging via app.logger + - Elicitation (user confirmation) proxied to upstream client + - Sampling (LLM request) with simple RequestParams + - Prompts and Resources registered on the FastMCP server + +Run: + uv run server.py + +Test client: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +import os +from typing import Optional, Literal + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback + +from mcp_agent.agents.agent import Agent +from mcp_agent.workflows.factory import create_llm +from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM +from mcp_agent.workflows.llm.augmented_llm import RequestParams as LLMRequestParams +from mcp_agent.workflows.llm.llm_selector import ModelPreferences +from mcp.types import ElicitRequestedSchema + + +app = MCPApp( + name="reference_agent_server", + description="Reference server demonstrating agent + tools + prompts + resources", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, +) + + +@app.tool(name="finder_tool") +async def finder_tool(request: str, app_ctx: Optional[AppContext] = None) -> str: + """Agent that can use filesystem+fetch and an LLM to answer the request.""" + _app = app_ctx.app if app_ctx else app + ctx = _app.context + try: + if "filesystem" in ctx.config.mcp.servers: + ctx.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + except Exception: + pass + + agent = Agent( + name="finder", + instruction=( + "Use MCP servers to fetch and read files, then answer the user's query concisely." 
+ ), + server_names=["fetch", "filesystem"], + context=ctx, + ) + async with agent: + llm = await agent.attach_llm(OpenAIAugmentedLLM) + return await llm.generate_str(message=request) + + +@app.tool(name="notify") +def notify( + message: str, + level: Literal["debug", "info", "warning", "error"] = "info", + app_ctx: Optional[AppContext] = None, +) -> str: + """Send an upstream log/notification at the requested level.""" + _app = app_ctx.app if app_ctx else app + logger = _app.logger + if level == "debug": + logger.debug(message) + elif level == "warning": + logger.warning(message) + elif level == "error": + logger.error(message) + else: + logger.info(message) + return "ok" + + +@app.tool(name="confirm_action") +async def confirm_action( + action: str, + app_ctx: Optional[AppContext] = None, +) -> str: + """Ask the user to confirm the action via elicitation.""" + _app = app_ctx.app if app_ctx else app + upstream = getattr(_app.context, "upstream_session", None) + schema: ElicitRequestedSchema = { + "type": "object", + "title": "Confirmation", + "properties": {"confirm": {"type": "boolean", "title": "Confirm"}}, + "required": ["confirm"], + } + if upstream is not None: + result = await upstream.elicit( + message=f"Do you want to {action}?", requestedSchema=schema + ) + accepted = getattr(result, "action", "") in ("accept", "accepted") + return f"Action '{action}' {'confirmed' if accepted else 'declined'} by user" + # Fallback to handler if present + if _app.context.elicitation_handler: + resp = await _app.context.elicitation_handler( + {"message": f"Do you want to {action}?", "requestedSchema": schema} + ) + accepted = getattr(resp, "action", "") in ("accept", "accepted") + return f"Action '{action}' {'confirmed' if accepted else 'declined'}" + return f"Action '{action}' confirmed by default" + + +@app.tool(name="sample_haiku") +async def sample_haiku(topic: str, app_ctx: Optional[AppContext] = None) -> str: + """Generate a short poem using configured LLM settings.""" + _app = app_ctx.app if app_ctx else app + llm = create_llm( + agent_name="sampling_demo", + server_names=[], + instruction="You are a concise poet.", + context=_app.context, + ) + req = LLMRequestParams( + maxTokens=80, + modelPreferences=ModelPreferences(hints=[]), + systemPrompt="Write a 3-line haiku.", + temperature=0.7, + use_history=False, + max_iterations=1, + ) + return await llm.generate_str(message=f"Haiku about {topic}", request_params=req) + + +async def main() -> None: + async with app.run() as agent_app: + # Create MCP server (FastMCP) that exposes tools; then add prompts/resources + mcp_server = create_mcp_server_for_app(agent_app) + + # Register a couple of demo resources + def _res_readme() -> str: + return "# Demo Resource\n\nThis is a README resource provided by the reference server." + + def _res_weather(city: str) -> str: + return f"It is sunny in {city} today!" 
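+
+    # Note: "demo://{city}/weather" below is a URI template; FastMCP binds the
+    # {city} placeholder to the function's `city` parameter when a client reads
+    # a matching URI (e.g., demo://paris/weather).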
+ + mcp_server.resource("demo://docs/readme")(_res_readme) + mcp_server.resource("demo://{city}/weather")(_res_weather) + + # Register a simple prompt + def _prompt_echo(message: str) -> str: + return f"Prompt: {message}" + + mcp_server.prompt()(_prompt_echo) + + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md b/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md new file mode 100644 index 000000000..55bbccae3 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md @@ -0,0 +1,22 @@ +# Sampling Server + +Minimal server demonstrating LLM sampling. + +## Run + +```bash +uv run server.py +``` + +Connect with the minimal client: + +```bash +uv run client.py +``` + +Tools: + +- `sample_haiku(topic: str)` — generates a short poem using configured LLM settings. + +Add your API key(s) to `mcp_agent.secrets.yaml` or environment variables (e.g. `OPENAI_API_KEY`). + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py b/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py new file mode 100644 index 000000000..60c899571 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py @@ -0,0 +1,68 @@ +""" +Minimal client for the Sampling Server. + +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp_agent.mcp.gen_client import gen_client +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + settings = Settings(execution_engine="asyncio") + app = MCPApp(name="sampling_client", settings=settings) + + async with app.run() as client_app: + cfg = type("Cfg", (), {})() + cfg.name = "sampling_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + client_app.context.server_registry.registry["sampling_server"] = cfg + + async with gen_client( + "sampling_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + await server.set_logging_level("info") + res = await server.call_tool("sample_haiku", {"topic": "mountains"}) + print("sample_haiku:", res.content[0].text if res.content else None) + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py b/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py new file mode 100644 index 000000000..4d6af5a09 --- /dev/null +++ 
b/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py @@ -0,0 +1,59 @@ +""" +Sampling Server (asyncio) + +Demonstrates a minimal LLM sampling tool. + +Run: + uv run server.py +""" + +from __future__ import annotations + +import asyncio +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app +from mcp_agent.workflows.factory import create_llm +from mcp_agent.workflows.llm.augmented_llm import RequestParams as LLMRequestParams +from mcp_agent.workflows.llm.llm_selector import ModelPreferences + + +app = MCPApp( + name="sampling_server", + description="Minimal server showing LLM sampling", + human_input_callback=None, +) + + +@app.tool(name="sample_haiku") +async def sample_haiku(topic: str, app_ctx: Optional[AppContext] = None) -> str: + """Generate a short poem using configured LLM settings.""" + _app = app_ctx.app if app_ctx else app + llm = create_llm( + agent_name="sampling_demo", + server_names=[], + instruction="You are a concise poet.", + context=_app.context, + ) + req = LLMRequestParams( + maxTokens=80, + modelPreferences=ModelPreferences(hints=[]), + systemPrompt="Write a 3-line haiku.", + temperature=0.7, + use_history=False, + max_iterations=1, + ) + return await llm.generate_str(message=f"Haiku about {topic}", request_params=req) + + +async def main() -> None: + async with app.run() as agent_app: + mcp_server = create_mcp_server_for_app(agent_app) + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/src/mcp_agent/data/templates/README_init.md b/src/mcp_agent/data/templates/README_init.md new file mode 100644 index 000000000..2189a529a --- /dev/null +++ b/src/mcp_agent/data/templates/README_init.md @@ -0,0 +1,83 @@ +# MCP-Agent Starter + +Welcome! This project was generated by `mcp-agent init` and includes a small set +of tools that demonstrate the basics of MCP-Agent: + +- A simple Agent that uses MCP servers and an LLM (`finder_tool`) +- Synchronous and asynchronous tools (`greet`, `reverse_async`) +- Notifications/logging (`notify`) +- Elicitation (user confirmation) (`confirm_action`) +- Sampling with request parameters (`sample_haiku`) +- Loading AgentSpec definitions from config and running them (`agent_catalog`, `run_agent`) + +## Quick Start + +1) Add your API keys to `mcp_agent.secrets.yaml` (or environment variables): + + - `OPENAI_API_KEY` (recommended) + - `ANTHROPIC_API_KEY` (optional) + +2) Review `mcp_agent.config.yaml`. It defines: + + - Execution engine (default: `asyncio`) + - Logger settings + - MCP servers (like `filesystem`, `fetch`) + - Optional `agents.definitions` used by the `run_agent` tool + +3) Run locally: + +```bash +uv run main.py +``` + +Or develop with the dev CLI: + +```bash +mcp-agent dev chat +# or +mcp-agent dev serve --script main.py +``` + +## Tools Overview + +All tools are defined in `main.py` using the `@app.tool` or `@app.async_tool` decorators. +Each tool is strongly typed and accepts an optional `app_ctx` parameter to access the +MCPApp context at runtime (e.g., logging, upstream session, config). + +- `finder_tool(request: str)` + - Demonstrates Agent behavior. Creates an Agent with `filesystem` and `fetch`, attaches an LLM, and answers the request. + +- `agent_catalog()` + - Lists agent names defined in `mcp_agent.config.yaml` under `agents.definitions`. 
+
+- `run_agent(agent_name: str, prompt: str)`
+  - Instantiates an Agent from `agents.definitions` and runs an LLM call.
+
+- `greet(name: str)`
+  - Minimal synchronous tool that logs a message and returns a greeting.
+
+- `notify(message: str, level: Literal["debug","info","warning","error"])`
+  - Demonstrates upstream logging/notifications using the app logger.
+
+- `confirm_action(action: str)`
+  - Demonstrates elicitation. When connected to an MCP client, the user is prompted for confirmation.
+
+- `sample_haiku(topic: str)`
+  - Demonstrates LLM sampling by constructing RequestParams and calling an LLM.
+
+- `reverse_async(text: str)`
+  - Simple asynchronous tool that reverses the input string.
+
+## Contexts
+
+- `app_ctx`: MCPApp Context (configuration, logger, upstream session, etc.)
+- `ctx`/`mcp_ctx`: FastMCP tool context (provided by the FastMCP server runtime), included for advanced use.
+
+## Where to next?
+
+- Explore configuration in `mcp_agent.config.yaml` and add more MCP servers.
+- Extend `agents.definitions` with more AgentSpec entries.
+- Add new tools using `@app.tool` or `@app.async_tool`.
+
+Happy building!
+
diff --git a/src/mcp_agent/data/templates/basic_agent.py b/src/mcp_agent/data/templates/basic_agent.py
index 77a8ccc74..f357436f5 100644
--- a/src/mcp_agent/data/templates/basic_agent.py
+++ b/src/mcp_agent/data/templates/basic_agent.py
@@ -1,151 +1,265 @@
-import asyncio
+"""
+Welcome to mcp-agent!
+
+Canonical MCP-Agent example for new projects.
+
+This script showcases:
+  - Setting up a basic Agent that uses the fetch and filesystem MCP servers
+  - @app.tool and @app.async_tool decorators to define synchronous and long-running tools
+  - Advanced MCP features: Notifications, sampling, and elicitation
+"""
+
+from __future__ import annotations
+
+from typing import Optional, Literal
 import os
-import time
+
+from mcp.server.fastmcp import Context as MCPContext
+from mcp.types import ElicitRequestedSchema, TextContent, CreateMessageResult
 
 from mcp_agent.app import MCPApp
-from mcp_agent.config import (
-    Settings,
-    LoggerSettings,
-    MCPSettings,
-    MCPServerSettings,
-    OpenAISettings,
-    AnthropicSettings,
-)
 from mcp_agent.agents.agent import Agent
-from mcp_agent.workflows.llm.augmented_llm import RequestParams
+from mcp_agent.agents.agent_spec import AgentSpec
+from mcp_agent.core.context import Context as AppContext
+from mcp_agent.workflows.factory import create_llm
+from mcp_agent.workflows.llm.augmented_llm import RequestParams as LLMRequestParams
 from mcp_agent.workflows.llm.llm_selector import ModelPreferences
-from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
+from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
+
+# If you want to use a different LLM provider, import the appropriate AugmentedLLM, e.g.:
+# from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 
-from mcp_agent.tracing.token_counter import TokenSummary
-
-settings = Settings(
-    execution_engine="asyncio",
-    logger=LoggerSettings(type="file", level="debug"),
-    mcp=MCPSettings(
-        servers={
-            "fetch": MCPServerSettings(
-                command="uvx",
-                args=["mcp-server-fetch"],
-            ),
-            "filesystem": MCPServerSettings(
-                command="npx",
-                args=["-y", "@modelcontextprotocol/server-filesystem"],
-            ),
-        }
-    ),
-    openai=OpenAISettings(
-        api_key="sk-my-openai-api-key",
-        default_model="gpt-4o-mini",
-    ),
-    anthropic=AnthropicSettings(
-        api_key="sk-my-anthropic-api-key",
-    ),
-)
-
-# Settings can either be specified programmatically,
-# or loaded from mcp_agent.config.yaml/mcp_agent.secrets.yaml
-app = 
MCPApp(name="mcp_basic_agent") # settings=settings) - - -async def example_usage(): - async with app.run() as agent_app: - logger = agent_app.logger - context = agent_app.context - - logger.info("Current config:", data=context.config.model_dump()) - - # Add the current directory to the filesystem server's args - context.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) - - finder_agent = Agent( - name="finder", - instruction="""You are an agent with access to the filesystem, - as well as the ability to fetch URLs. Your job is to identify - the closest match to a user's request, make the appropriate tool calls, - and return the URI and CONTENTS of the closest match.""", - server_names=["fetch", "filesystem"], - ) - async with finder_agent: - logger.info("finder: Connected to server, calling list_tools...") - result = await finder_agent.list_tools() - logger.info("Tools available:", data=result.model_dump()) +# Create the MCP App. Configuration is read from mcp_agent.config.yaml/secrets.yaml. +app = MCPApp(name="hello_world", description="Hello world mcp-agent application") - llm = await finder_agent.attach_llm(OpenAIAugmentedLLM) - result = await llm.generate_str( - message="Print the contents of mcp_agent.config.yaml verbatim", - ) - logger.info(f"mcp_agent.config.yaml contents: {result}") - # Let's switch the same agent to a different LLM - llm = await finder_agent.attach_llm(AnthropicAugmentedLLM) +# 1) Agent behavior (first): demonstrate an Agent using MCP servers + LLM +@app.tool(name="finder_tool") +async def finder_tool(request: str, app_ctx: Optional[AppContext] = None) -> str: + """ + Create an Agent with access to MCP servers (fetch + filesystem), attach an LLM, + and handle the user's request. + """ + _app = app_ctx.app if app_ctx else app + ctx = _app.context - result = await llm.generate_str( - message="Print the first 2 paragraphs of https://modelcontextprotocol.io/introduction", - ) - logger.info(f"First 2 paragraphs of Model Context Protocol docs: {result}") - - # Multi-turn conversations - result = await llm.generate_str( - message="Summarize those paragraphs in a 128 character tweet", - # You can configure advanced options by setting the request_params object - request_params=RequestParams( - # See https://modelcontextprotocol.io/docs/concepts/sampling#model-preferences for more details - modelPreferences=ModelPreferences( - costPriority=0.1, speedPriority=0.2, intelligencePriority=0.7 - ), - # You can also set the model directly using the 'model' field - # Generally request_params type aligns with the Sampling API type in MCP - ), - ) - logger.info(f"Paragraph as a tweet: {result}") + # Ensure filesystem server can read current working directory (dev-friendly) + try: + if "filesystem" in ctx.config.mcp.servers: + ctx.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + except Exception: + pass - # Display final comprehensive token usage summary (use app convenience) - await display_token_summary(agent_app, finder_agent) + agent = Agent( + name="finder", + instruction=( + "You are a helpful assistant. Use MCP servers to fetch and read files," + " then answer the request concisely." 
+        ),
+        server_names=["fetch", "filesystem"],
+    )
+    async with agent:
+        llm = await agent.attach_llm(OpenAIAugmentedLLM)
+        result = await llm.generate_str(message=request)
+        return result
 
-async def display_token_summary(app_ctx: MCPApp, agent: Agent | None = None):
-    """Display comprehensive token usage summary using app/agent convenience APIs."""
-    summary: TokenSummary = await app_ctx.get_token_summary()
-    print("\n" + "=" * 50)
-    print("TOKEN USAGE SUMMARY")
-    print("=" * 50)
+# 2) Agent catalog: list agents defined in config (agents.definitions)
+@app.tool(name="agent_catalog")
+def agent_catalog(app_ctx: Optional[AppContext] = None) -> str:
+    """List agent names defined under config.agents.definitions."""
+    _app = app_ctx.app if app_ctx else app
+    defs: list[AgentSpec] = (
+        getattr(getattr(_app.context.config, "agents", None), "definitions", []) or []
+    )
+    names = [getattr(d, "name", "") for d in defs if getattr(d, "name", None)]
+    return ", ".join(names) if names else "(no agents defined in config)"
 
-    # Total usage and cost
-    print("\nTotal Usage:")
-    print(f"  Total tokens: {summary.usage.total_tokens:,}")
-    print(f"  Input tokens: {summary.usage.input_tokens:,}")
-    print(f"  Output tokens: {summary.usage.output_tokens:,}")
-    print(f"  Total cost: ${summary.cost:.4f}")
-
-    # Breakdown by model
-    if summary.model_usage:
-        print("\nBreakdown by Model:")
-        for model_key, data in summary.model_usage.items():
-            print(f"\n  {model_key}:")
-            print(
-                f"    Tokens: {data.usage.total_tokens:,} (input: {data.usage.input_tokens:,}, output: {data.usage.output_tokens:,})"
-            )
-            print(f"    Cost: ${data.cost:.4f}")
+# 3) Run a configured agent by name (from config.agents.definitions)
+@app.tool(name="run_agent")
+async def run_agent(
+    agent_name: str,
+    prompt: str,
+    app_ctx: Optional[AppContext] = None,
+) -> str:
+    """
+    Instantiate an Agent from config.agents.definitions by name and run an LLM call.
+    """
+    _app = app_ctx.app if app_ctx else app
+    defs: list[AgentSpec] = (
+        getattr(getattr(_app.context.config, "agents", None), "definitions", []) or []
+    )
+    spec = next((d for d in defs if getattr(d, "name", None) == agent_name), None)
+    if spec is None:
+        return f"agent '{agent_name}' not found"
+
+    agent = Agent(
+        name=spec.name,
+        instruction=spec.instruction,
+        server_names=spec.server_names or [],
+        functions=getattr(spec, "functions", []),
+        context=_app.context,
+    )
+    async with agent:
+        llm = await agent.attach_llm(OpenAIAugmentedLLM)
+        return await llm.generate_str(message=prompt)
+
+
+# 4) Minimal tool: synchronous, simple types
+@app.tool(name="greet")
+def greet(name: str, app_ctx: Optional[AppContext] = None) -> str:
+    """Return a friendly greeting and log it upstream."""
+    _app = app_ctx.app if app_ctx else app
+    _app.logger.info("greet called", data={"name": name})
+    return f"Hello, {name}!"
+
+
+# 5) Notify: demonstrate server-side logging notifications
+@app.tool(name="notify")
+def notify(
+    message: str,
+    level: Literal["debug", "info", "warning", "error"] = "info",
+    app_ctx: Optional[AppContext] = None,
+    mcp_ctx: Optional[MCPContext] = None,
+) -> str:
+    """
+    Send a log notification via the app logger (forwarded upstream).
+    Tools get access to both the MCPApp Context (app_ctx) and FastMCP Context (mcp_ctx).
+    """
+    _app = app_ctx.app if app_ctx else app
+    logger = _app.logger
+    if level == "debug":
+        logger.debug(message)
+    elif level == "warning":
+        logger.warning(message)
+    elif level == "error":
+        logger.error(message)
+    else:
+        logger.info(message)
+    return "ok"
 
-    # Optional: show a specific agent's aggregated usage
-    if agent is not None:
-        agent_usage = await agent.get_token_usage()
-        if agent_usage:
-            print("\nAgent Usage:")
-            print(f"  Agent: {agent.name}")
-            print(f"  Total tokens: {agent_usage.total_tokens:,}")
-            print(f"  Input tokens: {agent_usage.input_tokens:,}")
-            print(f"  Output tokens: {agent_usage.output_tokens:,}")
 
-    print("\n" + "=" * 50)
+# 6) Elicit: prompt the user for confirmation (demonstrates elicitation)
+@app.tool(name="confirm_action")
+async def confirm_action(
+    action: str,
+    app_ctx: Optional[AppContext] = None,
+    ctx: Optional[MCPContext] = None,
+) -> str:
+    """
+    Ask the user to confirm an action. When invoked from an MCP client UI, a prompt is shown.
+    Falls back to the app's elicitation handler if no upstream client is attached.
+    """
+    _app = app_ctx.app if app_ctx else app
+    upstream = getattr(_app.context, "upstream_session", None)
+    schema: ElicitRequestedSchema = {
+        "type": "object",
+        "title": "Confirmation",
+        "properties": {"confirm": {"type": "boolean", "title": "Confirm"}},
+        "required": ["confirm"],
+    }
+    # Prefer upstream elicitation when available
+    if upstream is not None:
+        result = await upstream.elicit(
+            message=f"Do you want to {action}?", requestedSchema=schema
+        )
+        accepted = getattr(result, "action", "") in ("accept", "accepted")
+        return f"Action '{action}' {'confirmed' if accepted else 'declined'} by user"
+
+    # Fallback: no upstream client. If an elicitation handler is configured, use it.
+    if _app.context.elicitation_handler:
+        resp = await _app.context.elicitation_handler(
+            {"message": f"Do you want to {action}?", "requestedSchema": schema}
+        )
+        accepted = getattr(resp, "action", "") in ("accept", "accepted")
+        return f"Action '{action}' {'confirmed' if accepted else 'declined'}"
+
+    # Last resort: assume accepted
+    return f"Action '{action}' confirmed by default"
+
+
+# 7) Sampling: call an LLM to generate a short text
+@app.tool(name="sample_haiku")
+async def sample_haiku(topic: str, app_ctx: Optional[AppContext] = None) -> str:
+    """
+    Generate a tiny poem using the configured LLM. Model and keys come from config/secrets.
+    """
+    _app = app_ctx.app if app_ctx else app
+    # Create a simple LLM using current app context (settings and servers)
+    llm = create_llm(
+        agent_name="sampling_demo",
+        server_names=[],
+        instruction="You are a concise poet.",
+        context=_app.context,
+    )
+    req = LLMRequestParams(
+        maxTokens=80,
+        modelPreferences=ModelPreferences(hints=[]),
+        systemPrompt="Write a 3-line haiku.",
+        temperature=0.7,
+        use_history=False,
+        max_iterations=1,
+    )
+    text = await llm.generate_str(message=f"Haiku about {topic}", request_params=req)
+    return text
+
+
+# 8) Async tool: demonstrates @app.async_tool (runs asynchronously)
+@app.async_tool(name="reverse_async")
+async def reverse_async(text: str) -> str:
+    """Reverse a string asynchronously (example async tool)."""
+    return text[::-1]
+
+
+# 9) Router demo (agent factory): route query to specialized agents defined in agents.yaml
+@app.tool(name="route_demo")
+async def route_demo(query: str, app_ctx: Optional[AppContext] = None) -> str:
+    """
+    Use the agent factory to load agent specs from agents.yaml and route the query
+    to the best agent using an LLM router.
+ """ + from pathlib import Path + from mcp_agent.workflows.factory import ( + load_agent_specs_from_file, + create_router_llm, + ) + + _app = app_ctx.app if app_ctx else app + ctx = _app.context + specs = load_agent_specs_from_file(str(Path("agents.yaml").resolve()), context=ctx) + router = await create_router_llm( + server_names=["filesystem", "fetch"], + agents=specs, + provider="openai", + context=ctx, + ) + res = await router.generate_str(query) + return res if __name__ == "__main__": - start = time.time() - asyncio.run(example_usage()) - end = time.time() - t = end - start + # Optional: run a quick sanity check when executed directly + import asyncio + + async def _smoke(): + async with app.run() as running: + running.logger.info("Example app started") + print( + await finder_tool( + "List files in the current directory", app_ctx=running.context + ) + ) + print("Agents:", await agent_catalog(app_ctx=running.context)) + print( + await run_agent( + "filesystem_helper", + "Summarize README if present", + app_ctx=running.context, + ) + ) + print(await greet("World", app_ctx=running.context)) + print(await sample_haiku("flowers", app_ctx=running.context)) - print(f"Total run time: {t:.2f}s") + asyncio.run(_smoke()) diff --git a/src/mcp_agent/data/templates/mcp_agent.config.yaml b/src/mcp_agent/data/templates/mcp_agent.config.yaml index b41bef7d6..5a7c9432c 100644 --- a/src/mcp_agent/data/templates/mcp_agent.config.yaml +++ b/src/mcp_agent/data/templates/mcp_agent.config.yaml @@ -36,8 +36,18 @@ mcp: # Brave search server (requires BRAVE_API_KEY in secrets) # brave-search: - # command: npx - # args: ["-y", "@modelcontextprotocol/server-brave-search"] + # command: npx + # args: ["-y", "@modelcontextprotocol/server-brave-search"] + +# Optional: Define simple agents inline to use with tools like run_agent +agents: + definitions: + - name: filesystem_helper + instruction: "You can read files and summarize their contents." + server_names: [filesystem] + - name: web_helper + instruction: "You can fetch web pages and summarize their content." 
+ server_names: [fetch] # Model provider defaults (API keys go in mcp_agent.secrets.yaml) openai: @@ -53,4 +63,4 @@ anthropic: # otel: # endpoint: "http://localhost:4317" # service_name: "mcp-agent" -# export_interval: 10 \ No newline at end of file +# export_interval: 10 From 3a27473399a247ef516cc0dcd33ebd632f853799 Mon Sep 17 00:00:00 2001 From: Sarmad Qadri Date: Wed, 17 Sep 2025 17:43:29 -0400 Subject: [PATCH 2/3] More templates updates --- src/mcp_agent/app.py | 46 ++- src/mcp_agent/cli/commands/init.py | 16 +- .../mcp_agent_server/elicitation/README.md | 11 + .../mcp_agent_server/elicitation/server.py | 37 ++- .../mcp_agent_server/notifications/README.md | 11 + .../mcp_agent_server/reference/README.md | 11 + .../mcp_agent_server/reference/server.py | 41 ++- .../mcp_agent_server/sampling/README.md | 11 + .../mcp_agent_server/sampling/server.py | 9 +- src/mcp_agent/data/templates/README_init.md | 100 +++--- src/mcp_agent/data/templates/basic_agent.py | 310 ++++++------------ .../data/templates/mcp_agent.config.yaml | 65 ++-- src/mcp_agent/workflows/factory.py | 32 +- 13 files changed, 364 insertions(+), 336 deletions(-) diff --git a/src/mcp_agent/app.py b/src/mcp_agent/app.py index 5c1b4f823..def8da1f2 100644 --- a/src/mcp_agent/app.py +++ b/src/mcp_agent/app.py @@ -4,7 +4,7 @@ import functools from types import MethodType -from typing import Any, Dict, Optional, Type, TypeVar, Callable, TYPE_CHECKING +from typing import Any, Dict, Optional, Type, TypeVar, Callable, TYPE_CHECKING, ParamSpec, overload from datetime import timedelta from contextlib import asynccontextmanager @@ -36,6 +36,7 @@ from mcp_agent.agents.agent_spec import AgentSpec from mcp_agent.executor.workflow import Workflow +P = ParamSpec("P") R = TypeVar("R") @@ -714,13 +715,27 @@ async def _run(self, *args, **kwargs): # type: ignore[no-redef] self.workflow(auto_cls, workflow_id=workflow_name) return auto_cls + @overload + def tool(self, __fn: Callable[P, R]) -> Callable[P, R]: + ... + + @overload def tool( self, name: str | None = None, *, description: str | None = None, structured_output: bool | None = None, - ) -> Callable[[Callable[..., Any]], Callable[..., Any]]: + ) -> Callable[[Callable[P, R]], Callable[P, R]]: + ... + + def tool( + self, + name: str | None = None, + *, + description: str | None = None, + structured_output: bool | None = None, + ): """ Decorator to declare a synchronous MCP tool that runs via an auto-generated Workflow and waits for completion before returning. @@ -729,7 +744,7 @@ def tool( endpoints are available. """ - def decorator(fn: Callable[..., Any]) -> Callable[..., Any]: + def decorator(fn: Callable[P, R]) -> Callable[P, R]: tool_name = name or fn.__name__ # Early validation: Use the shared tool adapter logic to validate @@ -762,18 +777,31 @@ def decorator(fn: Callable[..., Any]) -> Callable[..., Any]: # Support bare usage: @app.tool without parentheses if callable(name) and description is None and structured_output is None: - fn = name # type: ignore[assignment] + _fn = name # type: ignore[assignment] name = None - return decorator(fn) # type: ignore[arg-type] + return decorator(_fn) # type: ignore[arg-type] return decorator + @overload + def async_tool(self, __fn: Callable[P, R]) -> Callable[P, R]: + ... + + @overload + def async_tool( + self, + name: str | None = None, + *, + description: str | None = None, + ) -> Callable[[Callable[P, R]], Callable[P, R]]: + ... 
+ def async_tool( self, name: str | None = None, *, description: str | None = None, - ) -> Callable[[Callable[..., Any]], Callable[..., Any]]: + ): """ Decorator to declare an asynchronous MCP tool. @@ -781,7 +809,7 @@ def async_tool( the standard per-workflow tools (run/get_status) are exposed by the server. """ - def decorator(fn: Callable[..., Any]) -> Callable[..., Any]: + def decorator(fn: Callable[P, R]) -> Callable[P, R]: workflow_name = name or fn.__name__ # Early validation: Use the shared tool adapter logic to validate @@ -812,9 +840,9 @@ def decorator(fn: Callable[..., Any]) -> Callable[..., Any]: # Support bare usage: @app.async_tool without parentheses if callable(name) and description is None: - fn = name # type: ignore[assignment] + _fn = name # type: ignore[assignment] name = None - return decorator(fn) # type: ignore[arg-type] + return decorator(_fn) # type: ignore[arg-type] return decorator diff --git a/src/mcp_agent/cli/commands/init.py b/src/mcp_agent/cli/commands/init.py index 5fc92252d..475110ee6 100644 --- a/src/mcp_agent/cli/commands/init.py +++ b/src/mcp_agent/cli/commands/init.py @@ -66,7 +66,9 @@ def _write_readme(dir_path: Path, content: str, force: bool) -> str | None: if ok: return name # Fallback: print content to console if we couldn't write any variant - console.print("\n[yellow]A README already exists and could not be overwritten.[/yellow]") + console.print( + "\n[yellow]A README already exists and could not be overwritten.[/yellow]" + ) console.print("[bold]Suggested README contents:[/bold]\n") console.print(content) return None @@ -163,7 +165,7 @@ def init( else: # Ask for an alternate filename and ensure it ends with .py alt_name = Prompt.ask( - "Enter a filename to save the agent", default="agent.py" + "Enter a filename to save the agent", default="main.py" ) if not alt_name.endswith(".py"): alt_name += ".py" @@ -263,16 +265,6 @@ def init( if template == "basic": run_file = entry_script_name or "main.py" console.print(f"3. Run your agent: [cyan]uv run {run_file}[/cyan]") - console.print( - f" Or use: [cyan]mcp-agent dev start --script {run_file}[/cyan]" - ) - console.print( - f" Or serve: [cyan]mcp-agent dev serve --script {run_file}[/cyan]" - ) - console.print(" Or chat: [cyan]mcp-agent dev chat[/cyan]") - console.print( - "4. Edit config: [cyan]mcp-agent config edit[/cyan] (then rerun)" - ) elif template == "server": console.print("3. Run the server: [cyan]uv run server.py[/cyan]") console.print( diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md index 9720bee8d..9cb18bb35 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md @@ -20,3 +20,14 @@ Tools: This example uses console handlers for local testing. In an MCP client UI, the prompt will be displayed to the user. +## Deploy to Cloud (optional) + +1. Set your API keys in `mcp_agent.secrets.yaml`. + +2. From this directory, deploy: + +```bash +uv run mcp-agent deploy elicitation-example +``` + +You’ll receive an app ID and a URL. Use the URL with an MCP client (e.g., MCP Inspector) and append `/sse` to the end. Set the Bearer token in the header to your mcp-agent API key. 
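+
+To point the bundled `client.py` at the deployed server instead of localhost, swap the inline server entry's URL (a sketch; the URL is a placeholder for the one returned by deploy):
+
+```python
+cfg = type("Cfg", (), {})()
+cfg.name = "elicitation_server"
+cfg.transport = "sse"
+cfg.url = "https://<your-app-url>/sse"  # placeholder: use your deployed URL
+client_app.context.server_registry.registry["elicitation_server"] = cfg
+```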
diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py index 6128b5116..9c65deb10 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py @@ -18,6 +18,7 @@ from mcp_agent.human_input.handler import console_input_callback from mcp_agent.elicitation.handler import console_elicitation_callback from mcp.types import ElicitRequestedSchema +from pydantic import BaseModel, Field app = MCPApp( @@ -33,25 +34,40 @@ async def confirm_action(action: str, app_ctx: Optional[AppContext] = None) -> s """Ask the user to confirm an action.""" _app = app_ctx.app if app_ctx else app upstream = getattr(_app.context, "upstream_session", None) - schema: ElicitRequestedSchema = { - "type": "object", - "title": "Confirmation", - "properties": {"confirm": {"type": "boolean", "title": "Confirm"}}, - "required": ["confirm"], - } + class ConfirmBooking(BaseModel): + confirm: bool = Field(description="Confirm action?") + notes: str = Field(default="", description="Optional notes") + + schema: ElicitRequestedSchema = ConfirmBooking.model_json_schema() if upstream is not None: result = await upstream.elicit( message=f"Do you want to {action}?", requestedSchema=schema ) - accepted = getattr(result, "action", "") in ("accept", "accepted") - return f"Action '{action}' {'confirmed' if accepted else 'declined'} by user" + if getattr(result, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(result, "content", {})) + return ( + f"Action '{action}' confirmed. Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(result, "action", "") == "decline": + return "Action declined" + return "Action cancelled" # Fallback to console handler if _app.context.elicitation_handler: resp = await _app.context.elicitation_handler( {"message": f"Do you want to {action}?", "requestedSchema": schema} ) - accepted = getattr(resp, "action", "") in ("accept", "accepted") - return f"Action '{action}' {'confirmed' if accepted else 'declined'}" + if getattr(resp, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(resp, "content", {})) + return ( + f"Action '{action}' confirmed. Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(resp, "action", "") == "decline": + return "Action declined" + return "Action cancelled" return f"Action '{action}' confirmed by default" @@ -63,4 +79,3 @@ async def main() -> None: if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md b/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md index 0f3aeb2fe..259b4dd33 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md @@ -21,3 +21,14 @@ Tools: These are best-effort and non-blocking for the server. +## Deploy to Cloud (optional) + +1. Set API keys in `mcp_agent.secrets.yaml` as needed. + +2. Deploy from this directory: + +```bash +uv run mcp-agent deploy notifications-demo +``` + +Use the returned URL with `/sse` in an MCP client. Set the Bearer token in the header to your mcp-agent API key. 
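+
+Once connected, the demo tools can be exercised the same way the bundled `client.py`
+does (a sketch; connection setup is omitted and `server` is an open MCP client session):
+
+```python
+await server.set_logging_level("info")
+await server.call_tool("notify", {"message": "Hello from client"})
+await server.call_tool("notify_progress", {"progress": 0.25, "message": "Quarter"})
+```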
diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md b/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md index 27843ca73..2b94570f5 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md @@ -48,3 +48,14 @@ Put your API keys in `mcp_agent.secrets.yaml` or environment variables (`OPENAI_API_KEY`, etc.). The server uses the MCP app configuration (`mcp_agent.config.yaml`) for MCP servers and provider defaults. +## Deploy to Cloud (optional) + +1. Set API keys in `mcp_agent.secrets.yaml`. + +2. From this directory: + +```bash +uv run mcp-agent deploy reference-server +``` + +Use the URL (append `/sse`) in an MCP client and include your mcp-agent API key as a bearer token if required. diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py b/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py index 9b49100e4..447f8d335 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py @@ -34,6 +34,7 @@ from mcp_agent.workflows.llm.augmented_llm import RequestParams as LLMRequestParams from mcp_agent.workflows.llm.llm_selector import ModelPreferences from mcp.types import ElicitRequestedSchema +from pydantic import BaseModel, Field app = MCPApp( @@ -96,25 +97,44 @@ async def confirm_action( """Ask the user to confirm the action via elicitation.""" _app = app_ctx.app if app_ctx else app upstream = getattr(_app.context, "upstream_session", None) - schema: ElicitRequestedSchema = { - "type": "object", - "title": "Confirmation", - "properties": {"confirm": {"type": "boolean", "title": "Confirm"}}, - "required": ["confirm"], - } + + class ConfirmBooking(BaseModel): + confirm: bool = Field(description="Confirm action?") + notes: str = Field(default="", description="Optional notes") + + schema: ElicitRequestedSchema = ConfirmBooking.model_json_schema() + if upstream is not None: result = await upstream.elicit( message=f"Do you want to {action}?", requestedSchema=schema ) - accepted = getattr(result, "action", "") in ("accept", "accepted") - return f"Action '{action}' {'confirmed' if accepted else 'declined'} by user" + if getattr(result, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(result, "content", {})) + return ( + f"Action '{action}' confirmed. Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(result, "action", "") == "decline": + return "Action declined" + return "Action cancelled" + # Fallback to handler if present if _app.context.elicitation_handler: resp = await _app.context.elicitation_handler( {"message": f"Do you want to {action}?", "requestedSchema": schema} ) - accepted = getattr(resp, "action", "") in ("accept", "accepted") - return f"Action '{action}' {'confirmed' if accepted else 'declined'}" + if getattr(resp, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(resp, "content", {})) + return ( + f"Action '{action}' confirmed. 
Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(resp, "action", "") == "decline": + return "Action declined" + return "Action cancelled" + return f"Action '{action}' confirmed by default" @@ -165,4 +185,3 @@ def _prompt_echo(message: str) -> str: if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md b/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md index 55bbccae3..0e083e12e 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md @@ -20,3 +20,14 @@ Tools: Add your API key(s) to `mcp_agent.secrets.yaml` or environment variables (e.g. `OPENAI_API_KEY`). +## Deploy to Cloud (optional) + +1) Set API keys in `mcp_agent.secrets.yaml`. + +2) Deploy from this directory: + +```bash +uv run mcp-agent deploy sampling --config-dir . +``` + +Use the returned URL with `/sse` in an MCP client and include the bearer token if needed. diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py b/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py index 4d6af5a09..3255b65ce 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py @@ -28,7 +28,11 @@ @app.tool(name="sample_haiku") -async def sample_haiku(topic: str, app_ctx: Optional[AppContext] = None) -> str: +async def sample_haiku( + topic: str, + temperature: float | None = 0.7, + app_ctx: Optional[AppContext] = None, +) -> str: """Generate a short poem using configured LLM settings.""" _app = app_ctx.app if app_ctx else app llm = create_llm( @@ -41,7 +45,7 @@ async def sample_haiku(topic: str, app_ctx: Optional[AppContext] = None) -> str: maxTokens=80, modelPreferences=ModelPreferences(hints=[]), systemPrompt="Write a 3-line haiku.", - temperature=0.7, + temperature=temperature, use_history=False, max_iterations=1, ) @@ -56,4 +60,3 @@ async def main() -> None: if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/templates/README_init.md b/src/mcp_agent/data/templates/README_init.md index 2189a529a..7a2bb3599 100644 --- a/src/mcp_agent/data/templates/README_init.md +++ b/src/mcp_agent/data/templates/README_init.md @@ -1,83 +1,81 @@ # MCP-Agent Starter -Welcome! This project was generated by `mcp-agent init` and includes a small set -of tools that demonstrate the basics of MCP-Agent: +Welcome! This project was generated by `mcp-agent init`. It’s a minimal, readable starting point you can run locally or expose as an MCP server. -- A simple Agent that uses MCP servers and an LLM (`finder_tool`) -- Synchronous and asynchronous tools (`greet`, `reverse_async`) -- Notifications/logging (`notify`) -- Elicitation (user confirmation) (`confirm_action`) -- Sampling with request parameters (`sample_haiku`) -- Loading AgentSpec definitions from config and running them (`agent_catalog`, `run_agent`) +## What’s included -## Quick Start +- An `MCPApp` named `hello_world` (see `main.py`). +- Two tools defined with decorators: + - `finder_agent(request: str, app_ctx?)` + - An Agent that uses the `filesystem` and `fetch` MCP servers plus an LLM to answer the request. + - Logs via the app logger (forwarded to the client as notifications when serving). 
+  - `run_agent_async(agent_name: str = "web_helper", prompt: str = "...", app_ctx?)`
+    - Loads an `AgentSpec` from `mcp_agent.config.yaml` (`agents.definitions`) and runs it.
+    - Decorated with `@app.async_tool`: when serving, returns a workflow ID; when run in this script, it awaits and returns the string result.
 
-## Quick Start
+## Quick start
+
+1. Add API keys to `mcp_agent.secrets.yaml` (or set env vars):
 
-1) Add your API keys to `mcp_agent.secrets.yaml` (or environment variables):
    - `OPENAI_API_KEY` (recommended)
    - `ANTHROPIC_API_KEY` (optional)
 
-2) Review `mcp_agent.config.yaml`. It defines:
+2. Review `mcp_agent.config.yaml`:
 
-   - Execution engine (default: `asyncio`)
+   - Execution engine: `asyncio`
    - Logger settings
-   - MCP servers (like `filesystem`, `fetch`)
-   - Optional `agents.definitions` used by the `run_agent` tool
+   - MCP servers: `filesystem`, `fetch`
+   - `agents.definitions`: sample agents (`filesystem_helper`, `web_helper`)
 
-3) Run locally:
+3. Run locally:
 
 ```bash
 uv run main.py
 ```
 
-Or develop with the dev CLI:
-
-```bash
-mcp-agent dev chat
-# or
-mcp-agent dev serve --script main.py
-```
-
-## Tools Overview
+You'll see two summaries printed:
 
-All tools are defined in `main.py` using the `@app.tool` or `@app.async_tool` decorators.
-Each tool is strongly typed and accepts an optional `app_ctx` parameter to access the
-MCPApp context at runtime (e.g., logging, upstream session, config).
+- A summary of `README.md` from your current directory.
+- A summary of the intro page at modelcontextprotocol.io.
 
-- `finder_tool(request: str)`
-  - Demonstrates Agent behavior. Creates an Agent with `filesystem` and `fetch`, attaches an LLM, and answers the request.
+4. Run it as an MCP server and deploy it (see the next two sections).
 
-- `agent_catalog()`
-  - Lists agent names defined in `mcp_agent.config.yaml` under `agents.definitions`.
+### Run as an MCP server
 
-- `run_agent(agent_name: str, prompt: str)`
-  - Instantiates an Agent from `agents.definitions` and runs an LLM call.
+- In `main.py`, UNCOMMENT the server lines that call `create_mcp_server_for_app(agent_app)` and `run_sse_async()` (plus the related import, if it is commented out).
+- Start the server: `uv run main.py`
 
-- `greet(name: str)`
-  - Minimal synchronous tool that logs a message and returns a greeting.
+### Deploy to the cloud
 
-- `notify(message: str, level: Literal["debug","info","warning","error"])`
-  - Demonstrates upstream logging/notifications using the app logger.
+When you're ready to deploy, run:
 
-- `confirm_action(action: str)`
-  - Demonstrates elicitation. When connected to an MCP client, the user is prompted for confirmation.
+```bash
+mcp-agent deploy "hello_world"
+```
 
-- `sample_haiku(topic: str)`
-  - Demonstrates LLM sampling by constructing RequestParams and calling an LLM.
+- This wraps your app as a hosted MCP SSE server.
+- Anything decorated with `@app.tool` (or `@app.async_tool`) runs as a Temporal workflow in the cloud.
 
-- `reverse_async(text: str)`
-  - Simple asynchronous tool that reverses the input string.
+### Notes
 
-## Contexts
+- `app_ctx` is the MCPApp Context (configuration, logger, upstream session, etc.).
+- Logging uses `app.logger` and is forwarded as notifications when connected to an MCP client.
+- Configuration is read from `mcp_agent.config.yaml` and `mcp_agent.secrets.yaml` (env vars supported).
+- The default model is configurable (see `openai.default_model` in config).
 
-- `app_ctx`: MCPApp Context (configuration, logger, upstream session, etc.)
-- `ctx`/`mcp_ctx`: FastMCP tool context (provided by the FastMCP server runtime), included for advanced use.
+## Next steps
+
+- Tweak `finder_agent` instructions or server list to fit your use case.
+- Add more `AgentSpec` entries to `agents.definitions`.
+- Add tools with `@app.tool` or `@app.async_tool` as you grow the app.
+- Read the docs and explore examples:
+  - GitHub: https://github.com/lastmile-ai/mcp-agent
+  - Docs: https://docs.mcp-agent.com/
+  - Discord: https://lmai.link/discord/mcp-agent
 
-## Where to next?
+## Further reading
 
-- Explore configuration in `mcp_agent.config.yaml` and add more MCP servers.
-- Extend `agents.definitions` with more AgentSpec entries.
-- Add new tools using `@app.tool` or `@app.async_tool`.
+- Configuration reference and secrets management.
+- MCP servers (stdio, SSE, streamable_http, websockets) and timeouts.
+- Temporal workflows, activities, and logging/notifications when deployed.
+- Agents and LLMs: `AgentSpec`, prompts, and model defaults.
 
 Happy building!
-
diff --git a/src/mcp_agent/data/templates/basic_agent.py b/src/mcp_agent/data/templates/basic_agent.py
index f357436f5..d4125abab 100644
--- a/src/mcp_agent/data/templates/basic_agent.py
+++ b/src/mcp_agent/data/templates/basic_agent.py
@@ -1,54 +1,55 @@
 """
-Welcome to mcp-agent!
-This
-Canonical MCP-Agent example for new projects.
+Welcome to mcp-agent! We believe MCP is all you need to build and deploy agents.
+This is a canonical getting-started example that covers everything you need to know.
 
-This script showcases:
-  - Setting up a basic Agent that uses the fetch and filesystem MCP servers
-  - @app.tool and @app.async_tool decorators to define long-running tools
+We will cover:
+  - Hello world agent: Setting up a basic Agent that uses the fetch and filesystem MCP servers to do cool stuff.
+  - @app.tool and @app.async_tool decorators to expose your agents as long-running tools on an MCP server.
   - Advanced MCP features: Notifications, sampling, and elicitation
+
+You can run this example locally using "uv run main.py", and also deploy it as an MCP server using "mcp-agent deploy".
+
+Let's get started!
 """
 
 from __future__ import annotations
 
-from typing import Optional, Literal
-import os
-
-from mcp.server.fastmcp import Context as MCPContext
-from mcp.types import ElicitRequestedSchema, TextContent, CreateMessageResult
+import asyncio
+from typing import Optional
 
 from mcp_agent.app import MCPApp
 from mcp_agent.agents.agent import Agent
 from mcp_agent.agents.agent_spec import AgentSpec
 from mcp_agent.core.context import Context as AppContext
-from mcp_agent.workflows.factory import create_llm
-from mcp_agent.workflows.llm.augmented_llm import RequestParams as LLMRequestParams
-from mcp_agent.workflows.llm.llm_selector import ModelPreferences
-
-# If you want to use a different LLM provider, you can import the appropriate AugmentedLLM
-#
+from mcp_agent.server.app_server import create_mcp_server_for_app
+from mcp_agent.workflows.factory import create_agent
 from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
 
-# Create the MCP App. Configuration is read from mcp_agent.config.yaml/secrets.yaml.
-app = MCPApp(name="hello_world", description="Hello world mcp-agent application")
+# Create the MCPApp, the root of mcp-agent.
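+# Configuration and secrets are read from mcp_agent.config.yaml and
+# mcp_agent.secrets.yaml in this directory; the optional `settings=` argument
+# below can be used to override that file-based configuration in code.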
+app = MCPApp(
+    name="hello_world",
+    description="Hello world mcp-agent application",
+    # settings=
+)
 
 
-# 1) Agent behavior (first): demonstrate an Agent using MCP servers + LLM
-@app.tool(name="finder_tool")
-async def finder_tool(request: str, app_ctx: Optional[AppContext] = None) -> str:
+# Hello world agent: an Agent using MCP servers + LLM
+@app.tool()
+async def finder_agent(request: str, app_ctx: Optional[AppContext] = None) -> str:
     """
-    Create an Agent with access to MCP servers (fetch + filesystem), attach an LLM,
-    and handle the user's request.
+    Run an Agent with access to MCP servers (fetch + filesystem) to handle the input request.
+
+    Notes:
+    - @app.tool:
+      - runs the function as a long-running workflow tool when deployed as an MCP server
+      - no-op when running this locally as a script
+    - app_ctx:
+      - MCPApp Context (configuration, logger, upstream session, etc.)
     """
-    _app = app_ctx.app if app_ctx else app
-    ctx = _app.context
 
-    # Ensure filesystem server can read current working directory (dev-friendly)
-    try:
-        if "filesystem" in ctx.config.mcp.servers:
-            ctx.config.mcp.servers["filesystem"].args.extend([os.getcwd()])
-    except Exception:
-        pass
+    logger = (app_ctx.app if app_ctx else app).logger
+    # Logger requests are forwarded as notifications/message to the client over MCP.
+    logger.info(f"finder_agent called with request: {request}")
 
     agent = Agent(
         name="finder",
@@ -57,6 +58,7 @@
             " then answer the request concisely."
         ),
         server_names=["fetch", "filesystem"],
+        context=app_ctx,
     )
 
     async with agent:
@@ -65,201 +67,95 @@
         return result
 
 
-# 2) Agent catalog: list agents defined in config (agents.definitions)
-@app.tool(name="agent_catalog")
-def agent_catalog(app_ctx: Optional[AppContext] = None) -> str:
-    """List agent names defined under config.agents.definitions."""
-    _app = app_ctx.app if app_ctx else app
-    defs: list[AgentSpec] = (
-        getattr(getattr(_app.context.config, "agents", None), "definitions", []) or []
-    )
-    names = [getattr(d, "name", "") for d in defs if getattr(d, "name", None)]
-    return ", ".join(names) if names else "(no agents defined in config)"
-
-
-# 3) Run a configured agent by name (from config.agents.definitions)
-@app.tool(name="run_agent")
+# Run a configured agent by name (defined in mcp_agent.config.yaml)
+@app.async_tool(name="run_agent_async")
 async def run_agent(
-    agent_name: str,
-    prompt: str,
+    agent_name: str = "web_helper",
+    prompt: str = "Please summarize the first paragraph of https://modelcontextprotocol.io/docs/getting-started/intro",
     app_ctx: Optional[AppContext] = None,
 ) -> str:
     """
-    Instantiate an Agent from config.agents.definitions by name and run an LLM call.
+    Load an agent defined in mcp_agent.config.yaml by name and run it.
+
+    Notes:
+    - @app.async_tool:
+      - async version of @app.tool: returns a workflow ID immediately (poll it with the workflows-get_status tool)
+      - runs the function as a long-running workflow tool when deployed as an MCP server
+      - no-op when running this locally as a script
     """
-    _app = app_ctx.app if app_ctx else app
-    defs: list[AgentSpec] = (
-        getattr(getattr(_app.context.config, "agents", None), "definitions", []) or []
+
+    logger = (app_ctx.app if app_ctx else app).logger
+
+    agent_definitions = (
+        app.config.agents.definitions
+        if app is not None
+        and app.config is not None
+        and app.config.agents is not None
+        and app.config.agents.definitions is not None
+        else []
     )
-    spec = next((d for d in defs if getattr(d, "name", None) == agent_name), None)
-    if spec is None:
+
+    agent_spec: AgentSpec | None = None
+    for agent_def in agent_definitions:
+        if agent_def.name == agent_name:
+            agent_spec = agent_def
+            break
+
+    if agent_spec is None:
+        logger.error("Agent not found", data={"name": agent_name})
         return f"agent '{agent_name}' not found"
 
-    agent = Agent(
-        name=spec.name,
-        instruction=spec.instruction,
-        server_names=spec.server_names or [],
-        functions=getattr(spec, "functions", []),
-        context=_app.context,
+    logger.info(
+        "Agent found in spec",
+        data={"name": agent_name, "instruction": agent_spec.instruction},
     )
+
+    agent = create_agent(agent_spec, context=app_ctx)
+
     async with agent:
         llm = await agent.attach_llm(OpenAIAugmentedLLM)
         return await llm.generate_str(message=prompt)
 
 
-# 4) Minimal tool: synchronous, simple types
-@app.tool(name="greet")
-def greet(name: str, app_ctx: Optional[AppContext] = None) -> str:
-    """Return a friendly greeting and log it upstream."""
-    _app = app_ctx.app if app_ctx else app
-    _app.logger.info("greet called", data={"name": name})
-    return f"Hello, {name}!"
-
-
-# 5) Notify: demonstrate server-side logging notifications
-@app.tool(name="notify")
-def notify(
-    message: str,
-    level: Literal["debug", "info", "warning", "error"] = "info",
-    app_ctx: Optional[AppContext] = None,
-    mcp_ctx: Optional[MCPContext] = None,
-) -> str:
-    """
-    Send a non-logging notification via the app logger (forwarded upstream).
-    Tools get access to both the MCPApp Context (app_ctx) and FastMCP Context (mcp_ctx).
-    """
-    _app = app_ctx.app if app_ctx else app
-    logger = _app.logger
-    if level == "debug":
-        logger.debug(message)
-    elif level == "warning":
-        logger.warning(message)
-    elif level == "error":
-        logger.error(message)
-    else:
-        logger.info(message)
-    return "ok"
-
-
-# 6) Elicit: prompt the user for confirmation (demonstrates elicitation)
-@app.tool(name="confirm_action")
-async def confirm_action(
-    action: str,
-    app_ctx: Optional[AppContext] = None,
-    ctx: Optional[MCPContext] = None,
-) -> str:
-    """
-    Ask the user to confirm an action. When invoked from an MCP client UI, a prompt is shown.
-    Falls back to the app's elicitation handler if no upstream client is attached.
- """ - _app = app_ctx.app if app_ctx else app - upstream = getattr(_app.context, "upstream_session", None) - schema: ElicitRequestedSchema = { - "type": "object", - "title": "Confirmation", - "properties": {"confirm": {"type": "boolean", "title": "Confirm"}}, - "required": ["confirm"], - } - # Prefer upstream elicitation when available - if upstream is not None: - result = await upstream.elicit( - message=f"Do you want to {action}?", requestedSchema=schema +async def main(): + async with app.run() as agent_app: + # Run the agent + readme_summary = await finder_agent( + request="Please summarize the README.md file in this directory.", + app_ctx=agent_app.context, ) - accepted = getattr(result, "action", "") in ("accept", "accepted") - return f"Action '{action}' {'confirmed' if accepted else 'declined'} by user" + print("README.md file summary:") + print(readme_summary) - # Fallback: no upstream client. If an elicitation handler is configured, use it. - if _app.context.elicitation_handler: - resp = await _app.context.elicitation_handler( - {"message": f"Do you want to {action}?", "requestedSchema": schema} + webpage_summary = await run_agent( + agent_name="web_helper", + prompt="Please summarize the first few paragraphs of https://modelcontextprotocol.io/docs/getting-started/intro.", + app_ctx=agent_app.context, ) - accepted = getattr(resp, "action", "") in ("accept", "accepted") - return f"Action '{action}' {'confirmed' if accepted else 'declined'}" - - # Last resort: assume accepted - return f"Action '{action}' confirmed by default" - - -# 7) Sampling: call an LLM to generate a short text -@app.tool(name="sample_haiku") -async def sample_haiku(topic: str, app_ctx: Optional[AppContext] = None) -> str: - """ - Generate a tiny poem using the configured LLM. Model and keys come from config/secrets. - """ - _app = app_ctx.app if app_ctx else app - # Create a simple LLM using current app context (settings and servers) - llm = create_llm( - agent_name="sampling_demo", - server_names=[], - instruction="You are a concise poet.", - context=_app.context, - ) - req = LLMRequestParams( - maxTokens=80, - modelPreferences=ModelPreferences(hints=[]), - systemPrompt="Write a 3-line haiku.", - temperature=0.7, - use_history=False, - max_iterations=1, - ) - text = await llm.generate_str(message=f"Haiku about {topic}", request_params=req) - return text - + print("Webpage summary:") + print(webpage_summary) -# 8) Async tool: demonstrates @app.async_tool (runs asynchronously) -@app.async_tool(name="reverse_async") -async def reverse_async(text: str) -> str: - """Reverse a string asynchronously (example async tool).""" - return text[::-1] + # UNCOMMENT to run this MCPApp as an MCP server + ######################################################### + # Create the MCP server that exposes both workflows and agent configurations, + # optionally using custom FastMCP settings + # mcp_server = create_mcp_server_for_app(agent_app) - -# 6) Router demo (agent factory): route query to specialized agents defined in agents.yaml -@app.tool(name="route_demo") -async def route_demo(query: str, app_ctx: Optional[AppContext] = None) -> str: - """ - Use the agent factory to load agent specs from agents.yaml and route the query - to the best agent using an LLM router. 
-    """
-    from pathlib import Path
-    from mcp_agent.workflows.factory import (
-        load_agent_specs_from_file,
-        create_router_llm,
-    )
-
-    _app = app_ctx.app if app_ctx else app
-    ctx = _app.context
-    specs = load_agent_specs_from_file(str(Path("agents.yaml").resolve()), context=ctx)
-    router = await create_router_llm(
-        server_names=["filesystem", "fetch"],
-        agents=specs,
-        provider="openai",
-        context=ctx,
-    )
-    res = await router.generate_str(query)
-    return res
+    # # Run the server
+    # await mcp_server.run_sse_async()
 
 
 if __name__ == "__main__":
-    # Optional: run a quick sanity check when executed directly
-    import asyncio
-
-    async def _smoke():
-        async with app.run() as running:
-            running.logger.info("Example app started")
-            print(
-                await finder_tool(
-                    "List files in the current directory", app_ctx=running.context
-                )
-            )
-            print("Agents:", await agent_catalog(app_ctx=running.context))
-            print(
-                await run_agent(
-                    "filesystem_helper",
-                    "Summarize README if present",
-                    app_ctx=running.context,
-                )
-            )
-            print(await greet("World", app_ctx=running.context))
-            print(await sample_haiku("flowers", app_ctx=running.context))
+    asyncio.run(main())
 
-    asyncio.run(_smoke())
+# When you're ready to deploy this MCPApp as a remote SSE server, run:
+# > mcp-agent deploy "hello_world"
+#
+# Congrats! You made it to the end of the getting-started example!
+# There is a lot more that mcp-agent can do, and we hope you'll explore the rest of the documentation.
+# Check out other examples in the mcp-agent repo:
+# https://github.com/lastmile-ai/mcp-agent/tree/main/examples
+# and read the docs (or ask an mcp-agent to do it for you):
+# https://docs.mcp-agent.com/
+#
+# Happy mcp-agenting!
diff --git a/src/mcp_agent/data/templates/mcp_agent.config.yaml b/src/mcp_agent/data/templates/mcp_agent.config.yaml
index 21edbfcd7..ecf88c520 100644
--- a/src/mcp_agent/data/templates/mcp_agent.config.yaml
+++ b/src/mcp_agent/data/templates/mcp_agent.config.yaml
@@ -1,45 +1,48 @@
 # MCP-Agent Configuration File
-# Schema reference for IDE autocomplete and validation
+# Config definition: https://github.com/lastmile-ai/mcp-agent/blob/main/src/mcp_agent/config.py
 $schema: https://raw.githubusercontent.com/lastmile-ai/mcp-agent/refs/heads/main/schema/mcp-agent.config.schema.json
 
 # Execution engine: asyncio or temporal
+# For temporal mode, see: https://github.com/lastmile-ai/mcp-agent/blob/main/examples/temporal/README.md
 execution_engine: asyncio
 
-# Logger configuration
 logger:
-  transports: [file] # Options: console, file
-  level: info # Options: debug, info, warning, error
-  progress_display: true # Show progress bars for token usage
-  path_settings:
-    path_pattern: "logs/mcp-agent-{unique_id}.jsonl"
-    unique_id: "timestamp" # Options: "timestamp" or "session_id"
-    timestamp_format: "%Y%m%d_%H%M%S"
+  transports: [console, file]
+  level: info
+  path: logs/mcp-agent.log
 
-# MCP Servers configuration
+# Configure MCP server connections (supports stdio, sse, streamable_http, and websockets)
 mcp:
   servers:
     # Filesystem access server
     filesystem:
       command: npx
-      args: ["-y", "@modelcontextprotocol/server-filesystem"]
-      # Add current directory at runtime with: context.config.mcp.servers["filesystem"].args.extend(["."])
-
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "."]
+
     # Web fetch server
     fetch:
      command: uvx
      args: ["mcp-server-fetch"]
-
-    # GitHub server (requires GITHUB_PERSONAL_ACCESS_TOKEN in secrets)
-    # github:
-    #   command: npx
-    #   args: ["-y", "@modelcontextprotocol/server-github"]
-
-    # Brave search server (requires BRAVE_API_KEY in secrets)
-    # brave-search:
-    #   command: npx
-    #   args: ["-y", "@modelcontextprotocol/server-brave-search"]
+      # env: # Environment variables passed to the stdio server
+      #   ROOT_PATH: "/workspace"
+
+    # sse_server:
+    #   transport: "sse"
+    #   url: "https://api.example.com/sse"
+    #   headers:
+    #     Authorization: "Bearer ${API_TOKEN}"
+
+    # streamable_http_server:
+    #   transport: streamable_http
+    #   url: "https://api.example.com/mcp"
+    #   headers:
+    #     Authorization: "Bearer ${API_TOKEN}"
+    #     Content-Type: "application/json"
+    #   http_timeout_seconds: 30
+    #   read_timeout_seconds: 120
+    #   terminate_on_close: true
 
-# Optional: Define simple agents inline to use with tools like run_agent
+# Optional: Define reusable agents (AgentSpec definitions) in config
 agents:
   definitions:
     - name: filesystem_helper
@@ -51,16 +54,16 @@ agents:
 
 # Model provider defaults (API keys go in mcp_agent.secrets.yaml)
 openai:
-  default_model: "gpt-4o-mini"
-
-anthropic:
-  default_model: "claude-3-5-sonnet-20241022"
+  default_model: gpt-4o-mini
 
+anthropic:
+  default_model: claude-sonnet-4-0
 # google:
 #   default_model: "gemini-1.5-pro"
 
 # OpenTelemetry configuration (optional)
 # otel:
-#   endpoint: "http://localhost:4317"
-#   service_name: "mcp-agent"
-#   export_interval: 10
+#   enabled: true
+#   exporters: ["file", "otlp"]
+#   otlp_settings:
+#     endpoint: "http://localhost:4318/v1/traces"
diff --git a/src/mcp_agent/workflows/factory.py b/src/mcp_agent/workflows/factory.py
index 46a186774..df22718db 100644
--- a/src/mcp_agent/workflows/factory.py
+++ b/src/mcp_agent/workflows/factory.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Any, Callable, List, Literal, Sequence, Tuple
+from typing import Any, Callable, List, Literal, Sequence, Tuple, overload
 import os
 import re
 import json
@@ -65,6 +65,32 @@ def agent_from_spec(spec: AgentSpec, context: Context | None = None) -> Agent:
     )
 
 
+@overload
+def create_llm(
+    agent: Agent | AgentSpec,
+    provider: str | None = "openai",
+    model: str | ModelPreferences | None = None,
+    request_params: RequestParams | None = None,
+    context: Context | None = None,
+) -> AugmentedLLM: ...
+
+
+@overload
 def create_llm(
     agent_name: str,
     server_names: List[str] | None = None,
@@ -74,6 +100,10 @@
     request_params: RequestParams | None = None,
     context: Context | None = None,
-) -> AugmentedLLM:
+) -> AugmentedLLM: ...
+
+
+def create_llm(
+    agent_name: str | Agent | AgentSpec,
+    server_names: List[str] | None = None,
+    instruction: str | None = None,
+    provider: str | None = "openai",
+    model: str | ModelPreferences | None = None,
+    request_params: RequestParams | None = None,
+    context: Context | None = None,
+) -> AugmentedLLM:
+    """
+    Create an Augmented LLM from an Agent, an AgentSpec, or an agent name.
+    """
+    # NOTE: the first argument may be an Agent/AgentSpec (passed positionally)
+    # or an agent name combined with server_names/instruction.
+    if isinstance(agent_name, (Agent, AgentSpec)):
+        agent = (
+            agent_name
+            if isinstance(agent_name, Agent)
+            else agent_from_spec(agent_name, context=context)
+        )
+        factory = _llm_factory(
+            provider=provider,
+            model=model,
+            request_params=request_params,
+            context=context,
+        )
+        return factory(agent=agent)
+
     agent = agent_from_spec(
         AgentSpec(
             name=agent_name, instruction=instruction, server_names=server_names or []

From a4287a25c25b307196ea31b9e8a4bb16fbe74e7e Mon Sep 17 00:00:00 2001
From: Sarmad Qadri
Date: Wed, 17 Sep 2025 17:49:17 -0400
Subject: [PATCH 3/3] lint & format

---
 examples/mcp/mcp_elicitation/cloud/main.py    | 10 ++++----
 src/mcp_agent/app.py                          | 24 ++++++++++++-------
 .../mcp_agent_server/elicitation/client.py    |  1 -
 .../mcp_agent_server/elicitation/server.py    |  1 +
 .../mcp_agent_server/notifications/client.py  |  5 ++--
 .../mcp_agent_server/notifications/server.py  |  1 -
 .../mcp_agent_server/reference/client.py      |  1 -
 .../mcp_agent_server/sampling/client.py       |  1 -
 src/mcp_agent/data/templates/basic_agent.py   |  1 -
 9 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/examples/mcp/mcp_elicitation/cloud/main.py b/examples/mcp/mcp_elicitation/cloud/main.py
index 296b7303a..e3d8b96a4 100644
--- a/examples/mcp/mcp_elicitation/cloud/main.py
+++ b/examples/mcp/mcp_elicitation/cloud/main.py
@@ -8,10 +8,7 @@
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-app = MCPApp(
-    name="elicitation_demo",
-    description="Demo of workflow with elicitation"
-)
+app = MCPApp(name="elicitation_demo", description="Demo of workflow with elicitation")
 
 
 # mcp_context for fastmcp context
@@ -24,7 +21,9 @@ class ConfirmBooking(BaseModel):
         confirm: bool = Field(description="Confirm booking?")
         notes: str = Field(default="", description="Special requests")
 
-    app.logger.info(f"Confirming the use wants to book a table for {party_size} on {date} via elicitation")
+    app.logger.info(
+        f"Confirming the user wants to book a table for {party_size} on {date} via elicitation"
+    )
 
     result = await app.context.upstream_session.elicit(
         message=f"Confirm booking for {party_size} on {date}?",
@@ -42,4 +41,3 @@
         return "Booking declined"
     elif result.action == "cancel":
         return "Booking cancelled"
-
diff --git a/src/mcp_agent/app.py b/src/mcp_agent/app.py
index def8da1f2..7908aac40 100644
--- a/src/mcp_agent/app.py
+++ b/src/mcp_agent/app.py
@@ -4,7 +4,17 @@
 import functools
 from types import MethodType
-from typing import Any, Dict, Optional, Type, TypeVar, Callable, TYPE_CHECKING, ParamSpec, overload
+from typing import (
+    Any,
+    Dict,
+    Optional,
+    Type,
+    TypeVar,
+    Callable,
+    TYPE_CHECKING,
+    ParamSpec,
+    overload,
+)
 from datetime import timedelta
 from contextlib import asynccontextmanager
 
@@ -716,8 +726,7 @@ async def _run(self, *args, **kwargs):  # type: ignore[no-redef]
         return auto_cls
 
     @overload
-    def tool(self, __fn: Callable[P, R]) -> Callable[P, R]:
-        ...
+    def tool(self, __fn: Callable[P, R]) -> Callable[P, R]: ...
 
     @overload
     def tool(
@@ -726,8 +735,7 @@ def tool(
         *,
         description: str | None = None,
         structured_output: bool | None = None,
-    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
-        ...
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]: ...
 
     def tool(
         self,
@@ -784,8 +792,7 @@ def decorator(fn: Callable[P, R]) -> Callable[P, R]:
         return decorator
 
     @overload
-    def async_tool(self, __fn: Callable[P, R]) -> Callable[P, R]:
-        ...
+    def async_tool(self, __fn: Callable[P, R]) -> Callable[P, R]: ...
 
     @overload
     def async_tool(
@@ -793,8 +800,7 @@ def async_tool(
         name: str | None = None,
         *,
         description: str | None = None,
-    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
-        ...
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]: ...
def async_tool( self, diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py index e5cb2bc05..5bad8e3b2 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py @@ -73,4 +73,3 @@ async def main() -> None: if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py index 9c65deb10..57fbcf83a 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py @@ -34,6 +34,7 @@ async def confirm_action(action: str, app_ctx: Optional[AppContext] = None) -> s """Ask the user to confirm an action.""" _app = app_ctx.app if app_ctx else app upstream = getattr(_app.context, "upstream_session", None) + class ConfirmBooking(BaseModel): confirm: bool = Field(description="Confirm action?") notes: str = Field(default="", description="Optional notes") diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py b/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py index c21780eac..0b43f90cc 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py @@ -60,10 +60,11 @@ async def main() -> None: ) as server: await server.set_logging_level("info") await server.call_tool("notify", {"message": "Hello from client"}) - await server.call_tool("notify_progress", {"progress": 0.25, "message": "Quarter"}) + await server.call_tool( + "notify_progress", {"progress": 0.25, "message": "Quarter"} + ) print("Sent notify + notify_progress") if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py b/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py index 622d4b2f3..83c987ceb 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py @@ -70,4 +70,3 @@ async def main() -> None: if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py b/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py index 11d1439fc..9ed0747fb 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py @@ -106,4 +106,3 @@ async def main() -> None: if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py b/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py index 60c899571..e0e0ca15b 100644 --- a/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py @@ -65,4 +65,3 @@ async def main() -> None: if __name__ == "__main__": asyncio.run(main()) - diff --git a/src/mcp_agent/data/templates/basic_agent.py b/src/mcp_agent/data/templates/basic_agent.py index d4125abab..8cfc33f3a 100644 --- a/src/mcp_agent/data/templates/basic_agent.py +++ b/src/mcp_agent/data/templates/basic_agent.py @@ -21,7 +21,6 @@ from mcp_agent.agents.agent import Agent from mcp_agent.agents.agent_spec import AgentSpec from 
mcp_agent.core.context import Context as AppContext
-from mcp_agent.server.app_server import create_mcp_server_for_app
+# Uncomment when serving this app over MCP (see the UNCOMMENT block in main()):
+# from mcp_agent.server.app_server import create_mcp_server_for_app
 from mcp_agent.workflows.factory import create_agent
 from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM