diff --git a/examples/mcp/mcp_elicitation/cloud/main.py b/examples/mcp/mcp_elicitation/cloud/main.py
index 296b7303a..e3d8b96a4 100644
--- a/examples/mcp/mcp_elicitation/cloud/main.py
+++ b/examples/mcp/mcp_elicitation/cloud/main.py
@@ -8,10 +8,7 @@
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-app = MCPApp(
-    name="elicitation_demo",
-    description="Demo of workflow with elicitation"
-)
+app = MCPApp(name="elicitation_demo", description="Demo of workflow with elicitation")
 
 
 # mcp_context for fastmcp context
@@ -24,7 +21,9 @@ class ConfirmBooking(BaseModel):
         confirm: bool = Field(description="Confirm booking?")
         notes: str = Field(default="", description="Special requests")
 
-    app.logger.info(f"Confirming the use wants to book a table for {party_size} on {date} via elicitation")
+    app.logger.info(
+        f"Confirming the user wants to book a table for {party_size} on {date} via elicitation"
+    )
 
     result = await app.context.upstream_session.elicit(
         message=f"Confirm booking for {party_size} on {date}?",
@@ -42,4 +41,3 @@ class ConfirmBooking(BaseModel):
             return "Booking declined"
     elif result.action == "cancel":
         return "Booking cancelled"
-
diff --git a/src/mcp_agent/app.py b/src/mcp_agent/app.py
index 5c1b4f823..7908aac40 100644
--- a/src/mcp_agent/app.py
+++ b/src/mcp_agent/app.py
@@ -4,7 +4,17 @@
 import functools
 from types import MethodType
 
-from typing import Any, Dict, Optional, Type, TypeVar, Callable, TYPE_CHECKING
+from typing import (
+    Any,
+    Dict,
+    Optional,
+    Type,
+    TypeVar,
+    Callable,
+    TYPE_CHECKING,
+    ParamSpec,
+    overload,
+)
 from datetime import timedelta
 from contextlib import asynccontextmanager
 
@@ -36,6 +46,7 @@
 from mcp_agent.agents.agent_spec import AgentSpec
 from mcp_agent.executor.workflow import Workflow
 
+P = ParamSpec("P")
 R = TypeVar("R")
 
 
@@ -714,13 +725,25 @@ async def _run(self, *args, **kwargs):  # type: ignore[no-redef]
         self.workflow(auto_cls, workflow_id=workflow_name)
         return auto_cls
 
+    @overload
+    def tool(self, __fn: Callable[P, R]) -> Callable[P, R]: ...
+
+    @overload
+    def tool(
+        self,
+        name: str | None = None,
+        *,
+        description: str | None = None,
+        structured_output: bool | None = None,
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]: ...
+
     def tool(
         self,
         name: str | None = None,
         *,
         description: str | None = None,
         structured_output: bool | None = None,
-    ) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
+    ):
         """
         Decorator to declare a synchronous MCP tool that runs via an
         auto-generated Workflow and waits for completion before returning.
@@ -729,7 +752,7 @@ def tool(
         endpoints are available.
         """
 
-        def decorator(fn: Callable[..., Any]) -> Callable[..., Any]:
+        def decorator(fn: Callable[P, R]) -> Callable[P, R]:
             tool_name = name or fn.__name__
 
             # Early validation: Use the shared tool adapter logic to validate
@@ -762,18 +785,29 @@ def decorator(fn: Callable[..., Any]) -> Callable[..., Any]:
 
         # Support bare usage: @app.tool without parentheses
         if callable(name) and description is None and structured_output is None:
-            fn = name  # type: ignore[assignment]
+            _fn = name  # type: ignore[assignment]
             name = None
-            return decorator(fn)  # type: ignore[arg-type]
+            return decorator(_fn)  # type: ignore[arg-type]
 
         return decorator
 
+    @overload
+    def async_tool(self, __fn: Callable[P, R]) -> Callable[P, R]: ...
+
+    @overload
     def async_tool(
         self,
        name: str | None = None,
         *,
         description: str | None = None,
-    ) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
+    ) -> Callable[[Callable[P, R]], Callable[P, R]]: ...
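+
+    # Both decorator forms type-check against the overloads above, e.g.
+    # (hypothetical tool bodies, shown for illustration only):
+    #
+    #   @app.async_tool
+    #   async def fetch_page(url: str) -> str: ...
+    #
+    #   @app.async_tool("fetch_page", description="Fetch a URL")
+    #   async def fetch_page(url: str) -> str: ...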
+ + def async_tool( + self, + name: str | None = None, + *, + description: str | None = None, + ): """ Decorator to declare an asynchronous MCP tool. @@ -781,7 +815,7 @@ def async_tool( the standard per-workflow tools (run/get_status) are exposed by the server. """ - def decorator(fn: Callable[..., Any]) -> Callable[..., Any]: + def decorator(fn: Callable[P, R]) -> Callable[P, R]: workflow_name = name or fn.__name__ # Early validation: Use the shared tool adapter logic to validate @@ -812,9 +846,9 @@ def decorator(fn: Callable[..., Any]) -> Callable[..., Any]: # Support bare usage: @app.async_tool without parentheses if callable(name) and description is None: - fn = name # type: ignore[assignment] + _fn = name # type: ignore[assignment] name = None - return decorator(fn) # type: ignore[arg-type] + return decorator(_fn) # type: ignore[arg-type] return decorator diff --git a/src/mcp_agent/cli/commands/init.py b/src/mcp_agent/cli/commands/init.py index a7bd07c7c..475110ee6 100644 --- a/src/mcp_agent/cli/commands/init.py +++ b/src/mcp_agent/cli/commands/init.py @@ -45,6 +45,35 @@ def _write(path: Path, content: str, force: bool) -> bool: return False +def _write_readme(dir_path: Path, content: str, force: bool) -> str | None: + """Create a README file with fallback naming if a README already exists. + + Returns the filename created, or None if it could not be written (in which case + the content is printed to console as a fallback). + """ + candidates = [ + "README.md", + "README.mcp-agent.md", + "README.mcp.md", + ] + # Add numeric fallbacks + candidates += [f"README.{i}.md" for i in range(1, 6)] + + for name in candidates: + path = dir_path / name + if not path.exists() or force: + ok = _write(path, content, force) + if ok: + return name + # Fallback: print content to console if we couldn't write any variant + console.print( + "\n[yellow]A README already exists and could not be overwritten.[/yellow]" + ) + console.print("[bold]Suggested README contents:[/bold]\n") + console.print(content) + return None + + @app.callback(invoke_without_command=True) def init( ctx: typer.Context, @@ -136,7 +165,7 @@ def init( else: # Ask for an alternate filename and ensure it ends with .py alt_name = Prompt.ask( - "Enter a filename to save the agent", default="agent.py" + "Enter a filename to save the agent", default="main.py" ) if not alt_name.endswith(".py"): alt_name += ".py" @@ -153,6 +182,15 @@ def init( except Exception: pass + # No separate agents.yaml needed; agent definitions live in mcp_agent.config.yaml + + # Create README for the basic template + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + elif template == "server": server_path = dir / "server.py" server_content = _load_template("basic_agent_server.py") @@ -164,6 +202,13 @@ def init( except Exception: pass + # README for server template + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + elif template == "token": token_path = dir / "token_example.py" token_content = _load_template("token_counter.py") @@ -175,6 +220,12 @@ def init( except Exception: pass + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + elif template == "factory": factory_path = dir / "factory.py" 
factory_content = _load_template("agent_factory.py") @@ -192,6 +243,12 @@ def init( if agents_content and _write(agents_path, agents_content, force): files_created.append("agents.yaml") + readme_content = _load_template("README_init.md") + if readme_content: + created = _write_readme(dir, readme_content, force) + if created: + files_created.append(created) + # Display results if files_created: console.print("\n[green]✅ Successfully initialized project![/green]") @@ -208,16 +265,6 @@ def init( if template == "basic": run_file = entry_script_name or "main.py" console.print(f"3. Run your agent: [cyan]uv run {run_file}[/cyan]") - console.print( - f" Or use: [cyan]mcp-agent dev start --script {run_file}[/cyan]" - ) - console.print( - f" Or serve: [cyan]mcp-agent dev serve --script {run_file}[/cyan]" - ) - console.print(" Or chat: [cyan]mcp-agent dev chat[/cyan]") - console.print( - "4. Edit config: [cyan]mcp-agent config edit[/cyan] (then rerun)" - ) elif template == "server": console.print("3. Run the server: [cyan]uv run server.py[/cyan]") console.print( @@ -229,9 +276,9 @@ def init( elif template == "factory": console.print("3. Customize agents in [cyan]agents.yaml[/cyan]") console.print("4. Run the factory: [cyan]uv run factory.py[/cyan]") - elif template == "minimal": - console.print("3. Create your agent script") - console.print(" See examples: [cyan]mcp-agent quickstart[/cyan]") + elif template == "minimal": + console.print("3. Create your agent script") + console.print(" See examples: [cyan]mcp-agent quickstart[/cyan]") console.print( "\n[dim]Run [cyan]mcp-agent doctor[/cyan] to check your configuration[/dim]" diff --git a/src/mcp_agent/cli/commands/quickstart.py b/src/mcp_agent/cli/commands/quickstart.py index f79144b86..854f68c0a 100644 --- a/src/mcp_agent/cli/commands/quickstart.py +++ b/src/mcp_agent/cli/commands/quickstart.py @@ -86,6 +86,10 @@ def overview() -> None: ("token-counter", "data/examples/basic/token_counter"), ("agent-factory", "data/examples/basic/agent_factory"), ("basic-agent-server", "data/examples/mcp_agent_server/asyncio"), + ("reference-agent-server", "data/examples/mcp_agent_server/reference"), + ("elicitation", "data/examples/mcp_agent_server/elicitation"), + ("sampling", "data/examples/mcp_agent_server/sampling"), + ("notifications", "data/examples/mcp_agent_server/notifications"), ] for n, p in rows: table.add_row(n, p) @@ -199,3 +203,46 @@ def basic_agent_server( src = EXAMPLE_ROOT / "mcp_agent_server" / "asyncio" copied = _copy_tree(src, dst, force) console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("reference-agent-server") +def reference_agent_server( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "reference_agent_server" + copied = _copy_pkg_tree("mcp_agent_server/reference", dst, force) + if not copied: + src = EXAMPLE_ROOT / "mcp_agent_server" / "reference" + copied = _copy_tree(src, dst, force) + console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("elicitation") +def elicitation( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "elicitation" + copied = _copy_pkg_tree("mcp_agent_server/elicitation", dst, force) + console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("sampling") +def sampling( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "sampling" + copied = 
_copy_pkg_tree("mcp_agent_server/sampling", dst, force) + console.print(f"Copied {copied} set(s) to {dst}") + + +@app.command("notifications") +def notifications( + dir: Path = typer.Argument(Path(".")), + force: bool = typer.Option(False, "--force", "-f"), +) -> None: + dst = dir.resolve() / "notifications" + copied = _copy_pkg_tree("mcp_agent_server/notifications", dst, force) + console.print(f"Copied {copied} set(s) to {dst}") diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md new file mode 100644 index 000000000..9cb18bb35 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/README.md @@ -0,0 +1,33 @@ +# Elicitation Server + +Minimal server demonstrating user confirmation via elicitation. + +## Run + +```bash +uv run server.py +``` + +Connect with the minimal client: + +```bash +uv run client.py +``` + +Tools: + +- `confirm_action(action: str)` — prompts the user (via upstream client) to accept or decline. + +This example uses console handlers for local testing. In an MCP client UI, the prompt will be displayed to the user. + +## Deploy to Cloud (optional) + +1. Set your API keys in `mcp_agent.secrets.yaml`. + +2. From this directory, deploy: + +```bash +uv run mcp-agent deploy elicitation-example +``` + +You’ll receive an app ID and a URL. Use the URL with an MCP client (e.g., MCP Inspector) and append `/sse` to the end. Set the Bearer token in the header to your mcp-agent API key. diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py new file mode 100644 index 000000000..5bad8e3b2 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/client.py @@ -0,0 +1,75 @@ +""" +Minimal client for the Elicitation Server. 
+ +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp_agent.mcp.gen_client import gen_client +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + settings = Settings(execution_engine="asyncio") + app = MCPApp( + name="elicitation_client", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, + settings=settings, + ) + + async with app.run() as client_app: + # Configure server entry + cfg = type("Cfg", (), {})() + cfg.name = "elicitation_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + client_app.context.server_registry.registry["elicitation_server"] = cfg + + async with gen_client( + "elicitation_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + await server.set_logging_level("info") + res = await server.call_tool("confirm_action", {"action": "proceed"}) + print("confirm_action:", res.content[0].text if res.content else None) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py new file mode 100644 index 000000000..57fbcf83a --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/elicitation/server.py @@ -0,0 +1,82 @@ +""" +Elicitation Server (asyncio) + +Demonstrates user confirmation via elicitation. 
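+
+The confirm_action tool sends an elicit request (with a small pydantic-derived
+JSON schema) to the upstream client and maps the accept/decline/cancel
+responses to plain string results.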
+ +Run: + uv run server.py +""" + +from __future__ import annotations + +import asyncio +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback +from mcp.types import ElicitRequestedSchema +from pydantic import BaseModel, Field + + +app = MCPApp( + name="elicitation_server", + description="Minimal server showing elicitation (user confirmation)", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, +) + + +@app.tool(name="confirm_action") +async def confirm_action(action: str, app_ctx: Optional[AppContext] = None) -> str: + """Ask the user to confirm an action.""" + _app = app_ctx.app if app_ctx else app + upstream = getattr(_app.context, "upstream_session", None) + + class ConfirmBooking(BaseModel): + confirm: bool = Field(description="Confirm action?") + notes: str = Field(default="", description="Optional notes") + + schema: ElicitRequestedSchema = ConfirmBooking.model_json_schema() + if upstream is not None: + result = await upstream.elicit( + message=f"Do you want to {action}?", requestedSchema=schema + ) + if getattr(result, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(result, "content", {})) + return ( + f"Action '{action}' confirmed. Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(result, "action", "") == "decline": + return "Action declined" + return "Action cancelled" + # Fallback to console handler + if _app.context.elicitation_handler: + resp = await _app.context.elicitation_handler( + {"message": f"Do you want to {action}?", "requestedSchema": schema} + ) + if getattr(resp, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(resp, "content", {})) + return ( + f"Action '{action}' confirmed. Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(resp, "action", "") == "decline": + return "Action declined" + return "Action cancelled" + return f"Action '{action}' confirmed by default" + + +async def main() -> None: + async with app.run() as agent_app: + mcp_server = create_mcp_server_for_app(agent_app) + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md b/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md new file mode 100644 index 000000000..259b4dd33 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/README.md @@ -0,0 +1,34 @@ +# Notifications Server + +Minimal server demonstrating logging and non-logging notifications. + +## Run + +```bash +uv run server.py +``` + +Connect with the minimal client: + +```bash +uv run client.py +``` + +Tools: + +- `notify(message: str, level: str='info')` — forwards logs to the upstream client. +- `notify_progress(progress: float, message: Optional[str])` — sends a progress notification. + +These are best-effort and non-blocking for the server. + +## Deploy to Cloud (optional) + +1. Set API keys in `mcp_agent.secrets.yaml` as needed. + +2. Deploy from this directory: + +```bash +uv run mcp-agent deploy notifications-demo +``` + +Use the returned URL with `/sse` in an MCP client. 
Set the Bearer token in the header to your mcp-agent API key. diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py b/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py new file mode 100644 index 000000000..0b43f90cc --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/client.py @@ -0,0 +1,70 @@ +""" +Minimal client for the Notifications Server. + +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp_agent.mcp.gen_client import gen_client +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + settings = Settings(execution_engine="asyncio") + app = MCPApp(name="notifications_client", settings=settings) + + async with app.run() as client_app: + cfg = type("Cfg", (), {})() + cfg.name = "notifications_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + client_app.context.server_registry.registry["notifications_server"] = cfg + + async with gen_client( + "notifications_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + await server.set_logging_level("info") + await server.call_tool("notify", {"message": "Hello from client"}) + await server.call_tool( + "notify_progress", {"progress": 0.25, "message": "Quarter"} + ) + print("Sent notify + notify_progress") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py b/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py new file mode 100644 index 000000000..83c987ceb --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/notifications/server.py @@ -0,0 +1,72 @@ +""" +Notifications Server (asyncio) + +Demonstrates logging and non-logging notifications. 
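+
+The notify tool logs through app.logger (forwarded upstream as
+notifications/message), while notify_progress sends notifications/progress
+directly over the upstream session.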
+ +Run: + uv run server.py +""" + +from __future__ import annotations + +import asyncio +from typing import Optional, Literal + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app + + +app = MCPApp( + name="notifications_server", + description="Minimal server showing notifications and logging", +) + + +@app.tool(name="notify") +def notify( + message: str, + level: Literal["debug", "info", "warning", "error"] = "info", + app_ctx: Optional[AppContext] = None, +) -> str: + """Send an upstream log/notification at the requested level.""" + _app = app_ctx.app if app_ctx else app + logger = _app.logger + if level == "debug": + logger.debug(message) + elif level == "warning": + logger.warning(message) + elif level == "error": + logger.error(message) + else: + logger.info(message) + return "ok" + + +@app.tool(name="notify_progress") +async def notify_progress( + progress: float = 0.5, + message: str | None = "Demo progress", + app_ctx: Optional[AppContext] = None, +) -> str: + """Send a progress notification via upstream session (best-effort).""" + _app = app_ctx.app if app_ctx else app + upstream = getattr(_app.context, "upstream_session", None) + if upstream is None: + _app.logger.warning("No upstream session to notify") + return "no-upstream" + await upstream.send_progress_notification( + progress_token="notifications-demo", progress=progress, message=message + ) + _app.logger.info("Sent notifications/progress") + return "ok" + + +async def main() -> None: + async with app.run() as agent_app: + mcp_server = create_mcp_server_for_app(agent_app) + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md b/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md new file mode 100644 index 000000000..2b94570f5 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/README.md @@ -0,0 +1,61 @@ +# Reference Agent Server + +This is a clean, strongly-typed example of an MCP Agent server showcasing: + +- Agent behavior with MCP servers (fetch + filesystem) and an LLM +- Tools implemented with `@app.tool` and `@app.async_tool` +- Notifications and logging via `app.logger` +- Elicitation (user confirmation) proxied to the upstream client +- Sampling (LLM call) with simple `RequestParams` +- Prompts and Resources registered on the FastMCP server + +## Run the server + +```bash +uv run server.py +``` + +This starts an SSE server at `http://127.0.0.1:8000/sse`. + +## Try it with the minimal client + +```bash +uv run client.py +``` + +The client connects over SSE, sets logging level, and exercises tools: + +- `finder_tool` — Agent + LLM + MCP servers +- `notify` — logging/notifications +- `sample_haiku` — LLM sampling +- `confirm_action` — elicitation prompt + +## Prompts & Resources + +The server registers a couple of demo resources and a simple prompt: + +- Resources: + - `demo://docs/readme` — sample README content + - `demo://{city}/weather` — simple weather string +- Prompt: + - `echo(message: str)` — returns `Prompt: {message}` + +You can use any MCP client capable of listing resources/prompts to explore these. + +## Configuration + +Put your API keys in `mcp_agent.secrets.yaml` or environment variables +(`OPENAI_API_KEY`, etc.). The server uses the MCP app configuration +(`mcp_agent.config.yaml`) for MCP servers and provider defaults. 
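+
+A minimal sketch of the relevant keys (assuming the same `fetch` and `filesystem` servers used by `finder_tool`; adjust to your setup):
+
+```yaml
+openai:
+  default_model: gpt-4o-mini
+mcp:
+  servers:
+    fetch:
+      command: uvx
+      args: ["mcp-server-fetch"]
+    filesystem:
+      command: npx
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "."]
+```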
+ +## Deploy to Cloud (optional) + +1. Set API keys in `mcp_agent.secrets.yaml`. + +2. From this directory: + +```bash +uv run mcp-agent deploy reference-server +``` + +Use the URL (append `/sse`) in an MCP client and include your mcp-agent API key as a bearer token if required. diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py b/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py new file mode 100644 index 000000000..9ed0747fb --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/client.py @@ -0,0 +1,108 @@ +""" +Minimal client for the Reference Agent Server. + +Connects to the server over SSE and exercises tools: + - finder_tool, notify, sample_haiku, confirm_action + - list tools and fetch demo prompt/resource + +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.gen_client import gen_client +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + # Force asyncio executor locally for client-side flows (sampling/elicitation callbacks) + settings = Settings(execution_engine="asyncio") + app = MCPApp( + name="reference_client", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, + settings=settings, + ) + + async with app.run() as client_app: + client_app.logger.info("Connecting to reference server...") + + # Server definition provided inline + client_app.context.server_registry.registry["reference_agent_server"] = ( + client_app.context.server_registry.registry.get("reference_agent_server") + or type("_Cfg", (), {})() + ) + cfg = client_app.context.server_registry.registry["reference_agent_server"] + cfg.name = "reference_agent_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + + async with gen_client( + "reference_agent_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + # Ask server to set logging level + await server.set_logging_level("info") + + # List tools + tools = await server.list_tools() + print("Tools:", [t.name for t in tools.tools]) + + # Run finder_tool + res = await server.call_tool( + "finder_tool", + {"request": "List files in current directory and summarize"}, + ) + print("finder_tool:", res.content[0].text if res.content else None) + + # Notify + await server.call_tool("notify", {"message": 
"Hello from client"}) + + # Sampling + res = await server.call_tool("sample_haiku", {"topic": "clouds"}) + print("sample_haiku:", res.content[0].text if res.content else None) + + # Elicitation demo + res = await server.call_tool("confirm_action", {"action": "proceed"}) + print("confirm_action:", res.content[0].text if res.content else None) + + # Exercise FastMCP prompt/resource via list_tools isn't enough; show resource URIs in README + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py b/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py new file mode 100644 index 000000000..447f8d335 --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/reference/server.py @@ -0,0 +1,187 @@ +""" +Reference Agent Server (asyncio) + +Demonstrates: + - Agent behavior with MCP servers (fetch + filesystem) and an LLM + - Tools using @app.tool and @app.async_tool + - Notifications and logging via app.logger + - Elicitation (user confirmation) proxied to upstream client + - Sampling (LLM request) with simple RequestParams + - Prompts and Resources registered on the FastMCP server + +Run: + uv run server.py + +Test client: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +import os +from typing import Optional, Literal + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app +from mcp_agent.human_input.handler import console_input_callback +from mcp_agent.elicitation.handler import console_elicitation_callback + +from mcp_agent.agents.agent import Agent +from mcp_agent.workflows.factory import create_llm +from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM +from mcp_agent.workflows.llm.augmented_llm import RequestParams as LLMRequestParams +from mcp_agent.workflows.llm.llm_selector import ModelPreferences +from mcp.types import ElicitRequestedSchema +from pydantic import BaseModel, Field + + +app = MCPApp( + name="reference_agent_server", + description="Reference server demonstrating agent + tools + prompts + resources", + human_input_callback=console_input_callback, + elicitation_callback=console_elicitation_callback, +) + + +@app.tool(name="finder_tool") +async def finder_tool(request: str, app_ctx: Optional[AppContext] = None) -> str: + """Agent that can use filesystem+fetch and an LLM to answer the request.""" + _app = app_ctx.app if app_ctx else app + ctx = _app.context + try: + if "filesystem" in ctx.config.mcp.servers: + ctx.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) + except Exception: + pass + + agent = Agent( + name="finder", + instruction=( + "Use MCP servers to fetch and read files, then answer the user's query concisely." 
+ ), + server_names=["fetch", "filesystem"], + context=ctx, + ) + async with agent: + llm = await agent.attach_llm(OpenAIAugmentedLLM) + return await llm.generate_str(message=request) + + +@app.tool(name="notify") +def notify( + message: str, + level: Literal["debug", "info", "warning", "error"] = "info", + app_ctx: Optional[AppContext] = None, +) -> str: + """Send an upstream log/notification at the requested level.""" + _app = app_ctx.app if app_ctx else app + logger = _app.logger + if level == "debug": + logger.debug(message) + elif level == "warning": + logger.warning(message) + elif level == "error": + logger.error(message) + else: + logger.info(message) + return "ok" + + +@app.tool(name="confirm_action") +async def confirm_action( + action: str, + app_ctx: Optional[AppContext] = None, +) -> str: + """Ask the user to confirm the action via elicitation.""" + _app = app_ctx.app if app_ctx else app + upstream = getattr(_app.context, "upstream_session", None) + + class ConfirmBooking(BaseModel): + confirm: bool = Field(description="Confirm action?") + notes: str = Field(default="", description="Optional notes") + + schema: ElicitRequestedSchema = ConfirmBooking.model_json_schema() + + if upstream is not None: + result = await upstream.elicit( + message=f"Do you want to {action}?", requestedSchema=schema + ) + if getattr(result, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(result, "content", {})) + return ( + f"Action '{action}' confirmed. Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(result, "action", "") == "decline": + return "Action declined" + return "Action cancelled" + + # Fallback to handler if present + if _app.context.elicitation_handler: + resp = await _app.context.elicitation_handler( + {"message": f"Do you want to {action}?", "requestedSchema": schema} + ) + if getattr(resp, "action", "") in ("accept", "accepted"): + data = ConfirmBooking.model_validate(getattr(resp, "content", {})) + return ( + f"Action '{action}' confirmed. Notes: {data.notes or 'None'}" + if data.confirm + else f"Action '{action}' cancelled" + ) + if getattr(resp, "action", "") == "decline": + return "Action declined" + return "Action cancelled" + + return f"Action '{action}' confirmed by default" + + +@app.tool(name="sample_haiku") +async def sample_haiku(topic: str, app_ctx: Optional[AppContext] = None) -> str: + """Generate a short poem using configured LLM settings.""" + _app = app_ctx.app if app_ctx else app + llm = create_llm( + agent_name="sampling_demo", + server_names=[], + instruction="You are a concise poet.", + context=_app.context, + ) + req = LLMRequestParams( + maxTokens=80, + modelPreferences=ModelPreferences(hints=[]), + systemPrompt="Write a 3-line haiku.", + temperature=0.7, + use_history=False, + max_iterations=1, + ) + return await llm.generate_str(message=f"Haiku about {topic}", request_params=req) + + +async def main() -> None: + async with app.run() as agent_app: + # Create MCP server (FastMCP) that exposes tools; then add prompts/resources + mcp_server = create_mcp_server_for_app(agent_app) + + # Register a couple of demo resources + def _res_readme() -> str: + return "# Demo Resource\n\nThis is a README resource provided by the reference server." + + def _res_weather(city: str) -> str: + return f"It is sunny in {city} today!" 
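+
+    # Once registered below, a client can read these, e.g. (hypothetical
+    # client-side sketch using the MCP ClientSession API):
+    #   res = await session.read_resource("demo://Paris/weather")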
+ + mcp_server.resource("demo://docs/readme")(_res_readme) + mcp_server.resource("demo://{city}/weather")(_res_weather) + + # Register a simple prompt + def _prompt_echo(message: str) -> str: + return f"Prompt: {message}" + + mcp_server.prompt()(_prompt_echo) + + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md b/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md new file mode 100644 index 000000000..0e083e12e --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/README.md @@ -0,0 +1,33 @@ +# Sampling Server + +Minimal server demonstrating LLM sampling. + +## Run + +```bash +uv run server.py +``` + +Connect with the minimal client: + +```bash +uv run client.py +``` + +Tools: + +- `sample_haiku(topic: str)` — generates a short poem using configured LLM settings. + +Add your API key(s) to `mcp_agent.secrets.yaml` or environment variables (e.g. `OPENAI_API_KEY`). + +## Deploy to Cloud (optional) + +1) Set API keys in `mcp_agent.secrets.yaml`. + +2) Deploy from this directory: + +```bash +uv run mcp-agent deploy sampling --config-dir . +``` + +Use the returned URL with `/sse` in an MCP client and include the bearer token if needed. diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py b/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py new file mode 100644 index 000000000..e0e0ca15b --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/client.py @@ -0,0 +1,67 @@ +""" +Minimal client for the Sampling Server. + +Run: + uv run client.py +""" + +from __future__ import annotations + +import asyncio +from datetime import timedelta +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context +from mcp_agent.config import Settings +from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession +from mcp_agent.mcp.gen_client import gen_client +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from mcp import ClientSession +from mcp.types import LoggingMessageNotificationParams + + +def _make_session( + read_stream: MemoryObjectReceiveStream, + write_stream: MemoryObjectSendStream, + read_timeout_seconds: timedelta | None, + context: Optional[Context] = None, +) -> ClientSession: + async def on_server_log(params: LoggingMessageNotificationParams) -> None: + level = params.level.upper() + name = params.logger or "server" + print(f"[SERVER LOG] [{level}] [{name}] {params.data}") + + return MCPAgentClientSession( + read_stream=read_stream, + write_stream=write_stream, + read_timeout_seconds=read_timeout_seconds, + logging_callback=on_server_log, + context=context, + ) + + +async def main() -> None: + settings = Settings(execution_engine="asyncio") + app = MCPApp(name="sampling_client", settings=settings) + + async with app.run() as client_app: + cfg = type("Cfg", (), {})() + cfg.name = "sampling_server" + cfg.transport = "sse" + cfg.url = "http://127.0.0.1:8000/sse" + client_app.context.server_registry.registry["sampling_server"] = cfg + + async with gen_client( + "sampling_server", + client_app.context.server_registry, + client_session_factory=_make_session, + context=client_app.context, + ) as server: + await server.set_logging_level("info") + res = await server.call_tool("sample_haiku", {"topic": "mountains"}) + print("sample_haiku:", res.content[0].text if res.content else None) + + +if __name__ == "__main__": 
+ asyncio.run(main()) diff --git a/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py b/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py new file mode 100644 index 000000000..3255b65ce --- /dev/null +++ b/src/mcp_agent/data/examples/mcp_agent_server/sampling/server.py @@ -0,0 +1,62 @@ +""" +Sampling Server (asyncio) + +Demonstrates a minimal LLM sampling tool. + +Run: + uv run server.py +""" + +from __future__ import annotations + +import asyncio +from typing import Optional + +from mcp_agent.app import MCPApp +from mcp_agent.core.context import Context as AppContext +from mcp_agent.server.app_server import create_mcp_server_for_app +from mcp_agent.workflows.factory import create_llm +from mcp_agent.workflows.llm.augmented_llm import RequestParams as LLMRequestParams +from mcp_agent.workflows.llm.llm_selector import ModelPreferences + + +app = MCPApp( + name="sampling_server", + description="Minimal server showing LLM sampling", + human_input_callback=None, +) + + +@app.tool(name="sample_haiku") +async def sample_haiku( + topic: str, + temperature: float | None = 0.7, + app_ctx: Optional[AppContext] = None, +) -> str: + """Generate a short poem using configured LLM settings.""" + _app = app_ctx.app if app_ctx else app + llm = create_llm( + agent_name="sampling_demo", + server_names=[], + instruction="You are a concise poet.", + context=_app.context, + ) + req = LLMRequestParams( + maxTokens=80, + modelPreferences=ModelPreferences(hints=[]), + systemPrompt="Write a 3-line haiku.", + temperature=temperature, + use_history=False, + max_iterations=1, + ) + return await llm.generate_str(message=f"Haiku about {topic}", request_params=req) + + +async def main() -> None: + async with app.run() as agent_app: + mcp_server = create_mcp_server_for_app(agent_app) + await mcp_server.run_sse_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/src/mcp_agent/data/templates/README_init.md b/src/mcp_agent/data/templates/README_init.md new file mode 100644 index 000000000..7a2bb3599 --- /dev/null +++ b/src/mcp_agent/data/templates/README_init.md @@ -0,0 +1,81 @@ +# MCP-Agent Starter + +Welcome! This project was generated by `mcp-agent init`. It’s a minimal, readable starting point you can run locally or expose as an MCP server. + +## What’s included + +- An `MCPApp` named `hello_world` (see `main.py`). +- Two tools defined with decorators: + - `finder_agent(request: str, app_ctx?)` + - An Agent that uses the `filesystem` and `fetch` MCP servers plus an LLM to answer the request. + - Logs via the app logger (forwarded to the client as notifications when serving). + - `run_agent_async(agent_name: str = "web_helper", prompt: str, app_ctx?)` + - Loads an `AgentSpec` from `mcp_agent.config.yaml` (`agents.definitions`) and runs it. + - Decorated with `@app.async_tool`: when serving, returns a workflow ID; when run in this script, it awaits and returns the string result. + +## Quick start + +1. Add API keys to `mcp_agent.secrets.yaml` (or set env vars): + + - `OPENAI_API_KEY` (recommended) + - `ANTHROPIC_API_KEY` (optional) + +2. Review `mcp_agent.config.yaml`: + + - Execution engine: `asyncio` + - Logger settings + - MCP servers: `filesystem`, `fetch` + - `agents.definitions`: sample agents (`filesystem_helper`, `web_helper`) + +3. Run locally: + +```bash +uv run main.py +``` + +You’ll see two summaries printed: + +- A summary of `README.md` from your current directory. +- A summary of the intro page at modelcontextprotocol.io. + +4. 
Run it as an MCP server (and deploy when ready):
+
+### Run as an MCP server
+
+- In `main.py`, UNCOMMENT the server lines (including the commented `create_mcp_server_for_app` import) and the call to `run_sse_async()`.
+- Start the server: `uv run main.py`
+
+When you're ready to deploy, run:
+
+```bash
+mcp-agent deploy "hello_world"
+```
+
+- This wraps your app as a hosted MCP SSE server.
+- Anything decorated with `@app.tool` (or `@app.async_tool`) runs as a Temporal workflow in the cloud.
+
+## Notes
+
+- `app_ctx` is the MCPApp Context (configuration, logger, upstream session, etc.).
+- Logging uses `app.logger` and is forwarded as notifications when connected to an MCP client.
+- Configuration is read from `mcp_agent.config.yaml` and `mcp_agent.secrets.yaml` (env vars supported).
+- The default model is configurable (see `openai.default_model` in config).
+
+## Next steps
+
+- Tweak `finder_agent` instructions or server list to fit your use case.
+- Add more `AgentSpec` entries to `agents.definitions`.
+- Add tools with `@app.tool` or `@app.async_tool` as you grow the app.
+- Read the docs and explore examples:
+  - GitHub: https://github.com/lastmile-ai/mcp-agent
+  - Docs: https://docs.mcp-agent.com/
+  - Discord: https://lmai.link/discord/mcp-agent
+
+## Further reading
+
+- Configuration reference and secrets management.
+- MCP servers (stdio, SSE, streamable_http, websockets) and timeouts.
+- Temporal workflows, activities, and logging/notifications when deployed.
+- Agents and LLMs: `AgentSpec`, prompts, and model defaults.
+
+Happy building!
diff --git a/src/mcp_agent/data/templates/basic_agent.py b/src/mcp_agent/data/templates/basic_agent.py
index 77a8ccc74..8cfc33f3a 100644
--- a/src/mcp_agent/data/templates/basic_agent.py
+++ b/src/mcp_agent/data/templates/basic_agent.py
@@ -1,151 +1,160 @@
+"""
+Welcome to mcp-agent! We believe MCP is all you need to build and deploy agents.
+This canonical getting-started example covers everything you need to build your first agent.
+
+We will cover:
+  - Hello world agent: setting up a basic Agent that uses the fetch and filesystem MCP servers to read files and fetch web pages.
+  - The @app.tool and @app.async_tool decorators, which expose your agents as long-running tools on an MCP server.
+  - Notifications: app.logger messages that are forwarded to the connected MCP client.
+
+You can run this example locally using "uv run main.py", and also deploy it as an MCP server using "mcp-agent deploy".
+
+Let's get started!
+""" + +from __future__ import annotations + import asyncio -import os -import time +from typing import Optional from mcp_agent.app import MCPApp -from mcp_agent.config import ( - Settings, - LoggerSettings, - MCPSettings, - MCPServerSettings, - OpenAISettings, - AnthropicSettings, -) from mcp_agent.agents.agent import Agent -from mcp_agent.workflows.llm.augmented_llm import RequestParams -from mcp_agent.workflows.llm.llm_selector import ModelPreferences -from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM +from mcp_agent.agents.agent_spec import AgentSpec +from mcp_agent.core.context import Context as AppContext +from mcp_agent.workflows.factory import create_agent from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM -from mcp_agent.tracing.token_counter import TokenSummary - -settings = Settings( - execution_engine="asyncio", - logger=LoggerSettings(type="file", level="debug"), - mcp=MCPSettings( - servers={ - "fetch": MCPServerSettings( - command="uvx", - args=["mcp-server-fetch"], - ), - "filesystem": MCPServerSettings( - command="npx", - args=["-y", "@modelcontextprotocol/server-filesystem"], - ), - } - ), - openai=OpenAISettings( - api_key="sk-my-openai-api-key", - default_model="gpt-4o-mini", - ), - anthropic=AnthropicSettings( - api_key="sk-my-anthropic-api-key", - ), -) -# Settings can either be specified programmatically, -# or loaded from mcp_agent.config.yaml/mcp_agent.secrets.yaml -app = MCPApp(name="mcp_basic_agent") # settings=settings) +# Create the MCPApp, the root of mcp-agent. +app = MCPApp( + name="hello_world", + description="Hello world mcp-agent application", + # settings= +) -async def example_usage(): +# Hello world agent: an Agent using MCP servers + LLM +@app.tool() +async def finder_agent(request: str, app_ctx: Optional[AppContext] = None) -> str: + """ + Run an Agent with access to MCP servers (fetch + filesystem) to handle the input request. + + Notes: + - @app.tool: + - runs the function as a long-running workflow tool when deployed as an MCP server + - no-op when running this locally as a script + - app_ctx: + - MCPApp Context (configuration, logger, upstream session, etc.) + """ + + logger = app_ctx.app.logger + # Logger requests are forwarded as notifications/message to the client over MCP. + logger.info(f"finder_tool called with request: {request}") + + agent = Agent( + name="finder", + instruction=( + "You are a helpful assistant. Use MCP servers to fetch and read files," + " then answer the request concisely." + ), + server_names=["fetch", "filesystem"], + context=app_ctx, + ) + + async with agent: + llm = await agent.attach_llm(OpenAIAugmentedLLM) + result = await llm.generate_str(message=request) + return result + + +# Run a configured agent by name (defined in mcp_agent.config.yaml) +@app.async_tool(name="run_agent_async") +async def run_agent( + agent_name: str = "web_helper", + prompt: str = "Please summarize the first paragraph of https://modelcontextprotocol.io/docs/getting-started/intro", + app_ctx: Optional[AppContext] = None, +) -> str: + """ + Load an agent defined in mcp_agent.config.yaml by name and run it. 
+
+    Notes:
+      - @app.async_tool:
+        - async version of @app.tool -- returns a workflow ID (which can be used with the workflows-get_status tool)
+        - runs the function as a long-running workflow tool when deployed as an MCP server
+        - no-op when running this locally as a script
+    """
+
+    logger = app_ctx.app.logger if app_ctx else app.logger
+
+    agent_definitions = (
+        app.config.agents.definitions
+        if app.config is not None
+        and app.config.agents is not None
+        and app.config.agents.definitions is not None
+        else []
+    )
+
+    agent_spec: AgentSpec | None = None
+    for agent_def in agent_definitions:
+        if agent_def.name == agent_name:
+            agent_spec = agent_def
+            break
+
+    if agent_spec is None:
+        logger.error("Agent not found", data={"name": agent_name})
+        return f"agent '{agent_name}' not found"
+
+    logger.info(
+        "Agent found in spec",
+        data={"name": agent_name, "instruction": agent_spec.instruction},
+    )
+
+    agent = create_agent(agent_spec, context=app_ctx)
+
+    async with agent:
+        llm = await agent.attach_llm(OpenAIAugmentedLLM)
+        return await llm.generate_str(message=prompt)
+
+
+async def main():
     async with app.run() as agent_app:
-        logger = agent_app.logger
-        context = agent_app.context
-
-        logger.info("Current config:", data=context.config.model_dump())
-
-        # Add the current directory to the filesystem server's args
-        context.config.mcp.servers["filesystem"].args.extend([os.getcwd()])
+        # Run the agent
+        readme_summary = await finder_agent(
+            request="Please summarize the README.md file in this directory.",
+            app_ctx=agent_app.context,
+        )
+        print("README.md file summary:")
+        print(readme_summary)
 
-        finder_agent = Agent(
-            name="finder",
-            instruction="""You are an agent with access to the filesystem,
-            as well as the ability to fetch URLs. Your job is to identify
-            the closest match to a user's request, make the appropriate tool calls,
-            and return the URI and CONTENTS of the closest match.""",
-            server_names=["fetch", "filesystem"],
+        webpage_summary = await run_agent(
+            agent_name="web_helper",
+            prompt="Please summarize the first few paragraphs of https://modelcontextprotocol.io/docs/getting-started/intro.",
+            app_ctx=agent_app.context,
         )
+        print("Webpage summary:")
+        print(webpage_summary)
 
-        async with finder_agent:
-            logger.info("finder: Connected to server, calling list_tools...")
-            result = await finder_agent.list_tools()
-            logger.info("Tools available:", data=result.model_dump())
-
-            llm = await finder_agent.attach_llm(OpenAIAugmentedLLM)
-            result = await llm.generate_str(
-                message="Print the contents of mcp_agent.config.yaml verbatim",
-            )
-            logger.info(f"mcp_agent.config.yaml contents: {result}")
-
-            # Let's switch the same agent to a different LLM
-            llm = await finder_agent.attach_llm(AnthropicAugmentedLLM)
-
-            result = await llm.generate_str(
-                message="Print the first 2 paragraphs of https://modelcontextprotocol.io/introduction",
-            )
-            logger.info(f"First 2 paragraphs of Model Context Protocol docs: {result}")
-
-            # Multi-turn conversations
-            result = await llm.generate_str(
-                message="Summarize those paragraphs in a 128 character tweet",
-                # You can configure advanced options by setting the request_params object
-                request_params=RequestParams(
-                    # See https://modelcontextprotocol.io/docs/concepts/sampling#model-preferences for more details
-                    modelPreferences=ModelPreferences(
-                        costPriority=0.1, speedPriority=0.2, intelligencePriority=0.7
-                    ),
-                    # You can also set the model directly using the 'model' field
-                    # Generally request_params type aligns with the Sampling API type in MCP
-                ),
-            )
-            logger.info(f"Paragraph as a tweet: {result}")
-
-            # Display final comprehensive token usage summary (use app convenience)
-            await display_token_summary(agent_app, finder_agent)
-
-
-async def display_token_summary(app_ctx: MCPApp, agent: Agent | None = None):
-    """Display comprehensive token usage summary using app/agent convenience APIs."""
-    summary: TokenSummary = await app_ctx.get_token_summary()
-
-    print("\n" + "=" * 50)
-    print("TOKEN USAGE SUMMARY")
-    print("=" * 50)
-
-    # Total usage and cost
-    print("\nTotal Usage:")
-    print(f"  Total tokens: {summary.usage.total_tokens:,}")
-    print(f"  Input tokens: {summary.usage.input_tokens:,}")
-    print(f"  Output tokens: {summary.usage.output_tokens:,}")
-    print(f"  Total cost: ${summary.cost:.4f}")
-
-    # Breakdown by model
-    if summary.model_usage:
-        print("\nBreakdown by Model:")
-        for model_key, data in summary.model_usage.items():
-            print(f"\n  {model_key}:")
-            print(
-                f"    Tokens: {data.usage.total_tokens:,} (input: {data.usage.input_tokens:,}, output: {data.usage.output_tokens:,})"
-            )
-            print(f"    Cost: ${data.cost:.4f}")
-
-    # Optional: show a specific agent's aggregated usage
-    if agent is not None:
-        agent_usage = await agent.get_token_usage()
-        if agent_usage:
-            print("\nAgent Usage:")
-            print(f"  Agent: {agent.name}")
-            print(f"  Total tokens: {agent_usage.total_tokens:,}")
-            print(f"  Input tokens: {agent_usage.input_tokens:,}")
-            print(f"  Output tokens: {agent_usage.output_tokens:,}")
-
-    print("\n" + "=" * 50)
+        # UNCOMMENT to run this MCPApp as an MCP server
+        #########################################################
+        # Create the MCP server that exposes both workflows and agent configurations,
+        # optionally using custom FastMCP settings
+        # from mcp_agent.server.app_server import create_mcp_server_for_app
+        # mcp_server = create_mcp_server_for_app(agent_app)
+        # # Run the server
+        # await mcp_server.run_sse_async()
 
-if __name__ == "__main__":
-    start = time.time()
-    asyncio.run(example_usage())
-    end = time.time()
-    t = end - start
-    print(f"Total run time: {t:.2f}s")
+if __name__ == "__main__":
+    asyncio.run(main())
+
+# When you're ready to deploy this MCPApp as a remote SSE server, run:
+# > mcp-agent deploy "hello_world"
+#
+# Congrats! You made it to the end of the getting-started example!
+# There is a lot more that mcp-agent can do, and we hope you'll explore the rest of the documentation.
+# Check out other examples in the mcp-agent repo:
+# https://github.com/lastmile-ai/mcp-agent/tree/main/examples
+# and read the docs (or ask an mcp-agent to do it for you):
+# https://docs.mcp-agent.com/
+#
+# Happy mcp-agenting!
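+#
+# Tip: when served over MCP, @app.async_tool returns a workflow ID instead of
+# blocking. A client-side sketch (hypothetical tool-call names, for illustration only):
+#   run = await server.call_tool("run_agent_async", {"prompt": "..."})
+#   status = await server.call_tool("workflows-get_status", {"run_id": ...})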
diff --git a/src/mcp_agent/data/templates/mcp_agent.config.yaml b/src/mcp_agent/data/templates/mcp_agent.config.yaml
index a9f8f1ee4..ecf88c520 100644
--- a/src/mcp_agent/data/templates/mcp_agent.config.yaml
+++ b/src/mcp_agent/data/templates/mcp_agent.config.yaml
@@ -1,56 +1,69 @@
 # MCP-Agent Configuration File
-# Schema reference for IDE autocomplete and validation
+# Config definition: https://github.com/lastmile-ai/mcp-agent/blob/main/src/mcp_agent/config.py
 $schema: https://raw.githubusercontent.com/lastmile-ai/mcp-agent/refs/heads/main/schema/mcp-agent.config.schema.json
 
 # Execution engine: asyncio or temporal
+# For temporal mode, see: https://github.com/lastmile-ai/mcp-agent/blob/main/examples/temporal/README.md
 execution_engine: asyncio
 
-# Logger configuration
 logger:
-  transports: [file] # Options: console, file
-  level: info # Options: debug, info, warning, error
-  progress_display: true # Show progress bars for token usage
-  path_settings:
-    path_pattern: "logs/mcp-agent-{unique_id}.jsonl"
-    unique_id: "timestamp" # Options: "timestamp" or "session_id"
-    timestamp_format: "%Y%m%d_%H%M%S"
-
-# MCP Servers configuration
+  transports: [console, file]
+  level: info
+  path: logs/mcp-agent.log
+
+# Configure MCP server connections (supports stdio, sse, streamable_http, and websockets)
 mcp:
   servers:
     # Filesystem access server
     filesystem:
       command: npx
-      args: ["-y", "@modelcontextprotocol/server-filesystem"]
-      # Add current directory at runtime with: context.config.mcp.servers["filesystem"].args.extend(["."])
-
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "."]
+
     # Web fetch server
     fetch:
       command: uvx
       args: ["mcp-server-fetch"]
-
-    # GitHub server (requires GITHUB_PERSONAL_ACCESS_TOKEN in secrets)
-    # github:
-    #   command: npx
-    #   args: ["-y", "@modelcontextprotocol/server-github"]
-
-    # Brave search server (requires BRAVE_API_KEY in secrets)
-    # brave-search:
-    #   command: npx
-    #   args: ["-y", "@modelcontextprotocol/server-brave-search"]
+      # env: # Environment variables passed to the stdio server
+      #   ROOT_PATH: "/workspace"
+
+    # sse_server:
+    #   transport: "sse"
+    #   url: "https://api.example.com/sse"
+    #   headers:
+    #     Authorization: "Bearer ${API_TOKEN}"
+
+    # streamable_http_server:
+    #   transport: streamable_http
+    #   url: "https://api.example.com/mcp"
+    #   headers:
+    #     Authorization: "Bearer ${API_TOKEN}"
+    #     Content-Type: "application/json"
+    #   http_timeout_seconds: 30
+    #   read_timeout_seconds: 120
+    #   terminate_on_close: true
+
+# Optional: declare agent definitions in config
+agents:
+  definitions:
+    - name: filesystem_helper
+      instruction: "You can read files and summarize their contents."
+      server_names: [filesystem]
+    - name: web_helper
+      instruction: "You can fetch web pages and summarize their content."
+      server_names: [fetch]
 
 # Model provider defaults (API keys go in mcp_agent.secrets.yaml)
 openai:
-  default_model: "gpt-4o-mini"
-
-anthropic:
-  default_model: "claude-3-5-sonnet-20241022"
+  default_model: gpt-4o-mini
 
+anthropic:
+  default_model: claude-sonnet-4-0
 # google:
 #   default_model: "gemini-1.5-pro"
 
 # OpenTelemetry configuration (optional)
 # otel:
-#   endpoint: "http://localhost:4317"
-#   service_name: "mcp-agent"
-#   export_interval: 10
\ No newline at end of file
+#   enabled: true
+#   exporters: ["file", "otlp"]
+#   otlp_settings:
+#     endpoint: "http://localhost:4318/v1/traces"
diff --git a/src/mcp_agent/workflows/factory.py b/src/mcp_agent/workflows/factory.py
index 46a186774..df22718db 100644
--- a/src/mcp_agent/workflows/factory.py
+++ b/src/mcp_agent/workflows/factory.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Any, Callable, List, Literal, Sequence, Tuple
+from typing import Any, Callable, List, Literal, Sequence, Tuple, overload
 import os
 import re
 import json
@@ -65,6 +65,21 @@ def agent_from_spec(spec: AgentSpec, context: Context | None = None) -> Agent:
     )
 
 
+@overload
+def create_llm(
+    agent: Agent | AgentSpec,
+    provider: str | None = "openai",
+    model: str | ModelPreferences | None = None,
+    request_params: RequestParams | None = None,
+    context: Context | None = None,
+) -> AugmentedLLM:
+    """
+    Create an Augmented LLM from an agent or agent spec.
+    """
+    ...
+
+
+@overload
 def create_llm(
     agent_name: str,
     server_names: List[str] | None = None,
@@ -74,6 +89,10 @@
     request_params: RequestParams | None = None,
     context: Context | None = None,
 ) -> AugmentedLLM:
+    """
+    Create an Augmented LLM.
+    """
+
     agent = agent_from_spec(
         AgentSpec(
             name=agent_name, instruction=instruction, server_names=server_names or []