diff --git a/src/mcp_agent/cli/commands/init.py b/src/mcp_agent/cli/commands/init.py index e32bf6cc0..eae4e04dc 100644 --- a/src/mcp_agent/cli/commands/init.py +++ b/src/mcp_agent/cli/commands/init.py @@ -153,8 +153,8 @@ def init( scaffolding_templates = { "basic": "Simple agent with filesystem and fetch capabilities", "server": "MCP server with workflow and parallel agents", - "token": "Token counting example with monitoring", - "factory": "Agent factory with router-based selection", + # "token": "Token counting example with monitoring", + # "factory": "Agent factory with router-based selection", "minimal": "Minimal configuration files only", } @@ -360,7 +360,7 @@ def init( # No separate agents.yaml needed; agent definitions live in mcp_agent.config.yaml # Create README for the basic template - readme_content = _load_template("README_init.md") + readme_content = _load_template("README_basic.md") if readme_content: created = _write_readme(dir, readme_content, force) if created: @@ -374,10 +374,10 @@ def init( files_created.append(created) elif template == "server": - server_path = dir / "server.py" + server_path = dir / "main.py" server_content = _load_template("basic_agent_server.py") if server_content and _write(server_path, server_content, force): - files_created.append("server.py") + files_created.append("main.py") # Make executable try: server_path.chmod(server_path.stat().st_mode | 0o111) @@ -385,12 +385,19 @@ def init( pass # README for server template - readme_content = _load_template("README_init.md") + readme_content = _load_template("README_server.md") if readme_content: created = _write_readme(dir, readme_content, force) if created: files_created.append(created) + # Add basic requirements.txt + requirements_content = _load_template("requirements.txt") + if requirements_content: + created = _write_requirements(dir, requirements_content, force) + if created: + files_created.append(created) + elif template == "token": token_path = dir / "token_example.py" 
token_content = _load_template("token_counter.py") @@ -402,7 +409,7 @@ def init( except Exception: pass - readme_content = _load_template("README_init.md") + readme_content = _load_template("README_token.md") if readme_content: created = _write_readme(dir, readme_content, force) if created: @@ -425,7 +432,7 @@ def init( if agents_content and _write(agents_path, agents_content, force): files_created.append("agents.yaml") - readme_content = _load_template("README_init.md") + readme_content = _load_template("README_factory.md") if readme_content: created = _write_readme(dir, readme_content, force) if created: @@ -448,9 +455,9 @@ def init( run_file = entry_script_name or "main.py" console.print(f"3. Run your agent: [cyan]uv run {run_file}[/cyan]") elif template == "server": - console.print("3. Run the server: [cyan]uv run server.py[/cyan]") + console.print("3. Run the server: [cyan]uv run main.py[/cyan]") console.print( - " Or serve: [cyan]mcp-agent dev serve --script server.py[/cyan]" + " Or serve: [cyan]mcp-agent dev serve --script main.py[/cyan]" ) elif template == "token": console.print("3. Run the example: [cyan]uv run token_example.py[/cyan]") diff --git a/src/mcp_agent/data/templates/README_init.md b/src/mcp_agent/data/templates/README_basic.md similarity index 100% rename from src/mcp_agent/data/templates/README_init.md rename to src/mcp_agent/data/templates/README_basic.md diff --git a/src/mcp_agent/data/templates/README_factory.md b/src/mcp_agent/data/templates/README_factory.md new file mode 100644 index 000000000..e69de29bb diff --git a/src/mcp_agent/data/templates/README_server.md b/src/mcp_agent/data/templates/README_server.md new file mode 100644 index 000000000..f7b21b154 --- /dev/null +++ b/src/mcp_agent/data/templates/README_server.md @@ -0,0 +1,143 @@ +# MCP-Agent Server Starter + +Welcome! This project was generated by `mcp-agent init`. 
It demonstrates how to expose your mcp-agent application as an MCP server, making your agentic workflows available to any MCP client. + +## What's included + +- An `MCPApp` named `basic_agent_server` (see `main.py`). +- A workflow class `BasicAgentWorkflow`: + - Uses `Agent` to connect to `filesystem` and `fetch` MCP servers. + - Demonstrates multi-turn conversations with an LLM (OpenAI). + - Shows how to configure model preferences for specific requests. +- A tool function decorated with `@app.tool`: + - `grade_story(story: str, app_ctx?)` - Grades a student's short story using parallel agents (proofreader, fact checker, style enforcer) via `ParallelLLM`. + - Returns the final result directly to the caller (no polling needed). +- Server logs are forwarded to connected MCP clients as notifications. + +## What gets exposed as MCP tools + +When you run `main.py`, your MCP server exposes: + +- `workflows-list` - Lists available workflows and their parameter schemas +- `workflows-BasicAgentWorkflow-run` - Executes the BasicAgentWorkflow with input +- `workflows-get_status` - Get status for a running workflow by `run_id` +- `workflows-cancel` - Cancel a running workflow +- `grade_story` - Synchronous tool that grades a short story and returns the final result + +## Quick start + +1. Add your OpenAI API key to `mcp_agent.secrets.yaml` (or set `OPENAI_API_KEY` env var). + +NOTE: You can use another supported provider (e.g. Anthropic) instead, just be sure to set its API key in the `mcp_agent.secrets.yaml` (or set its env var) and import/use the relevant `AugmentedLLM` in `main.py`. + +2. Review `mcp_agent.config.yaml`: + + - Execution engine: `asyncio` + - Logger settings + - MCP servers: `filesystem`, `fetch` + +3. Install dependencies and run the server: + +```bash +uv pip install -r requirements.txt +uv run main.py +``` + +The server will start and expose its tools over sse. 
You'll see: + +```bash +Creating MCP server for basic_agent_server +Registered workflows: + - BasicAgentWorkflow +MCP Server settings: ... +``` + +4. Connect with an MCP client: + +You can connect to this server using any MCP client. For example, use [MCP Inspector](https://github.com/modelcontextprotocol/inspector) to explore and test: + +```bash +npx @modelcontextprotocol/inspector --transport sse --server-url http://127.0.0.1:8000/sse +``` + +This will launch the inspector UI where you can: + +- See all available tools (`grade_story`, `workflows-BasicAgentWorkflow-run`, etc.) +- Test workflow execution +- View request/response details + +5. Deploy as a remote MCP server: + +When you're ready to deploy, ensure the required API keys are set in `mcp_agent.secrets.yaml` and then run: + +```bash +uv run mcp-agent login +``` + +to authenticate to mcp-agent cloud. You will be redirected to the login page, create an mcp-agent cloud account through Google or GitHub. + +Set up your mcp-agent cloud API Key and copy & paste it into your terminal + +```bash +INFO: Directing to MCP Agent Cloud API login... +Please enter your API key 🔑: +``` + +In your terminal, deploy the MCP app: + +```bash +uv run mcp-agent deploy basic_agent_server +``` + +You will then be prompted to specify the type of secret to save your OpenAI API key as. Select (1) deployment secret so that it is available to the deployed server. + +The `deploy` command will bundle the app files and deploy them, wrapping your app as a hosted MCP SSE server with a URL of the form: +`https://<app-name>.deployments.mcp-agent.com`. + +Anything decorated with `@app.tool` (or `@app.async_tool`) runs as a Temporal workflow in the cloud. + +Since the mcp-agent app is exposed as an MCP server, it can be used in any MCP client just +like any other MCP server. 
For example, you can inspect and test the server using MCP Inspector: + +```bash +npx @modelcontextprotocol/inspector --transport sse --server-url https://<app-name>.deployments.mcp-agent.com/sse +``` + +## Notes + +- `app_ctx` is the MCPApp Context (configuration, logger, upstream session, etc.). +- Logging uses `app.logger` and is forwarded as notifications when connected to an MCP client. +- Configuration is read from `mcp_agent.config.yaml` and `mcp_agent.secrets.yaml` (env vars supported). +- The default model is configurable (see `openai.default_model` in config). +- The server runs in `asyncio` mode and exposes tools via sse by default. + +## Key concepts demonstrated + +- **Creating workflows**: Use the `@app.workflow` decorator and `Workflow` base class to define reusable workflows. +- **Defining tools**: Use `@app.tool` for synchronous tools that return results immediately. +- **Using agents**: Create `Agent` instances with specific instructions and server access (filesystem, fetch, etc.). +- **Parallel execution**: Use `ParallelLLM` to run multiple agents in parallel and aggregate their results. +- **Multi-turn conversations**: LLMs maintain conversation context across multiple `generate_str()` calls. +- **Model preferences**: Configure model selection via `RequestParams` and `ModelPreferences`. +- **Server creation**: Use `create_mcp_server_for_app()` to wrap your MCPApp as an MCP server. + +## Next steps + +- Modify the `BasicAgentWorkflow` instructions or server list to fit your use case. +- Add more tools with `@app.tool` or `@app.async_tool` as you grow the app. +- Explore the `grade_story` tool to understand parallel agent execution. +- Customize the agents used by `ParallelLLM` (proofreader, fact checker, style enforcer). 
+- Read the docs and explore examples: + - GitHub: https://github.com/lastmile-ai/mcp-agent + - Docs: https://docs.mcp-agent.com/ + - Discord: https://lmai.link/discord/mcp-agent + +## Further reading + +- Configuration reference and secrets management. +- MCP servers (stdio, SSE, streamable_http, websockets) and timeouts. +- Temporal workflows, activities, and logging/notifications when deployed. +- Agents and LLMs: `AgentSpec`, prompts, and model defaults. +- Using `@app.async_tool` for long-running workflows (returns workflow_id/run_id for polling). + +Happy building! diff --git a/src/mcp_agent/data/templates/README_token.md b/src/mcp_agent/data/templates/README_token.md new file mode 100644 index 000000000..e69de29bb diff --git a/src/mcp_agent/data/templates/basic_agent_server.py b/src/mcp_agent/data/templates/basic_agent_server.py index 367c4dc37..c6243fe55 100644 --- a/src/mcp_agent/data/templates/basic_agent_server.py +++ b/src/mcp_agent/data/templates/basic_agent_server.py @@ -7,7 +7,6 @@ 3. Declarative agent configuration using FastMCPApp decorators """ -import argparse import asyncio import os from typing import Optional @@ -20,14 +19,15 @@ from mcp_agent.agents.agent import Agent from mcp_agent.workflows.llm.augmented_llm import RequestParams from mcp_agent.workflows.llm.llm_selector import ModelPreferences -from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM + +# We are using the OpenAI augmented LLM for this example but you can swap with others (e.g. 
AnthropicAugmentedLLM) from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM from mcp_agent.executor.workflow import Workflow, WorkflowResult # Note: This is purely optional: # if not provided, a default FastMCP server will be created by MCPApp using create_mcp_server_for_app() -mcp = FastMCP(name="basic_agent_server", description="My basic agent server example.") +mcp = FastMCP(name="basic_agent_server") # Define the MCPApp instance. The server created for this app will advertise the # MCP logging capability and forward structured logs upstream to connected clients. @@ -57,8 +57,8 @@ async def run(self, input: str) -> WorkflowResult[str]: WorkflowResult containing the processed data. """ - logger = app.logger context = app.context + logger = context.logger logger.info("Current config:", data=context.config.model_dump()) logger.info( @@ -82,7 +82,7 @@ async def run(self, input: str) -> WorkflowResult[str]: result = await finder_agent.list_tools() logger.info("Tools available:", data=result.model_dump()) - llm = await finder_agent.attach_llm(AnthropicAugmentedLLM) + llm = await finder_agent.attach_llm(OpenAIAugmentedLLM) result = await llm.generate_str( message=input, @@ -123,12 +123,8 @@ async def grade_story(story: str, app_ctx: Optional[AppContext] = None) -> str: story: The student's short story to grade app_ctx: Optional MCPApp context for accessing app resources and logging """ - # Use the context's app if available for proper logging with upstream_session - _app = app_ctx.app if app_ctx else app - # Ensure the app's logger is bound to the current context with upstream_session - if _app._logger and hasattr(_app._logger, "_bound_context"): - _app._logger._bound_context = app_ctx - logger = _app.logger + context = app_ctx or app.context + logger = context.logger logger.info(f"grade_story: Received input: {story}") proofreader = Agent( @@ -184,15 +180,6 @@ async def 
grade_story(story: str, app_ctx: Optional[AppContext] = None) -> str: async def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--custom-fastmcp-settings", - action="store_true", - help="Enable custom FastMCP settings for the server", - ) - args = parser.parse_args() - use_custom_fastmcp_settings = args.custom_fastmcp_settings - async with app.run() as agent_app: # Add the current directory to the filesystem server's args if needed context = agent_app.context @@ -200,24 +187,17 @@ async def main(): context.config.mcp.servers["filesystem"].args.extend([os.getcwd()]) # Log registered workflows and agent configurations - agent_app.logger.info(f"Creating MCP server for {agent_app.name}") + context.logger.info(f"Creating MCP server for {agent_app.name}") - agent_app.logger.info("Registered workflows:") + context.logger.info("Registered workflows:") for workflow_id in agent_app.workflows: - agent_app.logger.info(f" - {workflow_id}") - - # Create the MCP server that exposes both workflows and agent configurations, - # optionally using custom FastMCP settings - fast_mcp_settings = ( - {"host": "localhost", "port": 8001, "debug": True, "log_level": "DEBUG"} - if use_custom_fastmcp_settings - else None - ) - mcp_server = create_mcp_server_for_app(agent_app, **(fast_mcp_settings or {})) - agent_app.logger.info(f"MCP Server settings: {mcp_server.settings}") + context.logger.info(f" - {workflow_id}") + + mcp_server = create_mcp_server_for_app(agent_app) + context.logger.info(f"MCP Server settings: {mcp_server.settings}") # Run the server - await mcp_server.run_stdio_async() + await mcp_server.run_sse_async() if __name__ == "__main__":