diff --git a/src/mcp_agent/cli/commands/init.py b/src/mcp_agent/cli/commands/init.py
index 81c4aad33..4f44ca1e6 100644
--- a/src/mcp_agent/cli/commands/init.py
+++ b/src/mcp_agent/cli/commands/init.py
@@ -133,8 +133,7 @@ def init(
     scaffolding_templates = {
         "basic": "Simple agent with filesystem and fetch capabilities",
         "server": "MCP server with workflow and parallel agents",
-        # "token": "Token counting example with monitoring",
-        # "factory": "Agent factory with router-based selection",
+        "factory": "Agent factory with router-based selection",
         "minimal": "Minimal configuration files only",
     }
@@ -364,28 +363,11 @@ def init(
             if created:
                 files_created.append(created)
 
-    elif template == "token":
-        token_path = dir / "token_example.py"
-        token_content = _load_template("token_counter.py")
-        if token_content and _write(token_path, token_content, force):
-            files_created.append("token_example.py")
-            # Make executable
-            try:
-                token_path.chmod(token_path.stat().st_mode | 0o111)
-            except Exception:
-                pass
-
-        readme_content = _load_template("README_token.md")
-        if readme_content:
-            created = _write_readme(dir, readme_content, force)
-            if created:
-                files_created.append(created)
-
     elif template == "factory":
-        factory_path = dir / "factory.py"
+        factory_path = dir / "main.py"
         factory_content = _load_template("agent_factory.py")
         if factory_content and _write(factory_path, factory_content, force):
-            files_created.append("factory.py")
+            files_created.append("main.py")
             # Make executable
             try:
                 factory_path.chmod(factory_path.stat().st_mode | 0o111)
@@ -425,12 +407,9 @@ def init(
         console.print(
             "   Or serve: [cyan]mcp-agent dev serve --script main.py[/cyan]"
         )
-    elif template == "token":
-        console.print("3. Run the example: [cyan]uv run token_example.py[/cyan]")
-        console.print("   Watch token usage in real-time!")
     elif template == "factory":
         console.print("3. Customize agents in [cyan]agents.yaml[/cyan]")
-        console.print("4. Run the factory: [cyan]uv run factory.py[/cyan]")
+        console.print("4. Run the factory: [cyan]uv run main.py[/cyan]")
     elif template == "minimal":
         console.print("3. Create your agent script")
         console.print("   See examples: [cyan]mcp-agent init --list[/cyan]")
@@ -459,7 +438,6 @@ def interactive(
     templates = {
         "1": ("basic", "Simple agent with filesystem and fetch"),
         "2": ("server", "MCP server with workflows"),
-        "3": ("token", "Token counting with monitoring"),
        "4": ("factory", "Agent factory with routing"),
        "5": ("minimal", "Config files only"),
     }
diff --git a/src/mcp_agent/data/examples/cloud/mcp/README.md b/src/mcp_agent/data/examples/cloud/mcp/README.md
index ece7e90b2..afb5ba5d4 100644
--- a/src/mcp_agent/data/examples/cloud/mcp/README.md
+++ b/src/mcp_agent/data/examples/cloud/mcp/README.md
@@ -184,3 +184,12 @@ This will launch the MCP Inspector UI where you can:
 - See all available tools
 - Test workflow execution
 - View request/response details
+
+Make sure Inspector is configured with the following settings:
+
+| Setting          | Value                                                |
+| ---------------- | ---------------------------------------------------- |
+| _Transport Type_ | _SSE_                                                |
+| _SSE_            | _https://[server_id].deployments.mcp-agent.com/sse_ |
+| _Header Name_    | _Authorization_                                      |
+| _Bearer Token_   | _your-mcp-agent-cloud-api-token_                     |
diff --git a/src/mcp_agent/data/templates/README_basic.md b/src/mcp_agent/data/templates/README_basic.md
index 17dc0a6ed..fb434ee31 100644
--- a/src/mcp_agent/data/templates/README_basic.md
+++ b/src/mcp_agent/data/templates/README_basic.md
@@ -1,4 +1,4 @@
-# MCP-Agent Starter
+# mcp-agent Starter
 
 Welcome! This project was generated by `mcp-agent init`. It’s a minimal, readable starting point you can run locally or expose as an MCP server.
 
@@ -15,23 +15,15 @@ Welcome! This project was generated by `mcp-agent init`. It’s a minimal, reada
 ## Quick start
 
-1. Add API keys to `mcp_agent.secrets.yaml` (or set env vars):
+1. Add your OpenAI API key to `mcp_agent.secrets.yaml` (or set the `OPENAI_API_KEY` env var).
 
-NOTE: You can use another supported provider (e.g. Anthropic) instead, just be sure to set its API key in the `mcp_agent.secrets.yaml` (or set its env var) and import/use the relevant `AugmentedLLM` in `main.py`.
+NOTE: You can use another supported provider (e.g. Anthropic) instead; just be sure to set its API key in `mcp_agent.secrets.yaml` (or set its env var) and update the `provider` parameter in `main.py`.
 
-- `OPENAI_API_KEY` (recommended)
-- `ANTHROPIC_API_KEY` (optional)
-
-2. Review `mcp_agent.config.yaml`:
-
-   - Execution engine: `asyncio`
-   - Logger settings
-   - MCP servers: `filesystem`, `fetch`
-   - `agents.definitions`: sample agents (`filesystem_helper`, `web_helper`)
-
-3. Run locally:
+2. Install dependencies and run locally:
 
 ```bash
+uv init
+uv add "mcp-agent[openai]"
 uv run main.py
 ```
 
@@ -40,7 +32,7 @@
 You’ll see two summaries printed:
 
 - A summary of `README.md` from your current directory.
 - A summary of the intro page at modelcontextprotocol.io.
 
-4. Run locally as an MCP server:
+3. Run locally as an MCP server:
 
 - In `main.py`, UNCOMMENT the server lines that call `create_mcp_server_for_app(agent_app)` and `run_sse_async()`.
 
@@ -54,7 +46,7 @@ You’ll see two summaries printed:
 npx @modelcontextprotocol/inspector --transport sse --server-url http://127.0.0.1:8000/sse
 ```
 
-5. Deploy a remote MCP server:
+4. Deploy a remote MCP server:
 
 When you're ready to deploy, ensure the required API keys are set in `mcp_agent.secrets.yaml` and then run:
 
 ```bash
 uv run mcp-agent login
 ```
 
-to authenticate to mcp-agent cloud. You will be redirected to the login page, create an mcp-agent cloud account through Google or Github.
+to authenticate to mcp-agent cloud. You will be redirected to the login page to create an mcp-agent cloud account through Google or GitHub.
 
 Set up your mcp-agent cloud API Key and copy & paste it into your terminal
 
@@ -74,7 +66,7 @@ Please enter your API key 🔑:
 
 In your terminal, deploy the MCP app:
 
 ```bash
-uv run mcp-agent deploy hello_world --no-auth
+uv run mcp-agent deploy hello_world
 ```
 
 The `deploy` command will bundle the app files and deploy them, wrapping your app as a hosted MCP SSE server with a URL of the form:
 
@@ -89,6 +81,15 @@ like any other MCP server. For example, you can inspect and test the server usin
 npx @modelcontextprotocol/inspector --transport sse --server-url https://<server_id>.deployments.mcp-agent.com/sse
 ```
 
+Make sure Inspector is configured with the following settings:
+
+| Setting          | Value                                                |
+| ---------------- | ---------------------------------------------------- |
+| _Transport Type_ | _SSE_                                                |
+| _SSE_            | _https://[server_id].deployments.mcp-agent.com/sse_ |
+| _Header Name_    | _Authorization_                                      |
+| _Bearer Token_   | _your-mcp-agent-cloud-api-token_                     |
+
 ## Notes
 
 - `app_ctx` is the MCPApp Context (configuration, logger, upstream session, etc.).
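For the basic README's "Run locally as an MCP server" step, the commented-out lines in the generated `main.py` amount to something like the sketch below. This is a hedged illustration only: the import path for `create_mcp_server_for_app`, the app name, and the surrounding setup are assumptions, and the generated `main.py` remains the authoritative version.

```python
# Rough sketch of the server lines the basic README asks you to uncomment.
# The import path below is an assumption; check the generated main.py.
from mcp_agent.app import MCPApp
from mcp_agent.server.app_server import create_mcp_server_for_app  # assumed path

app = MCPApp(name="basic_agent")  # name is illustrative


async def serve() -> None:
    async with app.run() as agent_app:
        # Wrap the running MCPApp as an MCP server and expose it over SSE,
        # which is what Inspector connects to at http://127.0.0.1:8000/sse.
        mcp_server = create_mcp_server_for_app(agent_app)
        await mcp_server.run_sse_async()
```

Once the server is up, the Inspector command shown in the README (`npx @modelcontextprotocol/inspector --transport sse --server-url http://127.0.0.1:8000/sse`) can connect to it.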
@@ -106,11 +107,4 @@ npx @modelcontextprotocol/inspector --transport sse --server-url https://
diff --git a/src/mcp_agent/data/templates/README_factory.md b/src/mcp_agent/data/templates/README_factory.md
new file mode 100644
--- /dev/null
+++ b/src/mcp_agent/data/templates/README_factory.md
+`https://<server_id>.deployments.mcp-agent.com`.
+
+Anything decorated with `@app.async_tool` (or `@app.tool`) runs as a Temporal workflow in the cloud.
+
+Since the mcp-agent app is exposed as an MCP server, it can be used in any MCP client just
+like any other MCP server. For example, you can inspect and test the server using MCP Inspector:
+
+```bash
+npx @modelcontextprotocol/inspector --transport sse --server-url https://<server_id>.deployments.mcp-agent.com/sse
+```
+
+Make sure Inspector is configured with the following settings:
+
+| Setting          | Value                                                |
+| ---------------- | ---------------------------------------------------- |
+| _Transport Type_ | _SSE_                                                |
+| _SSE_            | _https://[server_id].deployments.mcp-agent.com/sse_ |
+| _Header Name_    | _Authorization_                                      |
+| _Bearer Token_   | _your-mcp-agent-cloud-api-token_                     |
+
+## Next steps
+
+- Tweak the agent definitions in `agents.yaml` to fit your use case.
+- Try other factory workflows, such as Orchestrator.
+- Add tools with `@app.tool` or `@app.async_tool` as you grow the app.
+- Read the docs and explore examples:
+  - GitHub: https://github.com/lastmile-ai/mcp-agent
+  - Docs: https://docs.mcp-agent.com/
+  - Discord: https://lmai.link/discord/mcp-agent
+
+Happy building!
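The Inspector settings in the factory README reduce to "SSE transport plus an `Authorization: Bearer <token>` header", and any MCP client can connect the same way. As a hedged illustration, here is how the official MCP Python SDK (the `mcp` package, which is not part of this repo) would list the deployed server's tools; the server URL and token are placeholders.

```python
# Minimal sketch: connect to a deployed mcp-agent server over SSE with a bearer
# token, mirroring the Inspector settings table above. The URL and token are
# placeholders, not real values.
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def list_deployed_tools() -> None:
    url = "https://<server_id>.deployments.mcp-agent.com/sse"
    headers = {"Authorization": "Bearer your-mcp-agent-cloud-api-token"}

    async with sse_client(url, headers=headers) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])


if __name__ == "__main__":
    asyncio.run(list_deployed_tools())
```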
diff --git a/src/mcp_agent/data/templates/README_server.md b/src/mcp_agent/data/templates/README_server.md
index f7b21b154..f85e3d708 100644
--- a/src/mcp_agent/data/templates/README_server.md
+++ b/src/mcp_agent/data/templates/README_server.md
@@ -1,4 +1,4 @@
-# MCP-Agent Server Starter
+# mcp-agent Server Starter
 
 Welcome! This project was generated by `mcp-agent init`. It demonstrates how to expose your mcp-agent application as an MCP server, making your agentic workflows available to any MCP client.
 
@@ -39,7 +39,8 @@ NOTE: You can use another supported provider (e.g. Anthropic) instead, just be s
 3. Install dependencies and run the server:
 
 ```bash
-uv pip install -r requirements.txt
+uv init
+uv add "mcp-agent[openai]"
 uv run main.py
 ```
 
@@ -74,7 +75,7 @@ When you're ready to deploy, ensure the required API keys are set in `mcp_agent.
 uv run mcp-agent login
 ```
 
-to authenticate to mcp-agent cloud. You will be redirected to the login page, create an mcp-agent cloud account through Google or Github.
+to authenticate to mcp-agent cloud. You will be redirected to the login page to create an mcp-agent cloud account through Google or GitHub.
 
 Set up your mcp-agent cloud API Key and copy & paste it into your terminal
 
@@ -103,6 +104,15 @@ like any other MCP server. For example, you can inspect and test the server usin
 npx @modelcontextprotocol/inspector --transport sse --server-url https://<server_id>.deployments.mcp-agent.com/sse
 ```
 
+Make sure Inspector is configured with the following settings:
+
+| Setting          | Value                                                |
+| ---------------- | ---------------------------------------------------- |
+| _Transport Type_ | _SSE_                                                |
+| _SSE_            | _https://[server_id].deployments.mcp-agent.com/sse_ |
+| _Header Name_    | _Authorization_                                      |
+| _Bearer Token_   | _your-mcp-agent-cloud-api-token_                     |
+
 ## Notes
 
 - `app_ctx` is the MCPApp Context (configuration, logger, upstream session, etc.).
@@ -132,12 +142,4 @@ npx @modelcontextprotocol/inspector --transport sse --server-url https://
diff --git a/src/mcp_agent/data/templates/agent_factory.py b/src/mcp_agent/data/templates/agent_factory.py
--- a/src/mcp_agent/data/templates/agent_factory.py
+++ b/src/mcp_agent/data/templates/agent_factory.py
+async def route_prompt(prompt: str, app_ctx=None) -> str:
+    """Route a prompt to the appropriate agent using an LLMRouter."""
+    context = app_ctx or app.context
+
+    # Add current directory to filesystem server (if needed by your setup)
+    context.config.mcp.servers["filesystem"].args.extend(["."])
+
+    agents_path = Path(__file__).resolve().parent / "agents.yaml"
+    specs = load_agent_specs_from_file(str(agents_path), context=context)
+
+    router = await create_router_llm(
+        server_names=["filesystem", "fetch"],
+        agents=specs,
+        provider="openai",
+        context=context,
+    )
+
+    response = await router.generate_str(prompt)
+    return response
+
 async def main():
-    async with MCPApp(name="factory_demo").run() as agent_app:
-        context = agent_app.context
-        # Add current directory to filesystem server (if needed by your setup)
-        context.config.mcp.servers["filesystem"].args.extend(["."])
-
-        agents_path = Path(__file__).resolve().parent / "agents.yaml"
-        specs = load_agent_specs_from_file(str(agents_path), context=context)
-
-        router = await create_router_llm(
-            server_names=["filesystem", "fetch"],
-            agents=specs,
-            provider="openai",
-            context=context,
+    async with app.run() as agent_app:
+        route_res = await route_prompt(
+            prompt="Find the README and summarize it", app_ctx=agent_app.context
         )
-        res = await router.generate_str("Find the README and summarize it")
-        print("Routing result:", res)
+        print("Routing result:", route_res)
 
 if __name__ == "__main__":
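The agent_factory.py hunk above picks up partway into the new `route_prompt` function; the module-level setup it relies on (the `app` object and the imports for `create_router_llm` and `load_agent_specs_from_file`) is not shown. A plausible reconstruction, based on the surviving body and the factory README's mention of `@app.async_tool`, is sketched below; the import paths, decorator usage, and app name are assumptions rather than the PR's literal code.

```python
# Hedged reconstruction of the module-level setup implied by the diff above.
# Import paths, the decorator form, and the app name are assumptions.
from pathlib import Path

from mcp_agent.app import MCPApp
# Assumed location of the factory helpers used in the function body:
from mcp_agent.workflows.factory import create_router_llm, load_agent_specs_from_file

app = MCPApp(name="factory_demo")


@app.async_tool
async def route_prompt(prompt: str, app_ctx=None) -> str:
    """Route a prompt to the appropriate agent using an LLMRouter."""
    ...  # body as shown in the diff above
```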
diff --git a/src/mcp_agent/data/templates/agents.yaml b/src/mcp_agent/data/templates/agents.yaml
index f505732f1..d7d8b733c 100644
--- a/src/mcp_agent/data/templates/agents.yaml
+++ b/src/mcp_agent/data/templates/agents.yaml
@@ -2,7 +2,7 @@
 # This file defines multiple specialized agents that can be dynamically selected
 
 # File system agent - searches and reads local files
-filesystem_agent:
+- filesystem_agent:
   name: filesystem_agent
   instruction: |
     You are a filesystem expert. Your role is to:
@@ -15,7 +15,7 @@
   server_names:
     - filesystem
 
 # Web research agent - fetches and analyzes web content
-web_agent:
+- web_agent:
   name: web_agent
   instruction: |
     You are a web research specialist. Your role is to:
@@ -28,7 +28,7 @@
   server_names:
     - fetch
 
 # Code analysis agent - analyzes code structure and quality
-code_analyst:
+- code_analyst:
   name: code_analyst
   instruction: |
     You are a code analysis expert. Your role is to:
@@ -41,7 +41,7 @@ code_analyst:
   server_names:
     - filesystem
 
 # Documentation agent - generates and maintains documentation
-doc_writer:
+- doc_writer:
   name: doc_writer
   instruction: |
     You are a documentation specialist. Your role is to:
@@ -54,7 +54,7 @@ doc_writer:
   server_names:
     - filesystem
 
 # General assistant - handles miscellaneous tasks
-general_assistant:
+- general_assistant:
   name: general_assistant
   instruction: |
     You are a helpful general assistant. Your role is to:
     - Answer general questions
     - Provide helpful information
     - Assist with various tasks
     - Route complex requests to specialized agents
     Be helpful, accurate, and concise in your responses.
-  server_names: []
\ No newline at end of file
+  server_names: []
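For clarity on what the agents.yaml change does structurally: the file moves from a top-level mapping keyed by agent name to a list of entries, while the nested keys keep their indentation. The snippet below only shows what `yaml.safe_load` returns for each shape; how `load_agent_specs_from_file` actually interprets the file is defined by mcp-agent and is not reproduced here.

```python
# Parsed shape of the old (mapping) vs. new (list) agents.yaml layout.
import yaml

old_style = yaml.safe_load(
    """
filesystem_agent:
  name: filesystem_agent
  server_names:
    - filesystem
"""
)
# -> {'filesystem_agent': {'name': 'filesystem_agent', 'server_names': ['filesystem']}}

new_style = yaml.safe_load(
    """
- filesystem_agent:
  name: filesystem_agent
  server_names:
    - filesystem
"""
)
# -> [{'filesystem_agent': None, 'name': 'filesystem_agent', 'server_names': ['filesystem']}]

print(old_style)
print(new_style)
```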