Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 4 additions & 26 deletions src/mcp_agent/cli/commands/init.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,8 +133,7 @@ def init(
scaffolding_templates = {
"basic": "Simple agent with filesystem and fetch capabilities",
"server": "MCP server with workflow and parallel agents",
# "token": "Token counting example with monitoring",
# "factory": "Agent factory with router-based selection",
"factory": "Agent factory with router-based selection",
"minimal": "Minimal configuration files only",
}

Expand Down Expand Up @@ -364,28 +363,11 @@ def init(
if created:
files_created.append(created)

elif template == "token":
token_path = dir / "token_example.py"
token_content = _load_template("token_counter.py")
if token_content and _write(token_path, token_content, force):
files_created.append("token_example.py")
# Make executable
try:
token_path.chmod(token_path.stat().st_mode | 0o111)
except Exception:
pass

readme_content = _load_template("README_token.md")
if readme_content:
created = _write_readme(dir, readme_content, force)
if created:
files_created.append(created)

elif template == "factory":
factory_path = dir / "factory.py"
factory_path = dir / "main.py"
factory_content = _load_template("agent_factory.py")
if factory_content and _write(factory_path, factory_content, force):
files_created.append("factory.py")
files_created.append("main.py")
# Make executable
try:
factory_path.chmod(factory_path.stat().st_mode | 0o111)
Expand Down Expand Up @@ -425,12 +407,9 @@ def init(
console.print(
" Or serve: [cyan]mcp-agent dev serve --script main.py[/cyan]"
)
elif template == "token":
console.print("3. Run the example: [cyan]uv run token_example.py[/cyan]")
console.print(" Watch token usage in real-time!")
elif template == "factory":
console.print("3. Customize agents in [cyan]agents.yaml[/cyan]")
console.print("4. Run the factory: [cyan]uv run factory.py[/cyan]")
console.print("4. Run the factory: [cyan]uv run main.py[/cyan]")
elif template == "minimal":
console.print("3. Create your agent script")
console.print(" See examples: [cyan]mcp-agent init --list[/cyan]")
Expand Down Expand Up @@ -459,7 +438,6 @@ def interactive(
templates = {
"1": ("basic", "Simple agent with filesystem and fetch"),
"2": ("server", "MCP server with workflows"),
"3": ("token", "Token counting with monitoring"),
"4": ("factory", "Agent factory with routing"),
"5": ("minimal", "Config files only"),
}
Expand Down
9 changes: 9 additions & 0 deletions src/mcp_agent/data/examples/cloud/mcp/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -184,3 +184,12 @@ This will launch the MCP Inspector UI where you can:
- See all available tools
- Test workflow execution
- View request/response details

Make sure Inspector is configured with the following settings:

| Setting | Value |
| ---------------- | --------------------------------------------------- |
| _Transport Type_ | _SSE_ |
| _SSE_ | _https://[server_id].deployments.mcp-agent.com/sse_ |
| _Header Name_ | _Authorization_ |
| _Bearer Token_ | _your-mcp-agent-cloud-api-token_ |
44 changes: 19 additions & 25 deletions src/mcp_agent/data/templates/README_basic.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# MCP-Agent Starter
# mcp-agent Starter

Welcome! This project was generated by `mcp-agent init`. It’s a minimal, readable starting point you can run locally or expose as an MCP server.

Expand All @@ -15,23 +15,15 @@ Welcome! This project was generated by `mcp-agent init`. It’s a minimal, reada

## Quick start

1. Add API keys to `mcp_agent.secrets.yaml` (or set env vars):
1. Add your OpenAI API key to `mcp_agent.secrets.yaml` (or set `OPENAI_API_KEY` env var).

NOTE: You can use another supported provider (e.g. Anthropic) instead, just be sure to set its API key in the `mcp_agent.secrets.yaml` (or set its env var) and import/use the relevant `AugmentedLLM` in `main.py`.
NOTE: You can use another supported provider (e.g. Anthropic) instead, just be sure to set its API key in the `mcp_agent.secrets.yaml` (or set its env var) and update the `provider` parameter in `main.py`.

- `OPENAI_API_KEY` (recommended)
- `ANTHROPIC_API_KEY` (optional)

2. Review `mcp_agent.config.yaml`:

- Execution engine: `asyncio`
- Logger settings
- MCP servers: `filesystem`, `fetch`
- `agents.definitions`: sample agents (`filesystem_helper`, `web_helper`)

3. Run locally:
2. Install dependencies and run locally:

```bash
uv init
uv add "mcp-agent[openai]"
uv run main.py
```

Expand All @@ -40,7 +32,7 @@ You’ll see two summaries printed:
- A summary of `README.md` from your current directory.
- A summary of the intro page at modelcontextprotocol.io.

4. Run locally as an MCP server:
3. Run locally as an MCP server:

- In `main.py`, UNCOMMENT the server lines that call `create_mcp_server_for_app(agent_app)` and `run_sse_async()`.

Expand All @@ -54,15 +46,15 @@ You’ll see two summaries printed:
npx @modelcontextprotocol/inspector --transport sse --server-url http://127.0.0.1:8000/sse
```

5. Deploy a remote MCP server:
4. Deploy a remote MCP server:

When you're ready to deploy, ensure the required API keys are set in `mcp_agent.secrets.yaml` and then run:

```bash
uv run mcp-agent login
```

to authenticate to mcp-agent cloud. You will be redirected to the login page, create an mcp-agent cloud account through Google or Github.
to authenticate to mcp-agent cloud. You will be redirected to the login page to create an mcp-agent cloud account through Google or GitHub.

Set up your mcp-agent cloud API Key and copy & paste it into your terminal

Expand All @@ -74,7 +66,7 @@ Please enter your API key 🔑:
In your terminal, deploy the MCP app:

```bash
uv run mcp-agent deploy hello_world --no-auth
uv run mcp-agent deploy hello_world
```

The `deploy` command will bundle the app files and deploy them, wrapping your app as a hosted MCP SSE server with a URL of the form:
Expand All @@ -89,6 +81,15 @@ like any other MCP server. For example, you can inspect and test the server usin
npx @modelcontextprotocol/inspector --transport sse --server-url https://<server_id>.deployments.mcp-agent.com/sse
```

Make sure Inspector is configured with the following settings:

| Setting | Value |
| ---------------- | --------------------------------------------------- |
| _Transport Type_ | _SSE_ |
| _SSE_ | _https://[server_id].deployments.mcp-agent.com/sse_ |
| _Header Name_ | _Authorization_ |
| _Bearer Token_ | _your-mcp-agent-cloud-api-token_ |

## Notes

- `app_ctx` is the MCPApp Context (configuration, logger, upstream session, etc.).
Expand All @@ -106,11 +107,4 @@ npx @modelcontextprotocol/inspector --transport sse --server-url https://<server
- Docs: https://docs.mcp-agent.com/
- Discord: https://lmai.link/discord/mcp-agent

## Further reading

- Configuration reference and secrets management.
- MCP servers (stdio, SSE, streamable_http, websockets) and timeouts.
- Temporal workflows, activities, and logging/notifications when deployed.
- Agents and LLMs: `AgentSpec`, prompts, and model defaults.

Happy building!
86 changes: 86 additions & 0 deletions src/mcp_agent/data/templates/README_factory.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
# mcp-agent Factory Starter

Welcome! This project was generated by `mcp-agent init`. It demonstrates how to use the agent factory pattern with `LLMRouter` to intelligently route prompts to the appropriate agents based on their capabilities. This is just one of the many useful [workflow patterns](https://docs.mcp-agent.com/mcp-agent-sdk/overview#workflow-patterns) supported by mcp-agent out of the box.

## What's included

- An `MCPApp` named `factory_demo` (see `main.py`).
- A tool defined with a decorator:
- `route_prompt(prompt: str, app_ctx?)` - Routes prompts to the right agent using `create_router_llm`.
- Loads agent specifications from `agents.yaml` (finder and coder agents).
- Automatically selects the best agent for each request based on server capabilities.
- `agents.yaml` - Contains agent specifications with different capabilities:
- `finder`: Can read files and fetch URLs (filesystem + fetch servers)
- `coder`: Can inspect and modify code files (filesystem server only)

## Quick start

1. Add your OpenAI API key to `mcp_agent.secrets.yaml` (or set `OPENAI_API_KEY` env var).

NOTE: You can use another supported provider (e.g. Anthropic) instead, just be sure to set its API key in the `mcp_agent.secrets.yaml` (or set its env var) and update the `provider` parameter in `main.py`.

2. Install dependencies and run locally:

```bash
uv init
uv add "mcp-agent[openai]"
uv run main.py
```

You'll see the router automatically select the appropriate agent and execute your request. For the default prompt, the router chooses the `finder` agent because the task requires reading a file (filesystem capability).

3. Deploy a remote MCP server:

When you're ready to deploy, ensure the required API keys are set in `mcp_agent.secrets.yaml` and then run:

```bash
uv run mcp-agent login
```

to authenticate to mcp-agent cloud. You will be redirected to the login page to create an mcp-agent cloud account through Google or GitHub.

Set up your mcp-agent cloud API Key and copy & paste it into your terminal

```bash
INFO: Directing to MCP Agent Cloud API login...
Please enter your API key 🔑:
```

In your terminal, deploy the MCP app:

```bash
uv run mcp-agent deploy agent_factory
```

The `deploy` command will bundle the app files and deploy them, wrapping your app as a hosted MCP SSE server with a URL of the form:
`https://<server_id>.deployments.mcp-agent.com`.

Anything decorated with `@app.async_tool` (or `@app.tool`) runs as a Temporal workflow in the cloud.

Since the mcp-agent app is exposed as an MCP server, it can be used in any MCP client just
like any other MCP server. For example, you can inspect and test the server using MCP Inspector:

```bash
npx @modelcontextprotocol/inspector --transport sse --server-url https://<server_id>.deployments.mcp-agent.com/sse
```

Make sure Inspector is configured with the following settings:

| Setting | Value |
| ---------------- | --------------------------------------------------- |
| _Transport Type_ | _SSE_ |
| _SSE_ | _https://[server_id].deployments.mcp-agent.com/sse_ |
| _Header Name_ | _Authorization_ |
| _Bearer Token_ | _your-mcp-agent-cloud-api-token_ |

## Next steps

- Tweak the agent definitions in `agents.yaml` to fit your use case.
- Try other factory workflows, such as Orchestrator.
- Add tools with `@app.tool` or `@app.async_tool` as you grow the app.
- Read the docs and explore examples:
- GitHub: https://github.com/lastmile-ai/mcp-agent
- Docs: https://docs.mcp-agent.com/
- Discord: https://lmai.link/discord/mcp-agent

Happy building!
24 changes: 13 additions & 11 deletions src/mcp_agent/data/templates/README_server.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# MCP-Agent Server Starter
# mcp-agent Server Starter

Welcome! This project was generated by `mcp-agent init`. It demonstrates how to expose your mcp-agent application as an MCP server, making your agentic workflows available to any MCP client.

Expand Down Expand Up @@ -39,7 +39,8 @@ NOTE: You can use another supported provider (e.g. Anthropic) instead, just be s
3. Install dependencies and run the server:

```bash
uv pip install -r requirements.txt
uv init
uv add "mcp-agent[openai]"
uv run main.py
```

Expand Down Expand Up @@ -74,7 +75,7 @@ When you're ready to deploy, ensure the required API keys are set in `mcp_agent.
uv run mcp-agent login
```

to authenticate to mcp-agent cloud. You will be redirected to the login page, create an mcp-agent cloud account through Google or Github.
to authenticate to mcp-agent cloud. You will be redirected to the login page to create an mcp-agent cloud account through Google or GitHub.

Set up your mcp-agent cloud API Key and copy & paste it into your terminal

Expand Down Expand Up @@ -103,6 +104,15 @@ like any other MCP server. For example, you can inspect and test the server usin
npx @modelcontextprotocol/inspector --transport sse --server-url https://<server_id>.deployments.mcp-agent.com/sse
```

Make sure Inspector is configured with the following settings:

| Setting | Value |
| ---------------- | --------------------------------------------------- |
| _Transport Type_ | _SSE_ |
| _SSE_ | _https://[server_id].deployments.mcp-agent.com/sse_ |
| _Header Name_ | _Authorization_ |
| _Bearer Token_ | _your-mcp-agent-cloud-api-token_ |

## Notes

- `app_ctx` is the MCPApp Context (configuration, logger, upstream session, etc.).
Expand Down Expand Up @@ -132,12 +142,4 @@ npx @modelcontextprotocol/inspector --transport sse --server-url https://<server
- Docs: https://docs.mcp-agent.com/
- Discord: https://lmai.link/discord/mcp-agent

## Further reading

- Configuration reference and secrets management.
- MCP servers (stdio, SSE, streamable_http, websockets) and timeouts.
- Temporal workflows, activities, and logging/notifications when deployed.
- Agents and LLMs: `AgentSpec`, prompts, and model defaults.
- Using `@app.async_tool` for long-running workflows (returns workflow_id/run_id for polling).

Happy building!
Empty file.
50 changes: 33 additions & 17 deletions src/mcp_agent/data/templates/agent_factory.py
Original file line number Diff line number Diff line change
@@ -1,32 +1,48 @@
import asyncio

from pathlib import Path

from mcp.server.fastmcp import Context

from mcp_agent.app import MCPApp
from mcp_agent.workflows.factory import (
load_agent_specs_from_file,
create_router_llm,
load_agent_specs_from_file,
)

app = MCPApp(name="factory_demo", description="Demo of agent factory with LLM routing")


@app.async_tool()
async def route_prompt(
    prompt: str = "Find the README and summarize it", app_ctx: Context | None = None
) -> str:
    """Route a prompt to the best-suited agent using an LLMRouter.

    Loads agent specifications from the adjacent ``agents.yaml``, builds a
    router LLM over the ``filesystem`` and ``fetch`` servers, and lets the
    router select and run the agent whose capabilities match the request.

    Args:
        prompt: The user request to route and execute.
        app_ctx: Optional MCPApp context supplied when invoked as a tool;
            falls back to the module-level app's context when omitted.

    Returns:
        The selected agent's string response.
    """
    context = app_ctx or app.context

    # Grant the filesystem server access to the current directory.
    # Guard against duplicates: this tool can be invoked many times per
    # process, and the original unconditional extend() mutated the shared
    # config so "." accumulated once per call.
    fs_args = context.config.mcp.servers["filesystem"].args
    if "." not in fs_args:
        fs_args.append(".")

    agents_path = Path(__file__).resolve().parent / "agents.yaml"
    specs = load_agent_specs_from_file(str(agents_path), context=context)

    router = await create_router_llm(
        server_names=["filesystem", "fetch"],
        agents=specs,
        provider="openai",
        context=context,
    )

    response = await router.generate_str(prompt)
    return response


async def main():
async with MCPApp(name="factory_demo").run() as agent_app:
context = agent_app.context
# Add current directory to filesystem server (if needed by your setup)
context.config.mcp.servers["filesystem"].args.extend(["."])

agents_path = Path(__file__).resolve().parent / "agents.yaml"
specs = load_agent_specs_from_file(str(agents_path), context=context)

router = await create_router_llm(
server_names=["filesystem", "fetch"],
agents=specs,
provider="openai",
context=context,
async with app.run() as agent_app:
route_res = await route_prompt(
prompt="Find the README and summarize it", app_ctx=agent_app.context
)

res = await router.generate_str("Find the README and summarize it")
print("Routing result:", res)
print("Routing result:", route_res)


if __name__ == "__main__":
Expand Down
Loading
Loading