Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions examples/mcp_basic_qwen_agent/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# MCP Qwen Agent Example - "Finder" Agent

This example demonstrates how to create and run a basic "Finder" Agent using Qwen models via Ollama's OpenAI-compatible API and MCP. The Agent has access to both the `fetch` and `filesystem` MCP servers, enabling it to retrieve information from URLs and the local file system.

## Prerequisites

- [Ollama](https://ollama.com/) installed and running
- Qwen model pulled in Ollama (run `ollama pull qwen2.5:32b`)

## Setup

Before running the agent, ensure you have Ollama installed and the Qwen models pulled:

```bash
# Install Ollama (Mac/Linux)
curl -fsSL https://ollama.com/install.sh | sh

# Start the Ollama service
ollama serve

# Pull the Qwen model (in another terminal)
ollama pull qwen2.5:32b
```
95 changes: 95 additions & 0 deletions examples/mcp_basic_qwen_agent/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
import asyncio
import os
import time

from mcp_agent.app import MCPApp
from mcp_agent.config import (
Settings,
LoggerSettings,
MCPSettings,
MCPServerSettings,
QwenSettings,
)
from mcp_agent.agents.agent import Agent
from mcp_agent.workflows.llm.augmented_llm import RequestParams
from mcp_agent.workflows.llm.augmented_llm_qwen import QwenAugmentedLLM

# Programmatic configuration for this example. It mirrors the contents of
# mcp_agent.config.yaml and is kept as a reference for configuring the app
# entirely in code (see the note above `app` below).
settings = Settings(
    execution_engine="asyncio",
    logger=LoggerSettings(type="file", level="debug"),
    mcp=MCPSettings(
        servers={
            # MCP server that fetches content from URLs.
            "fetch": MCPServerSettings(
                command="uvx",
                args=["mcp-server-fetch"],
            ),
            # MCP server exposing local files. The allowed root directory is
            # appended at runtime (os.getcwd()) inside example_usage().
            "filesystem": MCPServerSettings(
                command="npx",
                args=["-y", "@modelcontextprotocol/server-filesystem"],
            ),
        }
    ),
    qwen=QwenSettings(
        api_key="ollama",  # Default for Ollama
        base_url="http://localhost:11434/v1",  # Ollama's OpenAI-compatible endpoint
        default_model="qwen2.5:32b",
    ),
)

# Settings can either be specified programmatically,
# or loaded from mcp_agent.config.yaml/mcp_agent.secrets.yaml
app = MCPApp(name="mcp_basic_qwen_agent")  # settings=settings)


async def example_usage():
    """Demonstrate the "finder" agent: connect to the fetch and filesystem
    MCP servers, list the available tools, and run three Qwen requests
    (read a local file, fetch a URL, and a follow-up multi-turn summary)."""
    async with app.run() as running_app:
        logger = running_app.logger
        context = running_app.context

        logger.info("Current config:", data=context.config.model_dump())

        # Grant the filesystem server access to the current working directory.
        context.config.mcp.servers["filesystem"].args.extend([os.getcwd()])

        finder = Agent(
            name="finder",
            instruction="""You are an agent with access to the filesystem,
            as well as the ability to fetch URLs. Your job is to identify
            the closest match to a user's request, make the appropriate tool calls,
            and return the URI and CONTENTS of the closest match.""",
            server_names=["fetch", "filesystem"],
        )

        async with finder:
            logger.info("finder: Connected to server, calling list_tools...")
            tools = await finder.list_tools()
            logger.info("Tools available:", data=tools.model_dump())

            qwen = await finder.attach_llm(QwenAugmentedLLM)

            # Request 1: read a local file through the filesystem server.
            config_text = await qwen.generate_str(
                message="Print the contents of mcp_agent.config.yaml verbatim",
            )
            logger.info(f"mcp_agent.config.yaml contents: {config_text}")

            # Request 2: retrieve remote content through the fetch server.
            intro = await qwen.generate_str(
                message="Print the first 2 paragraphs of https://modelcontextprotocol.io/introduction",
            )
            logger.info(f"First 2 paragraphs of Model Context Protocol docs: {intro}")

            # Multi-turn conversations with specific model
            tweet = await qwen.generate_str(
                message="Summarize those paragraphs in a 128 character tweet",
                request_params=RequestParams(
                    model="qwen2.5:32b",  # You can specify different Qwen models
                ),
            )
            logger.info(f"Paragraph as a tweet: {tweet}")


if __name__ == "__main__":
    # Use time.perf_counter() rather than time.time() to measure elapsed
    # time: perf_counter() is a monotonic, high-resolution clock, whereas
    # time.time() is wall-clock time and can jump if the system clock is
    # adjusted mid-run.
    start = time.perf_counter()
    asyncio.run(example_usage())
    elapsed = time.perf_counter() - start

    print(f"Total run time: {elapsed:.2f}s")
26 changes: 26 additions & 0 deletions examples/mcp_basic_qwen_agent/mcp_agent.config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
$schema: ../../schema/mcp-agent.config.schema.json

execution_engine: asyncio
logger:
transports: [console, file]
level: debug
progress_display: true
path_settings:
path_pattern: "logs/mcp-agent-{unique_id}.jsonl"
unique_id: "timestamp" # Options: "timestamp" or "session_id"
timestamp_format: "%Y%m%d_%H%M%S"

mcp:
servers:
fetch:
command: "uvx"
args: ["mcp-server-fetch"]
filesystem:
command: "npx"
args: ["-y", "@modelcontextprotocol/server-filesystem"]

qwen:
# Ollama OpenAI-compatible API settings
api_key: "ollama"
base_url: "http://localhost:11434/v1"
default_model: "qwen2.5:32b"
103 changes: 55 additions & 48 deletions schema/mcp-agent.config.schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,7 @@
"type": "string"
}
},
"required": [
"api_key",
"endpoint"
],
"required": ["api_key", "endpoint"],
"title": "AzureSettings",
"type": "object"
},
Expand Down Expand Up @@ -188,10 +185,7 @@
},
"unique_id": {
"default": "timestamp",
"enum": [
"timestamp",
"session_id"
],
"enum": ["timestamp", "session_id"],
"title": "Unique Id",
"type": "string"
},
Expand All @@ -209,24 +203,14 @@
"properties": {
"type": {
"default": "console",
"enum": [
"none",
"console",
"file",
"http"
],
"enum": ["none", "console", "file", "http"],
"title": "Type",
"type": "string"
},
"transports": {
"default": [],
"items": {
"enum": [
"none",
"console",
"file",
"http"
],
"enum": ["none", "console", "file", "http"],
"type": "string"
},
"title": "Transports",
Expand All @@ -235,12 +219,7 @@
},
"level": {
"default": "info",
"enum": [
"debug",
"info",
"warning",
"error"
],
"enum": ["debug", "info", "warning", "error"],
"title": "Level",
"type": "string",
"description": "Minimum logging level"
Expand Down Expand Up @@ -361,9 +340,7 @@
"description": "Optional URI alias for presentation to the server"
}
},
"required": [
"uri"
],
"required": ["uri"],
"title": "MCPRootSettings",
"type": "object"
},
Expand Down Expand Up @@ -418,11 +395,7 @@
},
"transport": {
"default": "stdio",
"enum": [
"stdio",
"sse",
"websocket"
],
"enum": ["stdio", "sse", "websocket"],
"title": "Transport",
"type": "string",
"description": "The transport mechanism."
Expand Down Expand Up @@ -580,11 +553,7 @@
},
"reasoning_effort": {
"default": "medium",
"enum": [
"low",
"medium",
"high"
],
"enum": ["low", "medium", "high"],
"title": "Reasoning Effort",
"type": "string"
},
Expand Down Expand Up @@ -699,10 +668,7 @@
"title": "Api Key"
}
},
"required": [
"host",
"task_queue"
],
"required": ["host", "task_queue"],
"title": "TemporalSettings",
"type": "object"
},
Expand All @@ -724,6 +690,38 @@
},
"title": "UsageTelemetrySettings",
"type": "object"
},
"QwenSettings": {
"additionalProperties": true,
"description": "Settings for using Qwen models through Ollama's OpenAI-compatible API.",
"properties": {
"api_key": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Api Key"
},
"base_url": {
"title": "Base Url",
"type": "string",
"description": "Base URL for the Ollama API (e.g., http://localhost:11434/v1)"
},
"default_model": {
"default": "qwen2.5-coder-32b-instruct",
"title": "Default Model",
"type": "string",
"description": "Default Qwen model to use"
}
},
"required": ["base_url"],
"title": "QwenSettings",
"type": "object"
}
},
"additionalProperties": true,
Expand All @@ -745,10 +743,7 @@
},
"execution_engine": {
"default": "asyncio",
"enum": [
"asyncio",
"temporal"
],
"enum": ["asyncio", "temporal"],
"title": "Execution Engine",
"type": "string",
"description": "Execution engine for the MCP Agent application"
Expand Down Expand Up @@ -896,9 +891,21 @@
"enable_detailed_telemetry": false
},
"description": "Usage tracking settings for the MCP Agent application"
},
"qwen": {
"anyOf": [
{
"$ref": "#/$defs/QwenSettings"
},
{
"type": "null"
}
],
"default": null,
"description": "Settings for using Qwen models through Ollama's OpenAI-compatible API"
}
},
"title": "MCP Agent Configuration Schema",
"type": "object",
"$schema": "http://json-schema.org/draft-07/schema#"
}
}
20 changes: 20 additions & 0 deletions src/mcp_agent/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -282,6 +282,23 @@ class LoggerSettings(BaseModel):
"""HTTP timeout seconds for event transport"""


class QwenSettings(BaseModel):
    """
    Settings for using Qwen models through Ollama's OpenAI-compatible API.

    Populated from the optional ``qwen`` section of mcp_agent.config.yaml
    (or built programmatically) and consumed via ``Settings.qwen``.
    """

    api_key: str | None = None
    """API key for authentication."""

    base_url: str
    """Base URL for the Ollama API (e.g., http://localhost:11434/v1)."""

    default_model: str = "qwen2.5-coder-32b-instruct"
    """Default Qwen model to use."""

    # extra="allow" permits additional provider options in the config file
    # without raising validation errors.
    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)


class Settings(BaseSettings):
"""
Settings class for the MCP Agent application.
Expand Down Expand Up @@ -331,6 +348,9 @@ class Settings(BaseSettings):
usage_telemetry: UsageTelemetrySettings | None = UsageTelemetrySettings()
"""Usage tracking settings for the MCP Agent application"""

qwen: QwenSettings | None = None
"""Settings for using Qwen models in the MCP Agent application"""

@classmethod
def find_config(cls) -> Path | None:
"""Find the config file in the current directory or parent directories."""
Expand Down
Loading
Loading