Commit 5024095

Merge pull request #6 from madebygps/main
Adds tool filtering example
2 parents 6a1b0a5 + f541242

File tree

3 files changed: +111 -0 lines changed

README.md

Lines changed: 2 additions & 0 deletions
@@ -48,6 +48,8 @@ Run any script with: `uv run <script_name>`
  - **basic_mcp_http.py** - MCP server with HTTP transport on port 8000
  - **basic_mcp_stdio.py** - MCP server with stdio transport for VS Code integration
  - **langchainv1_mcp_http.py** - LangChain agent with MCP integration
+ - **langchainv1_mcp_github.py** - LangChain tool filtering demo with GitHub MCP (requires `GITHUB_TOKEN`)
+ - **openai_agents_tool_filtering.py** - OpenAI Agents SDK tool filtering demo with Microsoft Learn MCP
  - **agentframework_mcp_learn.py** - Microsoft Agent Framework integration with MCP

  ## MCP Server Configuration

langchainv1_mcp_github.py

Lines changed: 108 additions & 0 deletions
@@ -0,0 +1,108 @@

"""LangChain MCP Tool Filtering Example

Demonstrates how to filter MCP tools to create safe, focused agents.
Shows filtering for read-only research agent using GitHub MCP server.
"""

import asyncio
import os
from dotenv import load_dotenv
from rich import print as rprint
from rich.panel import Panel
from rich.console import Console
import azure.identity
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from pydantic import SecretStr

load_dotenv(override=True)

# Configure model
API_HOST = os.getenv("API_HOST", "github")
if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(),
        "https://cognitiveservices.azure.com/.default",
    )
    model = AzureChatOpenAI(
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_deployment=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_ad_token_provider=token_provider,
    )
elif API_HOST == "github":
    model = ChatOpenAI(
        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
        base_url="https://models.inference.ai.azure.com",
        api_key=SecretStr(os.environ["GITHUB_TOKEN"]),
    )
elif API_HOST == "ollama":
    model = ChatOpenAI(
        model=os.environ.get("OLLAMA_MODEL", "llama3.1"),
        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
        api_key=SecretStr("none"),
    )

console = Console()


async def main():
    """Create a safe research agent with filtered read-only tools"""
    console.print("\n[bold white on blue] LangChain Tool Filtering Demo [/bold white on blue]\n")

    console.print(Panel.fit(
        "[bold cyan]GitHub Research Agent (Read-Only)[/bold cyan]\n"
        "Filtered to only safe search tools",
        border_style="cyan"
    ))

    mcp_client = MultiServerMCPClient({
        "github": {
            "url": "https://api.githubcopilot.com/mcp/",
            "transport": "streamable_http",
            "headers": {"Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}"},
        }
    })

    # Get all tools and show what we're filtering out
    all_tools = await mcp_client.get_tools()

    console.print(f"[dim]Total tools available: {len(all_tools)}[/dim]\n")

    # Filter to ONLY read operations
    safe_tool_names = ['search_repositories', 'search_code', 'search_issues']
    filtered_tools = [t for t in all_tools if t.name in safe_tool_names]

    console.print("[bold cyan]Filtered Tools (read-only):[/bold cyan]")
    for tool in filtered_tools:
        console.print(f" ✓ {tool.name}")

    # Show what was filtered out
    blocked_tools = [t for t in all_tools if 'create' in t.name or 'update' in t.name or 'fork' in t.name]
    if blocked_tools:
        console.print(f"\n[dim]Blocked tools ({len(blocked_tools)}): " + ", ".join([t.name for t in blocked_tools[:5]]) + "...[/dim]")

    console.print()

    # Create agent with filtered tools
    agent = create_agent(
        model,
        tools=filtered_tools,
        prompt="You help users research GitHub repositories. Search and analyze information."
    )

    query = "Find popular Python MCP server repositories"
    rprint(f"[bold]Query:[/bold] {query}\n")

    try:
        result = await agent.ainvoke({"messages": [HumanMessage(content=query)]})
        rprint(f"[bold green]Result:[/bold green]\n{result['messages'][-1].content}\n")
    except Exception as e:
        rprint(f"[bold red]Error:[/bold red] {str(e)}\n")


if __name__ == "__main__":
    asyncio.run(main())
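
The demo filters with an explicit allowlist of exact tool names; the `blocked_tools` list is computed only for display. If the exact names are unknown or may change, a predicate-based allowlist is a common generalization. A minimal sketch follows, assuming the same LangChain tool objects (with a `.name` attribute) returned by `MultiServerMCPClient.get_tools()`; the `filter_tools` helper and the read-only prefix heuristic are illustrative additions, not part of this commit:

from typing import Callable, Iterable


def is_read_only(name: str) -> bool:
    # Assumption: search_/get_/list_ prefixes mark read-only operations.
    # Not an exhaustive guarantee for the GitHub MCP server's tool set.
    return name.startswith(("search_", "get_", "list_"))


def filter_tools(tools: Iterable, allow: Callable[[str], bool]) -> list:
    """Keep only tools whose name passes the allow predicate."""
    return [tool for tool in tools if allow(tool.name)]


# Usage inside main(), replacing the exact-name allowlist:
# filtered_tools = filter_tools(all_tools, is_read_only)

Either way, the allowlist fails closed: tools added to the server later stay blocked until they are explicitly allowed, which is the safer default for a read-only research agent.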

spanish/README.md

Lines changed: 1 addition & 0 deletions
@@ -49,6 +49,7 @@ Run any script with: `uv run <script_name>`
  - **basic_mcp_stdio.py** - MCP server with stdio transport (useful for VS Code / Copilot integration)
  - **langchainv1_mcp_http.py** - LangChain agent that uses MCP tools
  - **agentframework_mcp_learn.py** - Integration with Microsoft Agent Framework and MCP
+ - **langchainv1_mcp_github.py** - LangChain tool filtering demo with GitHub MCP (requires `GITHUB_TOKEN`)

  ## MCP Server Configuration