"""LangChain MCP Tool Filtering Example

Demonstrates how to filter MCP tools to create safe, focused agents.
Shows how to filter the GitHub MCP server's tools down to a read-only research agent.
"""

import asyncio
import os
from dotenv import load_dotenv
from rich import print as rprint
from rich.panel import Panel
from rich.console import Console
import azure.identity
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from pydantic import SecretStr

load_dotenv(override=True)

# Configure the chat model based on API_HOST (azure, github, or ollama)
API_HOST = os.getenv("API_HOST", "github")
if API_HOST == "azure":
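    # Keyless auth: DefaultAzureCredential (Microsoft Entra ID) supplies the Azure OpenAI bearer token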
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(),
        "https://cognitiveservices.azure.com/.default",
    )
    model = AzureChatOpenAI(
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        azure_deployment=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
        api_version=os.environ["AZURE_OPENAI_VERSION"],
        azure_ad_token_provider=token_provider,
    )
elif API_HOST == "github":
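    # GitHub Models: an OpenAI-compatible inference endpoint authenticated with GITHUB_TOKEN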
    model = ChatOpenAI(
        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
        base_url="https://models.inference.ai.azure.com",
        api_key=SecretStr(os.environ["GITHUB_TOKEN"]),
    )
elif API_HOST == "ollama":
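    # Ollama exposes an OpenAI-compatible API under /v1, so ChatOpenAI works with a placeholder key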
    model = ChatOpenAI(
        model=os.environ.get("OLLAMA_MODEL", "llama3.1"),
        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
        api_key=SecretStr("none"),
    )
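else:
    # Fail fast on unsupported values so `model` is always defined below
    raise ValueError(f"Unsupported API_HOST: {API_HOST!r}. Expected 'azure', 'github', or 'ollama'.")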

console = Console()


async def main():
    """Create a safe research agent with filtered read-only tools"""
    console.print("\n[bold white on blue] LangChain Tool Filtering Demo [/bold white on blue]\n")

    console.print(Panel.fit(
        "[bold cyan]GitHub Research Agent (Read-Only)[/bold cyan]\n"
        "Filtered to only safe search tools",
        border_style="cyan"
    ))

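    # GitHub's hosted MCP server, reached over streamable HTTP with the same GITHUB_TOKEN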
    mcp_client = MultiServerMCPClient({
        "github": {
            "url": "https://api.githubcopilot.com/mcp/",
            "transport": "streamable_http",
            "headers": {"Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}"},
        }
    })

    # Get all tools and show what we're filtering out
    all_tools = await mcp_client.get_tools()

    console.print(f"[dim]Total tools available: {len(all_tools)}[/dim]\n")

    # Filter to ONLY read operations
    safe_tool_names = ["search_repositories", "search_code", "search_issues"]
    filtered_tools = [t for t in all_tools if t.name in safe_tool_names]
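    # An explicit allow-list is safer than blocking known-bad names: any
    # write-capable tools the server adds later stay excluded by default.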

    console.print("[bold cyan]Filtered Tools (read-only):[/bold cyan]")
    for tool in filtered_tools:
        console.print(f"  ✓ {tool.name}")

    # Show a sample of what was filtered out (display only; the allow-list
    # above is what actually restricts the agent)
    blocked_tools = [t for t in all_tools if "create" in t.name or "update" in t.name or "fork" in t.name]
    if blocked_tools:
        console.print(f"\n[dim]Blocked tools ({len(blocked_tools)}): {', '.join(t.name for t in blocked_tools[:5])}...[/dim]")

    console.print()

    # Create agent with filtered tools
    agent = create_agent(
        model,
        tools=filtered_tools,
        prompt="You help users research GitHub repositories. Search and analyze information."
    )
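    # With only search tools attached, write-style requests simply have no tool the agent can call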

    query = "Find popular Python MCP server repositories"
    rprint(f"[bold]Query:[/bold] {query}\n")

    try:
        result = await agent.ainvoke({"messages": [HumanMessage(content=query)]})
        rprint(f"[bold green]Result:[/bold green]\n{result['messages'][-1].content}\n")
    except Exception as e:
        rprint(f"[bold red]Error:[/bold red] {str(e)}\n")


if __name__ == "__main__":
    asyncio.run(main())