diff --git a/.cursorrules b/.cursorrules
index 0e6f354..1070548 100644
--- a/.cursorrules
+++ b/.cursorrules
@@ -86,6 +86,32 @@ Snippet: This is a snippet of the search result
 ```
 If needed, you can further use the `web_scraper.py` file to scrape the web page content.
 
+## MCP Integration
+
+The Model Context Protocol (MCP) standardizes how applications provide context to Large Language Models (LLMs). MCP allows for secure and flexible data integration, making it easier to build sophisticated AI applications. The following tools and instructions are available for MCP integration:
+
+1. MCP Configuration:
+   - Add MCP-related configurations to your `.env` file.
+   - Example configurations:
+     ```
+     MCP_SERVER_URL=http://your-mcp-server-url
+     MCP_API_KEY=your_mcp_api_key
+     ```
+
+2. MCP Server Usage:
+   - You can interact with MCP servers using the `tools/llm_api.py` file.
+   - Example usage:
+     ```python
+     from tools.llm_api import create_llm_client, query_llm
+
+     # Create MCP client
+     client = create_llm_client(provider="mcp")
+
+     # Query MCP server
+     response = query_llm("Your prompt here", client=client, provider="mcp")
+     print(response)
+     ```
+
 # Lessons
 
 ## User Specified Lessons
diff --git a/.env.example b/.env.example
index 12ff991..59866d4 100644
--- a/.env.example
+++ b/.env.example
@@ -3,4 +3,8 @@ ANTHROPIC_API_KEY=your_anthropic_api_key_here
 DEEPSEEK_API_KEY=your_deepseek_api_key_here
 GOOGLE_API_KEY=your_google_api_key_here
 AZURE_OPENAI_API_KEY=your_azure_openai_api_key_here
-AZURE_OPENAI_MODEL_DEPLOYMENT=gpt-4o-ms
\ No newline at end of file
+AZURE_OPENAI_MODEL_DEPLOYMENT=gpt-4o-ms
+
+# MCP-related configurations
+MCP_SERVER_URL=your_mcp_server_url_here
+MCP_API_KEY=your_mcp_api_key_here
diff --git a/README.md b/README.md
index 2bbdf1d..8523c41 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,11 @@ Devin impressed many by acting like an intern who writes its own plan, updates t
 4. Self-Evolution
 
    Whenever you correct the AI, it can update its "lessons learned" in .cursorrules. Over time, it accumulates project-specific knowledge and gets smarter with each iteration. It makes AI a coachable and coach-worthy partner.
-
+
+5. Model Context Protocol (MCP) Integration
+
+   The repository now supports the Model Context Protocol (MCP), which standardizes how applications provide context to Large Language Models (LLMs). MCP allows for secure and flexible data integration, making it easier to build sophisticated AI applications. For more information, visit the [MCP introduction page](https://modelcontextprotocol.io/introduction).
+
 ## Usage
 
 1. Choose your setup method:
@@ -56,6 +60,7 @@ Devin impressed many by acting like an intern who writes its own plan, updates t
 
 2. Configure your environment:
    - Set up your API keys (optional)
+   - Add MCP-related configurations to your `.env` file
 
 3. Start exploring advanced tasks—such as data gathering, building quick prototypes, or cross-referencing external resources—in a fully agentic manner.
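Before exercising the `mcp` provider, it is worth confirming that the two variables documented above actually reach the process. The following is a minimal sketch, not part of the diff: it assumes python-dotenv is installed (the mechanism the repository's `load_environment` helper in `tools/llm_api.py` also appears to rely on) and uses only the variable names introduced in `.env.example`.

```python
# Sketch: confirm the MCP settings from .env are visible before using tools/llm_api.py.
# Assumes python-dotenv is installed; variable names match .env.example above.
import os

from dotenv import load_dotenv

load_dotenv()  # pull MCP_SERVER_URL / MCP_API_KEY from .env into the environment

missing = [name for name in ("MCP_SERVER_URL", "MCP_API_KEY") if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing MCP configuration in .env: {', '.join(missing)}")
print("MCP configuration found; tools/llm_api.py can create an MCP client.")
```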
diff --git a/tools/llm_api.py b/tools/llm_api.py
index 86072a5..ef18ebe 100644
--- a/tools/llm_api.py
+++ b/tools/llm_api.py
@@ -11,6 +11,7 @@ import base64
 from typing import Optional, Union, List
 import mimetypes
+import requests
 
 def load_environment():
     """Load environment variables from .env files in order of precedence"""
@@ -108,6 +109,12 @@ def create_llm_client(provider="openai"):
             base_url="http://192.168.180.137:8006/v1",
             api_key="not-needed"
         )
+    elif provider == "mcp":
+        api_key = os.getenv('MCP_API_KEY')
+        server_url = os.getenv('MCP_SERVER_URL')
+        if not api_key or not server_url:
+            raise ValueError("MCP_API_KEY or MCP_SERVER_URL not found in environment variables")
+        return {"api_key": api_key, "server_url": server_url}
     else:
         raise ValueError(f"Unsupported provider: {provider}")
 
@@ -209,6 +216,19 @@ def query_llm(prompt: str, client=None, model=None, provider="openai", image_pat
             model = client.GenerativeModel(model)
             response = model.generate_content(prompt)
             return response.text
+
+        elif provider == "mcp":
+            headers = {
+                "Authorization": f"Bearer {client['api_key']}",
+                "Content-Type": "application/json"
+            }
+            data = {
+                "prompt": prompt,
+                "model": model
+            }
+            response = requests.post(client['server_url'], headers=headers, json=data)
+            response.raise_for_status()
+            return response.json().get("response")
 
     except Exception as e:
         print(f"Error querying LLM: {e}", file=sys.stderr)
@@ -217,7 +237,7 @@ def query_llm(prompt: str, client=None, model=None, provider="openai", image_pat
 def main():
     parser = argparse.ArgumentParser(description='Query an LLM with a prompt')
     parser.add_argument('--prompt', type=str, help='The prompt to send to the LLM', required=True)
-    parser.add_argument('--provider', choices=['openai','anthropic','gemini','local','deepseek','azure'], default='openai', help='The API provider to use')
+    parser.add_argument('--provider', choices=['openai','anthropic','gemini','local','deepseek','azure','mcp'], default='openai', help='The API provider to use')
    parser.add_argument('--model', type=str, help='The model to use (default depends on provider)')
     parser.add_argument('--image', type=str, help='Path to an image file to attach to the prompt')
     args = parser.parse_args()
@@ -242,4 +262,4 @@ def main():
     print("Failed to get response from LLM")
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
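For local end-to-end testing of the new provider path, a throwaway endpoint can stand in for a real server. The sketch below is not an actual Model Context Protocol implementation; it only mirrors the contract the new `query_llm` branch assumes (a Bearer `MCP_API_KEY` in the `Authorization` header, a JSON body with `prompt` and `model`, and a JSON reply carrying the text under a `response` key), using only the standard library. The port and reply text are arbitrary placeholders.

```python
# Stand-in endpoint for exercising the "mcp" provider branch of query_llm locally.
# This only mimics the request/response shape used above; it is NOT a real MCP server.
import json
import os
from http.server import BaseHTTPRequestHandler, HTTPServer

EXPECTED_KEY = os.getenv("MCP_API_KEY", "your_mcp_api_key")  # must match the value in .env

class StubMCPHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        # query_llm sends "Authorization: Bearer <MCP_API_KEY>"; reject anything else.
        if self.headers.get("Authorization") != f"Bearer {EXPECTED_KEY}":
            self.send_response(401)
            self.end_headers()
            return
        # query_llm posts a JSON body of the form {"prompt": ..., "model": ...}.
        length = int(self.headers.get("Content-Length", 0))
        payload = json.loads(self.rfile.read(length) or b"{}")
        # query_llm reads the reply text from the "response" key of the JSON body.
        body = json.dumps({"response": f"stub reply to: {payload.get('prompt')}"}).encode()
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(body)

if __name__ == "__main__":
    # Point MCP_SERVER_URL at http://localhost:8765 to route query_llm here.
    HTTPServer(("localhost", 8765), StubMCPHandler).serve_forever()
```

With the stub listening and `MCP_SERVER_URL=http://localhost:8765` plus a matching `MCP_API_KEY` in `.env`, `python tools/llm_api.py --prompt "Hello" --provider mcp` should print the stub's reply.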