|
| 1 | +"""ChatGPT-compatible MCP tools for Basic Memory. |
| 2 | +
|
| 3 | +These adapters expose Basic Memory's search/fetch functionality using the exact |
| 4 | +tool names and response structure OpenAI's MCP clients expect: each call returns |
| 5 | +a list containing a single `{"type": "text", "text": "{...json...}"}` item. |
| 6 | +""" |
| 7 | + |
| 8 | +import json |
| 9 | +from typing import Any, Dict, List, Optional |
| 10 | +from loguru import logger |
| 11 | +from fastmcp import Context |
| 12 | + |
| 13 | +from basic_memory.mcp.server import mcp |
| 14 | +from basic_memory.mcp.tools.search import search_notes |
| 15 | +from basic_memory.mcp.tools.read_note import read_note |
| 16 | +from basic_memory.schemas.search import SearchResponse |
| 17 | + |
| 18 | + |
| 19 | +def _format_search_results_for_chatgpt(results: SearchResponse) -> List[Dict[str, Any]]: |
| 20 | + """Format search results according to ChatGPT's expected schema. |
| 21 | +
|
| 22 | + Returns a list of result objects with id, title, and url fields. |
| 23 | + """ |
| 24 | + formatted_results = [] |
| 25 | + |
| 26 | + for result in results.results: |
| 27 | + formatted_result = { |
| 28 | + "id": result.permalink or f"doc-{len(formatted_results)}", |
| 29 | + "title": result.title if result.title and result.title.strip() else "Untitled", |
| 30 | + "url": result.permalink or "" |
| 31 | + } |
| 32 | + formatted_results.append(formatted_result) |
| 33 | + |
| 34 | + return formatted_results |
| 35 | + |
| 36 | + |
| 37 | +def _format_document_for_chatgpt( |
| 38 | + content: str, identifier: str, title: Optional[str] = None |
| 39 | +) -> Dict[str, Any]: |
| 40 | + """Format document content according to ChatGPT's expected schema. |
| 41 | +
|
| 42 | + Returns a document object with id, title, text, url, and metadata fields. |
| 43 | + """ |
| 44 | + # Extract title from markdown content if not provided |
| 45 | + if not title and isinstance(content, str): |
| 46 | + lines = content.split('\n') |
| 47 | + if lines and lines[0].startswith('# '): |
| 48 | + title = lines[0][2:].strip() |
| 49 | + else: |
| 50 | + title = identifier.split('/')[-1].replace('-', ' ').title() |
| 51 | + |
| 52 | + # Ensure title is never None |
| 53 | + if not title: |
| 54 | + title = "Untitled Document" |
| 55 | + |
| 56 | + # Handle error cases |
| 57 | + if isinstance(content, str) and content.startswith("# Note Not Found"): |
| 58 | + return { |
| 59 | + "id": identifier, |
| 60 | + "title": title or "Document Not Found", |
| 61 | + "text": content, |
| 62 | + "url": identifier, |
| 63 | + "metadata": {"error": "Document not found"} |
| 64 | + } |
| 65 | + |
| 66 | + return { |
| 67 | + "id": identifier, |
| 68 | + "title": title or "Untitled Document", |
| 69 | + "text": content, |
| 70 | + "url": identifier, |
| 71 | + "metadata": {"format": "markdown"} |
| 72 | + } |
| 73 | + |
| 74 | + |
@mcp.tool(
    description="Search for content across the knowledge base"
)
async def search(
    query: str,
    context: Context | None = None,
) -> List[Dict[str, Any]]:
    """ChatGPT/OpenAI MCP search adapter returning a single text content item.

    Args:
        query: Search query (full-text syntax supported by `search_notes`)
        context: Optional FastMCP context passed through for auth/session data

    Returns:
        List with one dict: `{ "type": "text", "text": "{...JSON...}" }`
        where the JSON body contains `results`, `total_count`, and echo of `query`.
    """
    logger.info(f"ChatGPT search request: query='{query}'")

    def _as_content(payload: Dict[str, Any]) -> List[Dict[str, Any]]:
        # OpenAI's MCP clients require a single text item wrapping a JSON string.
        return [{"type": "text", "text": json.dumps(payload, ensure_ascii=False)}]

    try:
        # Delegate to search_notes with defaults suited to ChatGPT consumption.
        response = await search_notes.fn(
            query=query,
            project=None,  # Let project resolution happen automatically
            page=1,
            page_size=10,  # Reasonable default for ChatGPT consumption
            search_type="text",  # Default to full-text search
            context=context,
        )

        # search_notes signals failure by returning an error string.
        if isinstance(response, str):
            logger.warning(f"Search failed with error: {response[:100]}...")
            return _as_content(
                {
                    "results": [],
                    "error": "Search failed",
                    "error_details": response[:500],  # Truncate long error messages
                }
            )

        formatted = _format_search_results_for_chatgpt(response)
        logger.info(f"Search completed: {len(formatted)} results returned")
        return _as_content(
            {
                "results": formatted,
                "total_count": len(response.results),
                "query": query,
            }
        )

    except Exception as e:
        logger.error(f"ChatGPT search failed for query '{query}': {e}")
        return _as_content(
            {
                "results": [],
                "error": "Internal search error",
                "error_message": str(e)[:200],
            }
        )
| 144 | + |
| 145 | + |
@mcp.tool(
    description="Fetch the full contents of a search result document"
)
async def fetch(
    id: str,
    context: Context | None = None,
) -> List[Dict[str, Any]]:
    """ChatGPT/OpenAI MCP fetch adapter returning a single text content item.

    Args:
        id: Document identifier (permalink, title, or memory URL)
        context: Optional FastMCP context passed through for auth/session data

    Returns:
        List with one dict: `{ "type": "text", "text": "{...JSON...}" }`
        where the JSON body includes `id`, `title`, `text`, `url`, and metadata.
    """
    logger.info(f"ChatGPT fetch request: id='{id}'")

    try:
        note_content = await read_note.fn(
            identifier=id,
            project=None,  # Let project resolution happen automatically
            page=1,
            page_size=10,  # Default pagination
            context=context,
        )
        document = _format_document_for_chatgpt(note_content, id)
        logger.info(
            f"Fetch completed: id='{id}', content_length={len(document.get('text', ''))}"
        )
    except Exception as e:
        # Surface the failure as an error document rather than raising to the client.
        logger.error(f"ChatGPT fetch failed for id '{id}': {e}")
        document = {
            "id": id,
            "title": "Fetch Error",
            "text": f"Failed to fetch document: {str(e)[:200]}",
            "url": id,
            "metadata": {"error": "Fetch failed"},
        }

    # Return in MCP content array format as required by OpenAI
    return [{"type": "text", "text": json.dumps(document, ensure_ascii=False)}]
0 commit comments