|
| 1 | +--- |
| 2 | +title: "Logging tool activity" |
| 3 | +description: "Learn how to log tool activity in your MCP server" |
| 4 | +--- |
| 5 | + |
| 6 | +import { Steps, Tabs, Callout } from "nextra/components"; |
| 7 | + |
| 8 | +# Logging tool activity |
| 9 | + |
| 10 | +<GuideOverview> |
| 11 | +<GuideOverview.Outcomes> |
| 12 | + |
| 13 | +Learn how to log tool activity in your MCP server with various levels and patterns available through the MCP protocol for debugging and monitoring. Jump to [Example Code](#example-code) to see the complete code. |
| 14 | + |
| 15 | +</GuideOverview.Outcomes> |
| 16 | + |
| 17 | +<GuideOverview.Prerequisites> |
| 18 | + |
| 19 | +- [Arcade account](https://api.arcade.dev/signup) |
| 20 | +- [Arcade CLI](/home/quickstart) |
| 21 | +- [An MCP Server](/home/build-tools/create-a-mcp-server) |
| 22 | +- [uv package manager](https://docs.astral.sh/uv/getting-started/installation/) |
| 23 | + |
| 24 | +</GuideOverview.Prerequisites> |
| 25 | + |
| 26 | +<GuideOverview.YouWillLearn> |
| 27 | + |
| 28 | +- How to log tool activity in your MCP server with various levels and patterns for debugging and monitoring. |
| 29 | + |
| 30 | +</GuideOverview.YouWillLearn> |
| 31 | +</GuideOverview> |
| 32 | + |
| 33 | +## Log Levels |
| 34 | + |
| 35 | +MCP supports standard log levels including debug, info, warning, and error: |
| 36 | + |
| 37 | +```python |
| 38 | +await context.log.debug("Detailed debugging information") |
| 39 | +await context.log.info("General information") |
| 40 | +await context.log.warning("Warning messages") |
| 41 | +await context.log.error("Error messages") |
| 42 | +``` |
| 43 | + |
| 44 | +## Structured Logging |
| 45 | + |
| 46 | +Log with context and metadata: |
| 47 | + |
| 48 | +```python |
| 49 | +# Include user context |
| 50 | +await context.log.info( |
| 51 | + f"Action performed by user: {context.user_id}" |
| 52 | +) |
| 53 | + |
| 54 | +# Add operation details |
| 55 | +await context.log.debug( |
| 56 | + f"Processing {item_count} items with options: {options}" |
| 57 | +) |
| 58 | +``` |
| 59 | + |
| 60 | +## Error Logging |
| 61 | + |
| 62 | +Proper error handling and logging are important for debugging and monitoring. When an error occurs, log the error with its type and message, and log the full traceback at debug level (this requires `import traceback`), like this: |
| 63 | + |
| 64 | +```python |
| 65 | +try: |
| 66 | + # Operation that might fail |
| 67 | + result = risky_operation() |
| 68 | +except Exception as e: |
| 69 | + # Log error with type and message |
| 70 | + await context.log.error( |
| 71 | + f"Operation failed: {type(e).__name__}: {str(e)}" |
| 72 | + ) |
| 73 | + |
| 74 | + # Log traceback at debug level |
| 75 | + await context.log.debug( |
| 76 | + f"Traceback:\n{traceback.format_exc()}" |
| 77 | + ) |
| 78 | +``` |
| 79 | + |
| 80 | +## Progress Logging for Long-Running Operations |
| 81 | + |
| 82 | +Track long-running operations with progress updates like this: |
| 83 | + |
| 84 | +```python |
| 85 | +for i, item in enumerate(items): |
| 86 | + # Log progress |
| 87 | + await context.log.debug( |
| 88 | + f"Progress: {i+1}/{len(items)} ({(i+1)/len(items)*100:.0f}%)" |
| 89 | + ) |
| 90 | + |
| 91 | + # Process item |
| 92 | + process(item) |
| 93 | +``` |
| 94 | + |
| 95 | +## Batch Processing |
| 96 | + |
| 97 | +For batch operations, log each individual item at debug level and a summary at info level, like this: |
| 98 | + |
| 99 | +```python |
| 100 | +# Log batch start |
| 101 | +await context.log.info(f"Starting batch of {count} items") |
| 102 | + |
| 103 | +# Log individual items at debug level |
| 104 | +for item in items: |
| 105 | + await context.log.debug(f"Processing: {item}") |
| 106 | + |
| 107 | +# Log summary |
| 108 | +await context.log.info( |
| 109 | + f"Batch complete: {success_count} successful, {fail_count} failed" |
| 110 | +) |
| 111 | +``` |
| 112 | + |
| 113 | +## Best Practices |
| 114 | + |
| 115 | +1. **Use Appropriate Levels**: Debug for details, info for general flow, warning for issues, error for failures |
| 116 | +2. **Include Context**: Always include relevant context like user ID, operation names, counts |
| 117 | +3. **Structure Messages**: Use consistent message formats for easier parsing |
| 118 | +4. **Handle Errors Gracefully**: Log errors with enough detail to debug but not expose sensitive data |
| 119 | +5. **Progress Updates**: For long operations, provide regular progress updates |
| 120 | +6. **Batch Summaries**: For batch operations, log both individual items (debug) and summaries (info) |
| 121 | +7. **Performance Considerations**: Be mindful of log volume in production environments |
| 122 | + |
| 123 | +## Key Concepts |
| 124 | + |
| 125 | +- **`context.log`** is the MCP logging interface available inside tools: await `context.log.debug/info/warning/error(...)`, or call `context.log(level, message)` with a level name. |
| 126 | +- **Log level selection**: debug for details and tracebacks, info for general flow and summaries, warning for recoverable issues, error for failures. |
| 127 | +- **Server log level**: set `log_level="DEBUG"` on `MCPApp` to surface debug-level messages. |
| 128 | + |
| 125 | +## Example Code |
| 126 | + |
| 127 | +```python filename="logging.py" |
| 128 | +#!/usr/bin/env python |
| 129 | + |
| 130 | +import asyncio |
| 131 | +import time |
| 132 | +import traceback |
| 133 | +from typing import Annotated, Optional |
| 134 | + |
| 135 | +from arcade_mcp_server import Context, MCPApp |
| 136 | + |
| 137 | +# Create the app with debug logging |
| 138 | +app = MCPApp(name="logging_examples", version="0.1.0", log_level="DEBUG") |
| 139 | + |
| 140 | + |
| 141 | +@app.tool |
| 142 | +async def demonstrate_log_levels( |
| 143 | + context: Context, message: Annotated[str, "Base message to log at different levels"] |
| 144 | +) -> Annotated[dict, "Summary of logged messages"]: |
| 145 | + """Demonstrate all MCP logging levels.""" |
| 146 | + |
| 147 | + # Log at each level |
| 148 | + levels = ["debug", "info", "warning", "error"] |
| 149 | + logged = {} |
| 150 | + |
| 151 | + for level in levels: |
| 152 | + log_message = f"[{level.upper()}] {message}" |
| 153 | + await context.log(level, log_message) |
| 154 | + logged[level] = log_message |
| 155 | + |
| 156 | + return {"logged_messages": logged, "note": "Check your MCP client to see these messages"} |
| 157 | + |
| 158 | + |
| 159 | +@app.tool |
| 160 | +async def timed_operation( |
| 161 | + context: Context, |
| 162 | + operation_name: Annotated[str, "Name of the operation"], |
| 163 | + duration_seconds: Annotated[float, "How long the operation takes"] = 2.0, |
| 164 | +) -> Annotated[dict, "Operation timing details"]: |
| 165 | + """Perform a timed operation with detailed logging.""" |
| 166 | + |
| 167 | + start_time = time.time() |
| 168 | + |
| 169 | + # Log operation start |
| 170 | + await context.log.info( |
| 171 | + f"Starting operation: {operation_name} (expected duration: {duration_seconds}s)" |
| 172 | + ) |
| 173 | + |
| 174 | + # Simulate work with progress logging |
| 175 | + steps = 5 |
| 176 | + for i in range(steps): |
| 177 | + await context.log.debug(f"Progress: step {i + 1}/{steps} ({(i + 1) / steps * 100:.0f}%)") |
| 178 | + |
| 179 | + await asyncio.sleep(duration_seconds / steps) |
| 180 | + |
| 181 | + # Calculate results |
| 182 | + end_time = time.time() |
| 183 | + actual_duration = end_time - start_time |
| 184 | + |
| 185 | + # Log completion |
| 186 | + await context.log.info(f"Completed operation: {operation_name} in {actual_duration:.2f}s") |
| 187 | + |
| 188 | + return { |
| 189 | + "operation": operation_name, |
| 190 | + "expected_duration": duration_seconds, |
| 191 | + "actual_duration": round(actual_duration, 2), |
| 192 | + "start_time": start_time, |
| 193 | + "end_time": end_time, |
| 194 | + } |
| 195 | + |
| 196 | + |
| 197 | +@app.tool |
| 198 | +async def error_handling_example( |
| 199 | + context: Context, |
| 200 | + should_fail: Annotated[bool, "Whether to simulate an error"], |
| 201 | + error_type: Annotated[str, "Type of error to simulate"] = "ValueError", |
| 202 | +) -> Annotated[dict, "Result or error details"]: |
| 203 | + """Demonstrate error logging and handling.""" |
| 204 | + |
| 205 | + try: |
| 206 | + await context.log.debug(f"Error handling test: should_fail={should_fail}") |
| 207 | + |
| 208 | + if should_fail: |
| 209 | + if error_type == "ValueError": |
| 210 | + raise ValueError("This is a simulated value error") # noqa: TRY301 |
| 211 | + elif error_type == "KeyError": |
| 212 | + raise KeyError("missing_key") # noqa: TRY301 |
| 213 | + elif error_type == "ZeroDivisionError": |
| 214 | + result = 1 / 0 |
| 215 | + return {"result": result} |
| 216 | + else: |
| 217 | + raise Exception(f"Generic error of type: {error_type}") # noqa: TRY002, TRY301 |
| 218 | + |
| 219 | + # Success case |
| 220 | + await context.log.info("Operation completed successfully") |
| 221 | + |
| 222 | + except Exception as e: |
| 223 | + # Log the error with details |
| 224 | + await context.log.error(f"Operation failed with {type(e).__name__}: {e!s}") |
| 225 | + |
| 226 | + # Log traceback separately at debug level |
| 227 | + await context.log.debug(f"Traceback:\n{traceback.format_exc()}") |
| 228 | + |
| 229 | + return { |
| 230 | + "status": "error", |
| 231 | + "error_type": type(e).__name__, |
| 232 | + "error_message": str(e), |
| 233 | + "handled": True, |
| 234 | + } |
| 235 | + else: |
| 236 | + return {"status": "success", "message": "No errors occurred"} |
| 237 | + |
| 238 | + |
| 239 | +@app.tool |
| 240 | +async def structured_logging( |
| 241 | + context: Context, |
| 242 | + user_action: Annotated[str, "Action the user is performing"], |
| 243 | + metadata: Annotated[dict | None, "Additional metadata to log"] = None, |
| 244 | +) -> Annotated[str, "Confirmation message"]: |
| 245 | + """Demonstrate structured logging patterns.""" |
| 246 | + |
| 247 | + # Log main action |
| 248 | + await context.log.info( |
| 249 | + f"User action: {user_action} (user_id: {context.user_id or 'anonymous'})" |
| 250 | + ) |
| 251 | + |
| 252 | + # Log additional details at debug level |
| 253 | + await context.log.debug( |
| 254 | + f"Context details: {len(context.secrets) if context.secrets else 0} secrets available" |
| 255 | + ) |
| 256 | + |
| 257 | + # Log metadata if provided |
| 258 | + if metadata: |
| 259 | + await context.log.debug(f"Custom metadata: {metadata}") |
| 260 | + |
| 261 | + return f"Logged user action: {user_action}" |
| 262 | + |
| 263 | + |
| 264 | +@app.tool |
| 265 | +async def batch_processing_logs( |
| 266 | + context: Context, |
| 267 | + items: Annotated[list[str], "Items to process"], |
| 268 | + fail_on_item: Annotated[Optional[str], "Item that should fail"] = None, |
| 269 | +) -> Annotated[dict, "Processing results with detailed logs"]: |
| 270 | + """Process items with detailed logging for each step.""" |
| 271 | + |
| 272 | + results: dict[str, list] = {"successful": [], "failed": []} |
| 273 | + |
| 274 | + await context.log.info(f"Starting batch processing of {len(items)} items") |
| 275 | + |
| 276 | + for i, item in enumerate(items): |
| 277 | + try: |
| 278 | + # Log item start |
| 279 | + await context.log.debug(f"Processing item {i + 1}/{len(items)}: {item}") |
| 280 | + |
| 281 | + # Simulate failure if requested |
| 282 | + if item == fail_on_item: |
| 283 | + raise ValueError(f"Simulated failure for item: {item}") # noqa: TRY301 |
| 284 | + |
| 285 | + # Simulate processing |
| 286 | + await asyncio.sleep(0.1) |
| 287 | + |
| 288 | + results["successful"].append(item) |
| 289 | + |
| 290 | + except Exception as e: |
| 291 | + await context.log.warning(f"Failed to process '{item}': {e!s}") |
| 292 | + results["failed"].append({"item": item, "error": str(e)}) |
| 293 | + |
| 294 | + # Log summary |
| 295 | + await context.log.info( |
| 296 | + f"Batch processing complete: {len(results['successful'])} successful, " |
| 297 | + f"{len(results['failed'])} failed", |
| 298 | + ) |
| 299 | + |
| 300 | + return results |
| 301 | + |
| 302 | + |
| 303 | +if __name__ == "__main__": |
| 304 | + # Run the server |
| 305 | + app.run(host="127.0.0.1", port=8000) |
| 306 | +``` |
| 307 | + |
| 308 | +### Run your MCP server |
| 309 | + |
| 310 | +Set `log_level="DEBUG"` in `MCPApp` to see debug logs. |
| 311 | + |
| 312 | +<Tabs |
| 313 | + items={["HTTP transport (default)", "stdio transport (for Claude Desktop)"]} |
| 314 | + storageKey="preferredTransport" |
| 315 | +> |
| 316 | + <Tabs.Tab> |
| 317 | + |
| 318 | +```bash |
| 319 | +uv run logging.py http |
| 320 | +``` |
| 321 | + |
| 322 | +For HTTP transport, view your server's API docs at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs). |
| 323 | + |
| 324 | +</Tabs.Tab> |
| 325 | +<Tabs.Tab> |
| 326 | + |
| 327 | +```bash |
| 328 | +uv run logging.py stdio |
| 329 | +``` |
| 330 | + |
| 331 | +</Tabs.Tab> |
| 332 | + |
| 333 | +</Tabs> |