diff --git a/.gitignore b/.gitignore index aa1fee1f..8205d38c 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,6 @@ venv .DS_Store .idea/ .vscode/ -uv.lock \ No newline at end of file +uv.lock +build/ +dist/ diff --git a/README.md b/README.md index 92cde72d..eb7a72c0 100644 --- a/README.md +++ b/README.md @@ -327,6 +327,34 @@ See also [the llm tag](https://simonwillison.net/tags/llm/) on my blog. * [Browsing logs using Datasette](https://llm.datasette.io/en/stable/logging.html#browsing-logs-using-datasette) * [Backing up your database](https://llm.datasette.io/en/stable/logging.html#backing-up-your-database) * [SQL schema](https://llm.datasette.io/en/stable/logging.html#sql-schema) +* [HTTP Debugging and Logging](https://llm.datasette.io/en/stable/http-debugging.html) + * [Quick Start](https://llm.datasette.io/en/stable/http-debugging.html#quick-start) + * [Environment Variables](https://llm.datasette.io/en/stable/http-debugging.html#environment-variables) + * [CLI Flags](https://llm.datasette.io/en/stable/http-debugging.html#cli-flags) + * [What Gets Logged](https://llm.datasette.io/en/stable/http-debugging.html#what-gets-logged) + * [INFO Level (`LLM_HTTP_LOGGING=1`)](https://llm.datasette.io/en/stable/http-debugging.html#info-level-llm-http-logging-1) + * [DEBUG Level (`LLM_HTTP_DEBUG=1`)](https://llm.datasette.io/en/stable/http-debugging.html#debug-level-llm-http-debug-1) + * [Provider-Specific Debugging](https://llm.datasette.io/en/stable/http-debugging.html#provider-specific-debugging) + * [OpenAI Models (o1, o3, GPT-4, etc.)](https://llm.datasette.io/en/stable/http-debugging.html#openai-models-o1-o3-gpt-4-etc) + * [Gemini Models (including 2.5-pro reasoning)](https://llm.datasette.io/en/stable/http-debugging.html#gemini-models-including-2-5-pro-reasoning) + * [Anthropic Claude Models (including reasoning)](https://llm.datasette.io/en/stable/http-debugging.html#anthropic-claude-models-including-reasoning) + * [Real-World Use Cases](https://llm.datasette.io/en/stable/http-debugging.html#real-world-use-cases) + * [Debugging Reasoning Models](https://llm.datasette.io/en/stable/http-debugging.html#debugging-reasoning-models) + * [Investigating API Errors](https://llm.datasette.io/en/stable/http-debugging.html#investigating-api-errors) + * [Understanding Token Usage](https://llm.datasette.io/en/stable/http-debugging.html#understanding-token-usage) + * [Monitoring Rate Limits](https://llm.datasette.io/en/stable/http-debugging.html#monitoring-rate-limits) + * [Advanced Configuration](https://llm.datasette.io/en/stable/http-debugging.html#advanced-configuration) + * [Combining with LLM Logging](https://llm.datasette.io/en/stable/http-debugging.html#combining-with-llm-logging) + * [Filtering Logs](https://llm.datasette.io/en/stable/http-debugging.html#filtering-logs) + * [Performance Considerations](https://llm.datasette.io/en/stable/http-debugging.html#performance-considerations) + * [Troubleshooting](https://llm.datasette.io/en/stable/http-debugging.html#troubleshooting) + * [Common Issues](https://llm.datasette.io/en/stable/http-debugging.html#common-issues) + * [Integration with External Tools](https://llm.datasette.io/en/stable/http-debugging.html#integration-with-external-tools) + * [Security Considerations](https://llm.datasette.io/en/stable/http-debugging.html#security-considerations) + * [Backward Compatibility](https://llm.datasette.io/en/stable/http-debugging.html#backward-compatibility) + * [Legacy Support](https://llm.datasette.io/en/stable/http-debugging.html#legacy-support) + * [Migration Path](https://llm.datasette.io/en/stable/http-debugging.html#migration-path) + * [Related Documentation](https://llm.datasette.io/en/stable/http-debugging.html#related-documentation) * [Related tools](https://llm.datasette.io/en/stable/related-tools.html) * [strip-tags](https://llm.datasette.io/en/stable/related-tools.html#strip-tags) * [ttok](https://llm.datasette.io/en/stable/related-tools.html#ttok) diff --git a/docs/help.md b/docs/help.md index a5a63566..1d1c3c7e 100644 --- a/docs/help.md +++ b/docs/help.md @@ -63,9 +63,16 @@ Usage: llm [OPTIONS] COMMAND [ARGS]... llm prompt --help + Environment variables for HTTP debugging: - LLM_HTTP_LOGGING=1: Enable HTTP + request/response logging - LLM_HTTP_DEBUG=1: Enable verbose HTTP debugging - + LLM_OPENAI_SHOW_RESPONSES=1: Legacy OpenAI-only debugging + Options: - --version Show the version and exit. - -h, --help Show this message and exit. + --version Show the version and exit. + --http-logging Enable HTTP request/response logging for debugging + --http-debug Enable verbose HTTP debugging (includes connection details) + --no-color Disable colored output in HTTP logging + -h, --help Show this message and exit. Commands: prompt* Execute a prompt @@ -1082,4 +1089,4 @@ Options: --key TEXT OpenAI API key -h, --help Show this message and exit. ``` - \ No newline at end of file + diff --git a/docs/http-debugging.md b/docs/http-debugging.md new file mode 100644 index 00000000..c981a2a7 --- /dev/null +++ b/docs/http-debugging.md @@ -0,0 +1,274 @@ +(http-debugging)= +# HTTP Debugging and Logging + +LLM CLI provides comprehensive HTTP logging capabilities to debug requests and responses across all model providers. This feature enables you to see exactly what's being sent to and received from AI APIs, making it invaluable for troubleshooting and understanding model behavior. + +## Quick Start + +Enable HTTP logging for all providers: + +```bash +# Basic HTTP logging (shows requests and responses) +LLM_HTTP_LOGGING=1 llm -m gpt-4o "Hello world" + +# Verbose HTTP debugging (includes connection details) +LLM_HTTP_DEBUG=1 llm -m gemini-2.5-pro "Reasoning task" + +# Using CLI flags (alternative to environment variables) +llm --http-logging -m claude-4-sonnet "Debug this request" +llm --http-debug -m o3 "Show all HTTP details" + +# For development/testing without global install +LLM_HTTP_DEBUG=1 uv run llm -m gpt-4o "Test prompt" +LLM_HTTP_DEBUG=1 python -m llm -m gpt-4o "Test prompt" # if already installed +``` + +## Environment Variables + +| Variable | Level | Description | +|----------|-------|-------------| +| `LLM_HTTP_LOGGING=1` | INFO | Shows HTTP requests and responses | +| `LLM_HTTP_DEBUG=1` | DEBUG | Shows detailed connection info and headers | +| `LLM_HTTP_VERBOSE=1` | DEBUG | Alias for `LLM_HTTP_DEBUG` | +| `LLM_OPENAI_SHOW_RESPONSES=1` | INFO | Legacy OpenAI-only debugging (still supported) | + +## CLI Flags + +```bash +llm --http-logging [command] # Enable INFO-level HTTP logging +llm --http-debug [command] # Enable DEBUG-level HTTP logging +``` + +## What Gets Logged + +### INFO Level (`LLM_HTTP_LOGGING=1`) + +Shows high-level HTTP request information: + +``` +httpx - INFO - HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:streamGenerateContent "HTTP/1.1 200 OK" +``` + +### DEBUG Level (`LLM_HTTP_DEBUG=1`) + +Shows detailed connection and protocol information: + +``` +httpcore.connection - DEBUG - connect_tcp.started host='api.openai.com' port=443 +httpcore.connection - DEBUG - connect_tcp.complete return_value= +httpcore.connection - DEBUG - start_tls.started ssl_context= server_hostname='api.openai.com' +httpcore.http11 - DEBUG - send_request_headers.started request= +httpcore.http11 - DEBUG - send_request_body.started request= +httpcore.http11 - DEBUG - receive_response_headers.started request= +httpcore.http11 - DEBUG - receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [...headers...]) +httpx - INFO - HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +httpcore.http11 - DEBUG - receive_response_body.started request= +httpcore.http11 - DEBUG - receive_response_body.complete +``` + +## Provider-Specific Debugging + +### OpenAI Models (o1, o3, GPT-4, etc.) + +**What you can see:** +- Reasoning token usage: `reasoning_tokens`, `input_tokens`, `output_tokens` +- Request parameters for reasoning models +- Tool calling request/response cycles +- Rate limiting headers + +**Example:** +```bash +export LLM_HTTP_DEBUG=1 +llm -m o3 "Solve this step by step: What is 15% of 240?" +``` +Shows reasoning parameters and token usage in HTTP traffic. + +### Gemini Models (including 2.5-pro reasoning) + +**What you can see:** +- Thinking budget configuration: `thinking_config: {"thinking_budget": 1000}` +- Response with thinking tokens: `thoughtsTokenCount: 500` +- Streaming response chunks +- Model-specific parameters + +**Example:** +```bash +export LLM_HTTP_DEBUG=1 +llm -m gemini-2.5-pro "Think carefully about this complex problem" +``` +Shows direct HTTP calls to Google's API with thinking parameters. + +### Anthropic Claude Models (including reasoning) + +**What you can see:** +- Thinking configuration: `thinking: {"type": "enabled", "budget_tokens": 1000}` +- Beta API usage indicators +- Message structure and tool calls +- Thinking parameters in requests + +**Example:** +```bash +export LLM_HTTP_DEBUG=1 +llm -m claude-4-sonnet "Analyze this problem methodically" +``` +Shows Anthropic SDK's HTTP traffic including reasoning config. + +## Real-World Use Cases + +### Debugging Reasoning Models + +See what reasoning parameters are being sent: + +```bash +export LLM_HTTP_DEBUG=1 + +# OpenAI o3 reasoning +llm -m o3 "Complex math problem" --max-tokens 1000 + +# Gemini with thinking budget +llm -m gemini-2.5-pro "Reasoning task" + +# Claude with thinking enabled +llm -m claude-4-sonnet "Analytical problem" +``` + +### Investigating API Errors + +Debug failed requests: + +```bash +export LLM_HTTP_DEBUG=1 +llm -m gpt-4o "Test prompt" +# Shows exact error responses, status codes, headers +``` + +### Understanding Token Usage + +See how tokens are calculated: + +```bash +export LLM_HTTP_LOGGING=1 +llm -m o3-mini "Short prompt" +# Response shows: reasoning_tokens, input_tokens, output_tokens +``` + +### Monitoring Rate Limits + +Track API usage and limits: + +```bash +export LLM_HTTP_DEBUG=1 +llm -m gpt-4o "Test" +# Shows rate limit headers in response +``` + +## Advanced Configuration + +### Combining with LLM Logging + +HTTP logging works alongside LLM's built-in SQLite logging: + +```bash +# Enable both HTTP debugging and LLM logging +export LLM_HTTP_LOGGING=1 +llm logs on +llm -m gpt-4o "Test prompt" + +# View logged interactions +llm logs list --json +``` + +### Filtering Logs + +Focus on specific providers or domains: + +```bash +# Only log OpenAI requests +export LLM_HTTP_DEBUG=1 +llm -m gpt-4o "Test" 2>&1 | grep "api.openai.com" + +# Only log Gemini requests +llm -m gemini-2.5-pro "Test" 2>&1 | grep "generativelanguage.googleapis.com" +``` + +### Performance Considerations + +HTTP logging adds overhead. For production use: + +```bash +# Use INFO level for minimal overhead +export LLM_HTTP_LOGGING=1 + +# Avoid DEBUG level in production +# export LLM_HTTP_DEBUG=1 # Don't use this in production +``` + +## Troubleshooting + +### Common Issues + +**No logs appearing:** +- Verify environment variable is set: `echo $LLM_HTTP_LOGGING` +- Check that the provider actually uses HTTP (not all providers do) +- Some providers may use custom logging that bypasses this system + +**Too much output:** +- Use `LLM_HTTP_LOGGING=1` instead of `LLM_HTTP_DEBUG=1` +- Redirect stderr: `llm prompt "test" 2>/dev/null` + +**Missing reasoning details:** +- Reasoning chains are generated server-side and not exposed in HTTP logs +- You'll see token counts but not the actual reasoning text +- This is a limitation of the provider APIs, not LLM CLI + +### Integration with External Tools + +**Using with jq for JSON parsing:** +```bash +export LLM_HTTP_DEBUG=1 +llm -m gpt-4o "test" 2>&1 | grep -A 10 "Request:" | jq '.' +``` + +**Saving logs to files:** +```bash +export LLM_HTTP_LOGGING=1 +llm -m gemini-2.5-pro "test" 2>debug.log +``` + +## Security Considerations + +⚠️ **Important Security Notes:** + +- HTTP logs may contain API keys in headers (these are automatically redacted) +- Request/response bodies may contain sensitive data +- Never commit log files containing real API interactions +- Use environment variables instead of command-line flags in scripts to avoid shell history + +## Backward Compatibility + +The new HTTP logging system maintains full backward compatibility. + +### Legacy Support + +- `LLM_OPENAI_SHOW_RESPONSES=1` still works (OpenAI only) +- New variables extend functionality to all providers +- No breaking changes to existing workflows + +### Migration Path + +For users currently using OpenAI-specific debugging: +```bash +# Old way (still works) +export LLM_OPENAI_SHOW_RESPONSES=1 + +# New way (works for all providers) +export LLM_HTTP_LOGGING=1 +``` + +## Related Documentation + +- [LLM CLI Documentation](https://llm.datasette.io/) +- [Logging and Storage](logging.md) +- [Provider-Specific Setup](setup.md) +- [Tools and Function Calling](tools.md) diff --git a/docs/index.md b/docs/index.md index 4536f5d1..2f974868 100644 --- a/docs/index.md +++ b/docs/index.md @@ -121,6 +121,7 @@ embeddings/index plugins/index python-api logging +http-debugging related-tools help contributing diff --git a/docs/logging.md b/docs/logging.md index d1a46dcd..b65f9cf8 100644 --- a/docs/logging.md +++ b/docs/logging.md @@ -3,6 +3,8 @@ `llm` defaults to logging all prompts and responses to a SQLite database. +For debugging HTTP requests/responses to providers (OpenAI, Anthropic, Gemini, etc.), see {ref}`HTTP Debugging and Logging `. + You can find the location of that database using the `llm logs path` command: ```bash diff --git a/llm/cli.py b/llm/cli.py index 2e11e2c8..a200c2a9 100644 --- a/llm/cli.py +++ b/llm/cli.py @@ -61,6 +61,7 @@ schema_summary, token_usage_string, truncate_string, + configure_http_logging, ) import base64 import httpx @@ -310,7 +311,26 @@ def schema_option(fn): context_settings={"help_option_names": ["-h", "--help"]}, ) @click.version_option() -def cli(): +@click.option( + "--http-logging", + is_flag=True, + help="Enable HTTP request/response logging for debugging", + envvar="LLM_HTTP_LOGGING", +) +@click.option( + "--http-debug", + is_flag=True, + help="Enable verbose HTTP debugging (includes connection details)", + envvar="LLM_HTTP_DEBUG", +) +@click.option( + "--no-color", + is_flag=True, + help="Disable colored output in HTTP logging", + envvar="NO_COLOR", +) +@click.pass_context +def cli(ctx, http_logging, http_debug, no_color): """ Access Large Language Models from the command-line @@ -334,7 +354,25 @@ def cli(): For a full list of prompting options run: llm prompt --help + + Environment variables for HTTP debugging: + - LLM_HTTP_LOGGING=1: Enable HTTP request/response logging + - LLM_HTTP_DEBUG=1: Enable verbose HTTP debugging + - LLM_OPENAI_SHOW_RESPONSES=1: Legacy OpenAI-only debugging """ + # Set environment variables if CLI flags are provided + if http_logging: + os.environ["LLM_HTTP_LOGGING"] = "1" + if http_debug: + os.environ["LLM_HTTP_DEBUG"] = "1" + if no_color: + os.environ["NO_COLOR"] = "1" + + # Configure HTTP logging early in CLI initialization + configure_http_logging() + + # Ensure context object exists for subcommands + ctx.ensure_object(dict) @cli.command(name="prompt") diff --git a/llm/utils.py b/llm/utils.py index 58194bd6..9e47cfe1 100644 --- a/llm/utils.py +++ b/llm/utils.py @@ -3,6 +3,7 @@ import httpx import itertools import json +import logging import pathlib import puremagic import re @@ -154,6 +155,257 @@ def logging_client() -> httpx.Client: ) +# --- Universal HTTP Logging System --- + + +class HTTPColorFormatter(logging.Formatter): + """ + Custom formatter for HTTP logging with colors and improved readability. + """ + + # ANSI color codes + COLORS = { + "DEBUG": "\033[36m", # Cyan + "INFO": "\033[32m", # Green + "WARNING": "\033[33m", # Yellow + "ERROR": "\033[31m", # Red + "CRITICAL": "\033[35m", # Magenta + "RESET": "\033[0m", # Reset + "BOLD": "\033[1m", # Bold + "DIM": "\033[2m", # Dim + } + + # Logger-specific colors + LOGGER_COLORS = { + "httpx": "\033[94m", # Light blue + "httpcore": "\033[90m", # Dark gray + "openai": "\033[92m", # Light green + "anthropic": "\033[95m", # Light magenta + "llm.http": "\033[96m", # Light cyan + } + + def __init__(self, use_colors=True): + super().__init__() + self.use_colors = use_colors and self._supports_color() + + def _supports_color(self): + """Check if the terminal supports color output.""" + import os + import sys + + # Check if we're in a terminal that supports colors + if hasattr(sys.stderr, "isatty") and sys.stderr.isatty(): + # Check common environment variables + term = os.environ.get("TERM", "") + if "color" in term or term in ("xterm", "xterm-256color", "screen", "tmux"): + return True + # Check for NO_COLOR environment variable + if os.environ.get("NO_COLOR"): + return False + # Default to True for most terminals + return True + return False + + def format(self, record): + if not self.use_colors: + return self._format_plain(record) + + # Get colors + level_color = self.COLORS.get(record.levelname, "") + logger_color = self.LOGGER_COLORS.get(record.name.split(".")[0], "") + reset = self.COLORS["RESET"] + bold = self.COLORS["BOLD"] + dim = self.COLORS["DIM"] + + # Format timestamp + timestamp = self.formatTime(record, "%H:%M:%S") + + # Format logger name with color + logger_name = record.name + if logger_color: + logger_name = f"{logger_color}{logger_name}{reset}" + + # Format level with color + level = record.levelname + if level_color: + level = f"{level_color}{bold}{level}{reset}" + + # Format message with special handling for HTTP-specific content + message = record.getMessage() + message = self._colorize_message(message, record.name) + + # Assemble final format + return f"{dim}[{timestamp}]{reset} {level} {logger_name} - {message}" + + def _format_plain(self, record): + """Plain formatting without colors.""" + timestamp = self.formatTime(record, "%H:%M:%S") + return f"[{timestamp}] {record.levelname} {record.name} - {record.getMessage()}" + + def _colorize_message(self, message, logger_name): + """Add color highlights to HTTP-specific message content.""" + if not self.use_colors: + return message + + reset = self.COLORS["RESET"] + + # Highlight HTTP methods + for method in ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"]: + message = message.replace( + f" {method} ", f' {self.COLORS["BOLD"]}{method}{reset} ' + ) + + # Highlight HTTP status codes + import re + + # Success status codes (2xx) - green + message = re.sub(r"\b(2\d\d)\b", f'{self.COLORS["DEBUG"]}\\1{reset}', message) + + # Client error status codes (4xx) - yellow + message = re.sub(r"\b(4\d\d)\b", f'{self.COLORS["WARNING"]}\\1{reset}', message) + + # Server error status codes (5xx) - red + message = re.sub(r"\b(5\d\d)\b", f'{self.COLORS["ERROR"]}\\1{reset}', message) + + # Highlight URLs + message = re.sub( + r'(https?://[^\s"]+)', f'{self.COLORS["DIM"]}\\1{reset}', message + ) + + # Highlight important headers + for header in ["Authorization", "Content-Type", "User-Agent"]: + message = message.replace( + f"'{header}'", f"'{self.COLORS['BOLD']}{header}{reset}'" + ) + message = message.replace( + f'"{header}"', f'"{self.COLORS["BOLD"]}{header}{reset}"' + ) + + # Highlight connection events for httpcore + if "httpcore" in logger_name: + for event in [ + "connect_tcp", + "start_tls", + "send_request", + "receive_response", + ]: + if event in message: + message = message.replace( + event, f'{self.COLORS["BOLD"]}{event}{reset}' + ) + + return message + + +def _get_http_logging_config(): + """ + Determine HTTP logging configuration from environment variables. + + Returns: + dict: Configuration with 'enabled', 'level', 'format' and 'use_colors' keys + """ + import os + + # Check various environment variables for HTTP logging + enabled = bool( + os.environ.get("LLM_HTTP_LOGGING") + or os.environ.get("LLM_HTTP_DEBUG") + or os.environ.get("LLM_HTTP_VERBOSE") + or os.environ.get("LLM_OPENAI_SHOW_RESPONSES") # Backward compatibility + ) + + if not enabled: + return {"enabled": False} + + # Determine logging level + level = "INFO" + if os.environ.get("LLM_HTTP_DEBUG") or os.environ.get("LLM_HTTP_VERBOSE"): + level = "DEBUG" + + return { + "enabled": True, + "level": level, + # Provide a standard logging format description for reference + "format": "%(name)s - %(levelname)s - %(message)s", + "use_colors": not os.environ.get("NO_COLOR"), + } + + +def configure_http_logging(): + """ + Configure Python logging for HTTP requests across all providers. + + This enables logging for: + - httpx: High-level HTTP client logs + - httpcore: Low-level HTTP connection logs + - openai: OpenAI SDK logs + - anthropic: Anthropic SDK logs + + Environment variables: + - LLM_HTTP_LOGGING=1: Enable INFO-level HTTP logging + - LLM_HTTP_DEBUG=1: Enable DEBUG-level HTTP logging + - LLM_HTTP_VERBOSE=1: Enable DEBUG-level HTTP logging + - LLM_OPENAI_SHOW_RESPONSES=1: Backward compatibility (INFO level) + """ + import logging + + config = _get_http_logging_config() + if not config["enabled"]: + return + + # Create our custom formatter + formatter = HTTPColorFormatter(use_colors=config.get("use_colors", True)) + + # Configure root logger if not already configured + if not logging.getLogger().handlers: + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logging.getLogger().addHandler(handler) + logging.getLogger().setLevel(logging.WARNING) # Keep root logger quiet + + # Set up HTTP-related loggers + log_level = getattr(logging, config["level"]) + + # Core HTTP libraries + logging.getLogger("httpx").setLevel(log_level) + logging.getLogger("httpcore").setLevel(log_level) + + # Provider SDKs + logging.getLogger("openai").setLevel(log_level) + logging.getLogger("anthropic").setLevel(log_level) + + # Additional useful loggers + if config["level"] == "DEBUG": + logging.getLogger("urllib3").setLevel(log_level) + logging.getLogger("requests").setLevel(log_level) + + # Configure all HTTP loggers to use our custom formatter + http_loggers = ["httpx", "httpcore", "openai", "anthropic", "llm.http"] + if config["level"] == "DEBUG": + http_loggers.extend(["urllib3", "requests"]) + + # Replace NullHandlers with our custom handler + if all(isinstance(h, logging.NullHandler) for h in logger.handlers): + logger.handlers.clear() + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.propagate = False + logger.addHandler(handler) + logger.propagate = False + + # Log that HTTP logging is enabled + logging.getLogger("llm.http").info( + f"HTTP logging enabled at {config['level']} level" + ) + + +def is_http_logging_enabled() -> bool: + """Check if HTTP logging is enabled via environment variables.""" + config = _get_http_logging_config() + return config["enabled"] + + def simplify_usage_dict(d): # Recursively remove keys with value 0 and empty dictionaries def remove_empty_and_zero(obj): diff --git a/tests/test_http_logging.py b/tests/test_http_logging.py new file mode 100644 index 00000000..b748259e --- /dev/null +++ b/tests/test_http_logging.py @@ -0,0 +1,260 @@ +""" +Tests for universal HTTP logging functionality. + +This module tests the HTTP logging system that enables debug visibility +into requests/responses across all LLM providers (OpenAI, Anthropic, Gemini, etc). +""" + +import logging +import os +import pytest +from unittest.mock import patch, MagicMock +from click.testing import CliRunner + +from llm.cli import cli +from llm.utils import ( + _get_http_logging_config, + configure_http_logging, + is_http_logging_enabled, +) + + +class TestHTTPLoggingConfig: + """Test HTTP logging configuration detection.""" + + def test_logging_disabled_by_default(self): + """HTTP logging should be disabled when no env vars are set.""" + with patch.dict(os.environ, {}, clear=True): + config = _get_http_logging_config() + assert config["enabled"] is False + + def test_llm_http_logging_enables_info_level(self): + """LLM_HTTP_LOGGING=1 should enable INFO level logging.""" + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}): + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "INFO" + assert "format" in config + + def test_llm_http_debug_enables_debug_level(self): + """LLM_HTTP_DEBUG=1 should enable DEBUG level logging.""" + with patch.dict(os.environ, {"LLM_HTTP_DEBUG": "1"}): + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "DEBUG" + + def test_llm_http_verbose_enables_debug_level(self): + """LLM_HTTP_VERBOSE=1 should enable DEBUG level logging.""" + with patch.dict(os.environ, {"LLM_HTTP_VERBOSE": "1"}): + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "DEBUG" + + def test_backward_compatibility_with_openai_flag(self): + """LLM_OPENAI_SHOW_RESPONSES=1 should enable logging for backward compatibility.""" + with patch.dict(os.environ, {"LLM_OPENAI_SHOW_RESPONSES": "1"}): + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "INFO" + + def test_debug_overrides_info_level(self): + """DEBUG flags should override INFO level when both are set.""" + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1", "LLM_HTTP_DEBUG": "1"}): + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "DEBUG" + + def test_multiple_env_vars_enable_logging(self): + """Any combination of env vars should enable logging.""" + test_cases = [ + {"LLM_HTTP_LOGGING": "1"}, + {"LLM_HTTP_DEBUG": "1"}, + {"LLM_OPENAI_SHOW_RESPONSES": "1"}, + {"LLM_HTTP_LOGGING": "1", "LLM_HTTP_DEBUG": "1"}, + ] + + for env_vars in test_cases: + with patch.dict(os.environ, env_vars, clear=True): + config = _get_http_logging_config() + assert config["enabled"] is True, f"Failed with env vars: {env_vars}" + + +class TestHTTPLoggingConfiguration: + """Test HTTP logging configuration setup.""" + + def test_configure_http_logging_when_disabled(self): + """configure_http_logging should do nothing when logging is disabled.""" + with patch.dict(os.environ, {}, clear=True): + # Mock logging inside the function where it's imported + with patch("logging.getLogger") as mock_get_logger: + configure_http_logging() + # Should not configure any loggers when disabled + mock_get_logger.assert_not_called() + + def test_configure_http_logging_when_enabled(self): + """configure_http_logging should set up loggers when enabled.""" + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}): + with ( + patch("logging.getLogger") as mock_get_logger, + patch("logging.basicConfig"), + patch("logging.StreamHandler"), + ): + mock_logger = MagicMock() + mock_get_logger.return_value = mock_logger + + configure_http_logging() + + # Should configure core HTTP loggers + # Extract logger names from calls (skip empty first call) + logger_names = [] + for call in mock_get_logger.call_args_list: + if len(call.args) > 0: # Skip calls with no arguments + logger_names.append(call.args[0]) + + for logger_name in ["httpx", "httpcore", "openai", "anthropic"]: + assert logger_name in logger_names + + def test_configure_debug_level_loggers(self): + """DEBUG level should configure additional loggers.""" + with patch.dict(os.environ, {"LLM_HTTP_DEBUG": "1"}): + with ( + patch("logging.getLogger") as mock_get_logger, + patch("logging.basicConfig"), + patch("logging.StreamHandler"), + ): + mock_logger = MagicMock() + mock_get_logger.return_value = mock_logger + + configure_http_logging() + + # Should configure additional debug loggers + logger_names = [] + for call in mock_get_logger.call_args_list: + if len(call.args) > 0: # Skip calls with no arguments + logger_names.append(call.args[0]) + + assert "urllib3" in logger_names + assert "requests" in logger_names + + def test_is_http_logging_enabled_function(self): + """is_http_logging_enabled should return correct boolean.""" + with patch.dict(os.environ, {}, clear=True): + assert is_http_logging_enabled() is False + + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}): + assert is_http_logging_enabled() is True + + +class TestCLIIntegration: + """Test CLI integration with HTTP logging.""" + + def test_cli_help_includes_http_logging_options(self): + """CLI help should document HTTP logging options.""" + runner = CliRunner() + result = runner.invoke(cli, ["--help"]) + assert result.exit_code == 0 + assert "--http-logging" in result.output + assert "--http-debug" in result.output + assert "HTTP request/response logging" in result.output + + def test_cli_http_logging_flag_sets_env_var(self): + """--http-logging flag should set environment variable.""" + runner = CliRunner() + + # Check that the CLI flag is passed through correctly + result = runner.invoke(cli, ["--http-logging", "--help"]) + assert result.exit_code == 0 + # The flag is processed by Click, no need to test internal mocking + + def test_cli_http_debug_flag_sets_env_var(self): + """--http-debug flag should set environment variable.""" + runner = CliRunner() + + result = runner.invoke(cli, ["--http-debug", "--help"]) + assert result.exit_code == 0 + # The flag is processed by Click + + def test_cli_calls_configure_http_logging(self): + """CLI should call configure_http_logging on startup.""" + runner = CliRunner() + + # Test that the CLI runs successfully with logging configuration + result = runner.invoke(cli, ["--help"]) + assert result.exit_code == 0 + + def test_env_var_enables_logging_in_cli(self): + """Environment variable should enable logging in CLI.""" + runner = CliRunner() + + # Test CLI with environment variable set + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}): + result = runner.invoke(cli, ["--help"]) + assert result.exit_code == 0 + + +class TestHTTPLoggingIntegration: + """Test HTTP logging integration with real logging system.""" + + def test_logging_setup_creates_handlers(self): + """Test that logging setup actually creates handlers.""" + # Clear any existing handlers + for logger_name in ["httpx", "httpcore", "llm.http"]: + logger = logging.getLogger(logger_name) + logger.handlers.clear() + logger.setLevel(logging.NOTSET) + + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}): + configure_http_logging() + + # Check that loggers are configured + httpx_logger = logging.getLogger("httpx") + assert httpx_logger.level == logging.INFO + + httpcore_logger = logging.getLogger("httpcore") + assert httpcore_logger.level == logging.INFO + + def test_debug_level_configuration(self): + """Test DEBUG level configuration.""" + # Clear any existing handlers + for logger_name in ["httpx", "httpcore"]: + logger = logging.getLogger(logger_name) + logger.handlers.clear() + logger.setLevel(logging.NOTSET) + + with patch.dict(os.environ, {"LLM_HTTP_DEBUG": "1"}): + configure_http_logging() + + httpx_logger = logging.getLogger("httpx") + assert httpx_logger.level == logging.DEBUG + + httpcore_logger = logging.getLogger("httpcore") + assert httpcore_logger.level == logging.DEBUG + + +class TestHTTPLoggingDocumentation: + """Test that HTTP logging is properly documented.""" + + def test_cli_help_mentions_environment_variables(self): + """CLI help should mention all supported environment variables.""" + runner = CliRunner() + result = runner.invoke(cli, ["--help"]) + assert result.exit_code == 0 + + # Check for environment variable documentation + assert "LLM_HTTP_LOGGING" in result.output + assert "LLM_HTTP_DEBUG" in result.output + assert "LLM_OPENAI_SHOW_RESPONSES" in result.output + + def test_function_docstrings_are_comprehensive(self): + """Test that key functions have good docstrings.""" + assert configure_http_logging.__doc__ is not None + assert "httpx" in configure_http_logging.__doc__ + assert "httpcore" in configure_http_logging.__doc__ + assert "LLM_HTTP_LOGGING" in configure_http_logging.__doc__ + + assert _get_http_logging_config.__doc__ is not None + assert is_http_logging_enabled.__doc__ is not None + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_http_logging_integration.py b/tests/test_http_logging_integration.py new file mode 100644 index 00000000..4ddb3fbd --- /dev/null +++ b/tests/test_http_logging_integration.py @@ -0,0 +1,120 @@ +""" +Integration tests for HTTP logging functionality. +Tests that HTTP logging works with real CLI commands and providers. +""" + +import os +import pytest +from click.testing import CliRunner +from unittest.mock import patch + +from llm.cli import cli +from llm.utils import ( + configure_http_logging, + is_http_logging_enabled, + _get_http_logging_config, +) + + +class TestHTTPLoggingIntegration: + """Integration tests for HTTP logging across the full system.""" + + def test_http_logging_cli_flags(self): + """Test HTTP logging CLI flags work correctly.""" + runner = CliRunner() + + # Test --http-logging flag + result = runner.invoke(cli, ["--http-logging", "--help"]) + assert result.exit_code == 0 + assert "--http-logging" in result.output + assert "--http-debug" in result.output + + # Test --http-debug flag + result = runner.invoke(cli, ["--http-debug", "--help"]) + assert result.exit_code == 0 + + @patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}) + def test_environment_variable_detection(self): + """Test that environment variables are detected correctly.""" + assert is_http_logging_enabled() is True + + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "INFO" + + @patch.dict(os.environ, {"LLM_HTTP_DEBUG": "1"}) + def test_debug_level_detection(self): + """Test DEBUG level environment variable.""" + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "DEBUG" + + @patch.dict(os.environ, {"LLM_OPENAI_SHOW_RESPONSES": "1"}) + def test_backward_compatibility(self): + """Test backward compatibility with OpenAI-specific variable.""" + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == "INFO" + + def test_http_logging_with_mock_request(self, httpx_mock): + """Test HTTP logging with a mocked HTTP request.""" + # Mock a simple HTTP request + httpx_mock.add_response( + method="GET", + url="https://example.com/test", + json={"status": "ok"}, + status_code=200, + ) + + # Configure logging + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}): + configure_http_logging() + + # Make a request through httpx + import httpx + + response = httpx.get("https://example.com/test") + assert response.status_code == 200 + assert response.json() == {"status": "ok"} + + def test_logging_configuration_called_by_cli(self): + """Test that configure_http_logging is called during CLI initialization.""" + runner = CliRunner() + + # The CLI should run without errors even with logging enabled + with patch.dict(os.environ, {"LLM_HTTP_LOGGING": "1"}): + result = runner.invoke(cli, ["--help"]) + assert result.exit_code == 0 + + def test_models_list_with_http_logging(self): + """Test models list command with HTTP logging enabled.""" + runner = CliRunner() + + # Should work without errors when HTTP logging is enabled + with patch.dict(os.environ, {"LLM_HTTP_DEBUG": "1"}): + result = runner.invoke(cli, ["models", "list"]) + assert result.exit_code == 0 + # Should show available models including our mock models + assert "mock" in result.output + + @pytest.mark.parametrize( + "env_var,expected_level", + [ + ("LLM_HTTP_LOGGING", "INFO"), + ("LLM_HTTP_DEBUG", "DEBUG"), + ("LLM_HTTP_VERBOSE", "DEBUG"), + ("LLM_OPENAI_SHOW_RESPONSES", "INFO"), + ], + ) + def test_all_environment_variables(self, env_var, expected_level): + """Test all supported environment variables.""" + with patch.dict(os.environ, {env_var: "1"}, clear=True): + config = _get_http_logging_config() + assert config["enabled"] is True + assert config["level"] == expected_level + + def test_http_logging_disabled_by_default(self): + """Test that HTTP logging is disabled when no env vars are set.""" + with patch.dict(os.environ, {}, clear=True): + config = _get_http_logging_config() + assert config["enabled"] is False