diff --git a/.env.example b/.env.example index 694b6fc..2c3b4dd 100644 --- a/.env.example +++ b/.env.example @@ -1,5 +1,5 @@ # override this to your own model config toml -LITELLM_CONFIG_PATH=model.config.toml +LITELLM_CONFIG_PATH=./credentials/model.config.example.toml # Azure OpenAI Configuration (Legacy) AZURE_OPENAI_MODEL=your_model_name_here # e.g., o3-mini-deep-research @@ -22,8 +22,10 @@ AWS_ACCESS_KEY_ID=your_aws_access_key_id AWS_SECRET_ACCESS_KEY=your_aws_secret_access_key SENDER_EMAIL=your_sender_email@domain.com -# JINA API Key +# External Services JINA_API_KEY="YOUR_JINA_API_KEY" # Leave blank if not using deep research +BRAVE_SEARCH_API_KEY="" +RAPIDAPI_KEY="" # LLM Routing Configuration # GPT-4o-mini Instance 1 @@ -53,6 +55,7 @@ SUPABASE_URL=your_supabase_url SUPABASE_KEY=your_supabase_key SUPABASE_SERVICE_ROLE_KEY=your_supabase_service_role_key WHITELIST_SIGNUP_URL=your_whitelist_signup_url # e.g., https://yourdomain.com/ +FRONTEND_URL=https://mxtoai.com/ # Server Configuration PORT=8000 diff --git a/README.md b/README.md index f1d40f4..29ae80d 100644 --- a/README.md +++ b/README.md @@ -96,10 +96,58 @@ poetry run python run_api.py ``` 5. Start the workers: + +Using only single process and couple of threads for local development: + +```bash +poetry run dramatiq mxtoai.tasks --processes 1 --threads 2 --watch ./. +``` + +### Docker Setup (Alternative Installation) + +The project can also be run using Docker Compose, which provides an isolated environment with all required services. + +1. Ensure you have Docker and Docker Compose installed on your system. + +2. Build and start all services: ```bash -poetry run dramatiq mxtoai.tasks --watch ./. +docker compose up -d ``` +3. 
Access the services: +- API Server: http://localhost:8000 +- RabbitMQ Management: http://localhost:15672 (credentials: guest/guest) +- Redis: localhost:6379 +- Ollama: localhost:11434 (optional) + +#### Service Details +- **API Server**: FastAPI application running on port 8000 +- **Worker**: Background task processor using Dramatiq +- **Redis**: Used for caching and session management +- **RabbitMQ**: Message broker for task queue +- **Ollama**: Optional LLM service (disabled by default) + +#### Running with Ollama +To include the Ollama service (required for local LLM processing): +```bash +docker compose --profile ollama up -d +``` + +#### Stopping Services +```bash +# Stop all services +docker compose down + +# Stop and remove all data volumes (this will delete all data) +docker compose down -v +``` + +#### Important Notes +- The Docker setup includes all required services (Redis, RabbitMQ) automatically +- Model configuration file (`model.config.toml`) should be placed in the `credentials/` directory +- All services are configured to restart automatically unless stopped manually +- Data persistence is enabled for Redis, RabbitMQ, and Ollama through Docker volumes + ### Environment Variables Copy the `.env.example` file to `.env` and update with your specific configuration: @@ -110,6 +158,46 @@ LITELLM_CONFIG_PATH=model.config.toml # Redis configuration REDIS_HOST=localhost REDIS_PORT=6379 +REDIS_DB=0 +REDIS_PASSWORD= + +# rabbitmq config +RABBITMQ_HOST=localhost +RABBITMQ_PORT=5672 +RABBITMQ_USER=guest +RABBITMQ_PASSWORD=guest +RABBITMQ_VHOST=/ +RABBITMQ_HEARTBEAT=60 # Default heartbeat interval in seconds + +# server config +PORT=8000 +HOST=0.0.0.0 +LOG_LEVEL=INFO +IS_PROD=false +X_API_KEY=your_api_key + +# supabase +SUPABASE_URL=your_supabase_url +SUPABASE_KEY=your_supabase_key +SUPABASE_SERVICE_ROLE_KEY=your_supabase_service_role_key +WHITELIST_SIGNUP_URL=your_whitelist_signup_url # e.g., https://yourdomain.com/ + +# open ai api key 
+AZURE_OPENAI_API_KEY=your_api_key_here + +# Hugging Face Token +HF_TOKEN=your_huggingface_token + +# AWS SES Configuration +AWS_REGION=your_aws_region # e.g., ap-south-1 +AWS_ACCESS_KEY_ID=your_aws_access_key_id +AWS_SECRET_ACCESS_KEY=your_aws_secret_access_key +SENDER_EMAIL=your_sender_email@domain.com + +# External services +JINA_API_KEY="YOUR_JINA_API_KEY" # Leave blank if not using deep research +BRAVE_SEARCH_API_KEY="" +RAPIDAPI_KEY="" # Optional for research functionality JINA_API_KEY=your-jina-api-key @@ -122,7 +210,7 @@ AZURE_VISION_KEY=your-azure-vision-key SERPAPI_API_KEY=your-serpapi-api-key ``` -This project supports load balancing and routing across multiple models, so you can define as many models as you'd like. Copy `model.config.example.toml` to a toml file and update it with your preferred configuration. Update `.env` with the path your toml relative to root. +This project supports load balancing and routing across multiple models, so you can define as many models as you'd like. Copy `credentials/model.config.example.toml` to a new toml file in the same directory and update it with your preferred configuration. Update `.env` with the path to your toml file, relative to the project root. 
A sample configuration looks like this: diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..17ae4c5 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,90 @@ +version: "3.9" + +services: + redis: + image: redis:7-alpine + container_name: redis + ports: + - ${REDIS_PORT:-6379}:${REDIS_PORT:-6379} + volumes: + - redis_data:/data + restart: unless-stopped + command: ["redis-server", "--requirepass", "${REDIS_PASSWORD:-changeme}"] + + rabbitmq: + image: rabbitmq:3-management + container_name: rabbitmq + ports: + - ${RABBITMQ_PORT:-5672}:${RABBITMQ_PORT:-5672} + - ${RABBITMQ_MANAGEMENT_PORT:-15672}:${RABBITMQ_MANAGEMENT_PORT:-15672} + volumes: + - rabbitmq_data:/var/lib/rabbitmq + environment: + RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER:-guest} + RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASSWORD:-guest} + restart: unless-stopped + healthcheck: + test: ["CMD", "rabbitmq-diagnostics", "ping"] + interval: 30s + timeout: 10s + retries: 5 + + ollama: + image: ollama/ollama:latest + container_name: ollama + ports: + - ${OLLAMA_PORT:-11434}:${OLLAMA_PORT:-11434} + volumes: + - ollama_data:/root/.ollama + restart: unless-stopped + profiles: + - ollama + + api_server: + build: + context: . + dockerfile: docker/api_server.dockerfile + container_name: api_server + env_file: + - .env + depends_on: + rabbitmq: + condition: service_healthy + redis: + condition: service_started + ports: + - "8000:8000" + environment: + - REDIS_URL=redis://:${REDIS_PASSWORD:-changeme}@redis:${REDIS_PORT:-6379}/0 + - RABBITMQ_URL=amqp://${RABBITMQ_USER:-guest}:${RABBITMQ_PASSWORD:-guest}@rabbitmq:${RABBITMQ_PORT:-5672}/ + - OLLAMA_URL=http://ollama:${OLLAMA_PORT:-11434} + - LITELLM_CONFIG_PATH=/app/credentials/model.config.toml + volumes: + - ./credentials/model.config.toml:/app/credentials/model.config.toml + command: ["poetry", "run", "uvicorn", "mxtoai.api:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "4"] + + worker: + build: + context: . 
+ dockerfile: docker/worker.dockerfile + container_name: worker + env_file: + - .env + depends_on: + rabbitmq: + condition: service_healthy + redis: + condition: service_started + environment: + - REDIS_URL=redis://:${REDIS_PASSWORD:-changeme}@redis:${REDIS_PORT:-6379}/0 + - RABBITMQ_URL=amqp://${RABBITMQ_USER:-guest}:${RABBITMQ_PASSWORD:-guest}@rabbitmq:${RABBITMQ_PORT:-5672}/ + - OLLAMA_URL=http://ollama:${OLLAMA_PORT:-11434} + - LITELLM_CONFIG_PATH=/app/credentials/model.config.toml + volumes: + - ./credentials/model.config.toml:/app/credentials/model.config.toml + command: ["poetry", "run", "dramatiq", "mxtoai.tasks", "--watch", "./."] + +volumes: + rabbitmq_data: + ollama_data: + redis_data: diff --git a/docker/api_server.dockerfile b/docker/api_server.dockerfile new file mode 100644 index 0000000..90c80a4 --- /dev/null +++ b/docker/api_server.dockerfile @@ -0,0 +1,28 @@ +FROM python:3.13-slim-bookworm + +# System dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + build-essential \ + ffmpeg \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory +WORKDIR /app + +# Install Poetry (latest) +RUN curl -sSL https://install.python-poetry.org | python3 - && \ + ln -s /root/.local/bin/poetry /usr/local/bin/poetry + +# Copy dependency files first (for cache) +COPY pyproject.toml poetry.lock ./ + +# Install dependencies (no virtualenv) +RUN poetry config virtualenvs.create false && poetry install --no-root --no-interaction --no-ansi + +# Copy only the relevant application code +COPY mxtoai ./mxtoai +COPY run_api.py . 
+ +# Run the API via uvicorn +CMD ["poetry", "run", "uvicorn", "mxtoai.api:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "4"] diff --git a/docker/worker.dockerfile b/docker/worker.dockerfile new file mode 100644 index 0000000..a2caca4 --- /dev/null +++ b/docker/worker.dockerfile @@ -0,0 +1,27 @@ +FROM python:3.13-slim-bookworm + +# System dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + build-essential \ + ffmpeg \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory +WORKDIR /app + +# Install Poetry (latest) +RUN curl -sSL https://install.python-poetry.org | python3 - && \ + ln -s /root/.local/bin/poetry /usr/local/bin/poetry + +# Copy dependency files first (for cache) +COPY pyproject.toml poetry.lock ./ + +# Install dependencies +RUN poetry config virtualenvs.create false && poetry install --no-root --no-interaction --no-ansi + +# Copy only the relevant worker code +COPY mxtoai ./mxtoai + +# Run the Dramatiq worker +CMD ["poetry", "run", "dramatiq", "mxtoai.tasks", "--watch", "./mxtoai"] diff --git a/docusaurus-site/docs/introduction.md b/docusaurus-site/docs/introduction.md index 8d52f4a..e8718bb 100644 --- a/docusaurus-site/docs/introduction.md +++ b/docusaurus-site/docs/introduction.md @@ -8,7 +8,7 @@ Welcome to MXtoAI! MXtoAI is an intelligent email assistant that processes users emails automatically. Users simply forward any email to our specialized handles (email IDs) and the appropriate actions are taken. Most of the actions are AI driven. -Forward emails to our general email handle `ask@mxtoai.com` or to one of our specialized handles. Get summaries, replies, and research without the complexity. +Forward emails to our general email handle `ask@mxtoai.com` or to one of our specialized handles. Get summaries, replies, research, and professional PDF exports without the complexity. 
![Forwarding email to ask@mxtoai.com](/img/mxtoai-ask.gif) *Example: Forwarding an email to ask@mxtoai.com with instructions.* diff --git a/docusaurus-site/docs/usage.md b/docusaurus-site/docs/usage.md index 7051fd2..63eb173 100644 --- a/docusaurus-site/docs/usage.md +++ b/docusaurus-site/docs/usage.md @@ -18,7 +18,10 @@ Here's a brief overview of the available handles and their primary functions: * `background@mxtoai.com`: Get background information on entities mentioned. * `ask@mxtoai.com`: Ask specific questions about the email content. * `schedule@mxtoai.com`: Extract scheduling information or propose meeting times. +* `pdf@mxtoai.com`: Export email content as a professional PDF document. For detailed information on each handle, including aliases and key features, please refer to the [Email Handles section on our website](https://www.mxtoai.com/#usecases). **Pro tip:** Add our email handles to your contacts for quicker access when forwarding emails. + +**Note:** Any handle can export content as PDF by simply asking "convert to PDF" or "export as PDF" in your request. diff --git a/docusaurus-site/docs/why-use-mxtoai.md b/docusaurus-site/docs/why-use-mxtoai.md index f324276..7664bcc 100644 --- a/docusaurus-site/docs/why-use-mxtoai.md +++ b/docusaurus-site/docs/why-use-mxtoai.md @@ -8,8 +8,9 @@ - Works anywhere - Gmail, Outlook, Apple Mail, even your phone - Works with attachments - PDFs, images, spreadsheets all processed - Works with email threads - understands context from conversation history +- **PDF Export** - any response can be converted to PDF, or use pdf@ to export content directly -As simple as it sounds, MXtoAI is very powerful. If you spend more than 10 minutes a day reading, understanding, or responding to emails, MXtoAI can probably save you time and mental energy. Whether you're dealing with work emails, personal correspondence, or staying informed, there's likely a use case that fits your daily routine. Here are some use-cases you can use it for. 
+As simple as it sounds, MXtoAI is very powerful. If you spend more than 10 minutes a day reading, understanding, or responding to emails, MXtoAI can probably save you time and mental energy. Whether you're dealing with work emails, personal correspondence, or staying informed, there's likely a use case that fits your daily routine. Plus, any content generated can be exported as a professional PDF for sharing, printing, or archiving. Here are some use-cases you can use it for. ## 📧 Daily Email Management @@ -70,6 +71,10 @@ As simple as it sounds, MXtoAI is very powerful. If you spend more than 10 minut - Forward to summarize@mxtoai.com - get key requirements and deadlines - Forward to ask@mxtoai.com with "What's our competitive advantage here?" +**"I need to document compliance or regulatory communications"** +- Forward regulatory emails to pdf@mxtoai.com for permanent, clean records +- Perfect for audit trails and compliance documentation + ## 🔍 Information Verification & Research **"I get forwarded news and want to check if it's true"** @@ -88,6 +93,20 @@ As simple as it sounds, MXtoAI is very powerful. 
If you spend more than 10 minut - Forward competitor emails to background@mxtoai.com - Get market position, recent moves, customer feedback +## 📄 PDF Export & Document Creation + +**"I need to create professional documents from email content"** +- Forward newsletters to pdf@mxtoai.com - get clean, formatted PDF without email headers +- Perfect for sharing research, reports, or important communications + +**"I want to convert AI responses to shareable documents"** +- Ask any handle to "generate a report and convert to PDF" +- Forward to ask@mxtoai.com with "Create a HN newsletter with top AI posts of the week and export as PDF" + +**"I need to archive important email content professionally"** +- Forward contracts, agreements, or important announcements to pdf@mxtoai.com +- Get clean documents without email clutter - perfect for filing or legal records + ## 🏠 Personal Life **"I get complex medical/insurance emails"** @@ -128,6 +147,10 @@ As simple as it sounds, MXtoAI is very powerful. If you spend more than 10 minut - Forward to ask@mxtoai.com with "What are the practical applications?" - Understand real-world relevance quickly +**"I want to compile research findings into shareable documents"** +- Forward multiple research emails to ask@mxtoai.com with "Synthesize findings and create PDF report" +- Perfect for thesis research or professional development + ## 🛍️ Shopping & Consumer Decisions **"I get overwhelmed by product comparison emails"** @@ -199,6 +222,7 @@ As simple as it sounds, MXtoAI is very powerful. If you spend more than 10 minut **Consultant:** - "I forward client emails to background@mxtoai.com before meetings to understand industry challenges" - "I forward RFPs to ask@mxtoai.com with 'What's the hidden agenda here?' 
to win more bids" +- "I forward research findings to ask@mxtoai.com with 'Create client presentation and export as PDF' for professional deliverables" **Retiree:** - "I forward Medicare emails to simplify@mxtoai.com - healthcare is confusing enough" diff --git a/mxtoai/_logging.py b/mxtoai/_logging.py index 50c88f4..5727e91 100644 --- a/mxtoai/_logging.py +++ b/mxtoai/_logging.py @@ -1,15 +1,17 @@ +import logging import os import sys from collections.abc import Sequence -from contextlib import contextmanager +from contextlib import contextmanager, suppress from pathlib import Path from typing import Any import logfire from dotenv import load_dotenv from loguru import logger +from rich.console import Console -__all__ = ["get_logger", "span"] +__all__ = ["get_logger", "get_smolagents_console", "span"] # Load environment variables load_dotenv() @@ -69,11 +71,134 @@ logger.add(**logfire_handler) logfire.configure(console=False) + +class InterceptHandler(logging.Handler): + """ + Intercept standard library logging and redirect to loguru. + This captures logs from third-party libraries like LiteLLM, httpx, etc. 
+ """ + + def emit(self, record): + # Get corresponding Loguru level if it exists + try: + level = logger.level(record.levelname).name + except ValueError: + level = record.levelno + + # Find caller from where originated the logged message + frame, depth = logging.currentframe(), 2 + while frame and (frame.f_code.co_filename in (logging.__file__, __file__)): + frame = frame.f_back + depth += 1 + + # Use the logger name from the original record for better identification + logger_name = record.name if record.name else "unknown" + + # Get the formatted message + try: + message = record.getMessage() + except Exception: + message = str(record.msg) + + # Log through loguru with proper context + logger.opt(depth=depth, exception=record.exc_info).bind(name=logger_name).log(level, message) + + +# Intercept standard library logging and redirect to loguru +def setup_stdlib_logging_intercept(): + """Set up interception of standard library logging.""" + # Create our intercept handler + intercept_handler = InterceptHandler() + + # Configure root logger + logging.root.handlers = [intercept_handler] + logging.root.setLevel(LOG_LEVEL) + + # Also configure specific loggers that might be problematic + third_party_loggers = [ + "litellm", + "httpx", + "dramatiq", + # "pika", # RabbitMQ client + "azure", + "openai", + "smolagents", # Capture smolagents verbose output + "transformers", # HuggingFace transformers + "torch", # PyTorch logging + "requests", # HTTP requests logging + "urllib3", # HTTP library used by requests + "aiohttp", # Async HTTP client + ] + + for logger_name in third_party_loggers: + third_party_logger = logging.getLogger(logger_name) + third_party_logger.handlers = [intercept_handler] + third_party_logger.setLevel(LOG_LEVEL) + third_party_logger.propagate = True + + +# Set up the interception +setup_stdlib_logging_intercept() + # Log a test message to verify logging is working logger.info("Logging initialized with level: {}", LOG_LEVEL) logger.debug("Debug logging 
is enabled") +class LoguruRichConsole: + """ + Custom Rich console that integrates with loguru. + Captures smolagents Rich console output and feeds it into the loguru logging pipeline, + which then goes to app.log, debug.log, and logfire for unified observability. + """ + + def __init__(self): + """Initialize the loguru-integrated Rich console.""" + # Create a standard Rich console for terminal output + self.terminal_console = Console() + # Get loguru logger for capturing Rich output + self.rich_logger = logger.bind(source="smolagents_rich") + + def print(self, *args, **kwargs): + """Print to terminal and capture in loguru logging pipeline.""" + try: + # Print to terminal as normal + self.terminal_console.print(*args, **kwargs) + + # Capture the content for loguru logging + # Convert Rich renderables to plain text for logging + content_parts = [] + for arg in args: + if hasattr(arg, "__rich__") or hasattr(arg, "__rich_console__"): + # For Rich renderables, capture their string representation + content_parts.append(str(arg)) + else: + content_parts.append(str(arg)) + + content = " ".join(content_parts) + + # Determine log level based on style or content + log_level = "INFO" # Default level + style = kwargs.get("style", "") + if "error" in style.lower() or "red" in style.lower(): + log_level = "ERROR" + elif "warning" in style.lower() or "yellow" in style.lower(): + log_level = "WARNING" + elif "debug" in content.lower(): + log_level = "DEBUG" + + # Log to loguru (which feeds to app.log, debug.log, and logfire) + self.rich_logger.log(log_level, "Rich Console: {}", content) + + except Exception as e: + # Fallback logging if Rich integration fails + error_msg = f"Rich console integration error: {e}" + logger.error(error_msg) + # Still try to print to terminal + with suppress(Exception): + self.terminal_console.print(*args, **kwargs) + + def get_logger(source: str) -> Any: """Get a logger instance bound with the source name.""" return logger.bind(source=source) @@ 
-107,3 +232,11 @@ def span( else: # Return a dummy context manager that does nothing yield + + +def get_smolagents_console() -> LoguruRichConsole: + """ + Get a Rich console for smolagents that integrates with loguru. + This captures Rich console output and feeds it into the unified logging pipeline. + """ + return LoguruRichConsole() diff --git a/mxtoai/agents/email_agent.py b/mxtoai/agents/email_agent.py index f0c115a..fdd2543 100644 --- a/mxtoai/agents/email_agent.py +++ b/mxtoai/agents/email_agent.py @@ -11,17 +11,14 @@ # Add imports for the new default tools from smolagents.default_tools import ( - GoogleSearchTool, PythonInterpreterTool, VisitWebpageTool, - WebSearchTool, WikipediaSearchTool, ) -from mxtoai._logging import get_logger +from mxtoai._logging import get_logger, get_smolagents_console from mxtoai.models import ProcessingInstructions from mxtoai.prompts.base_prompts import ( - LIST_FORMATTING_REQUIREMENTS, MARKDOWN_STYLE_GUIDE, RESEARCH_GUIDELINES, RESPONSE_GUIDELINES, @@ -37,6 +34,7 @@ EmailContentDetails, EmailRequest, EmailSentStatus, + PDFExportResult, ProcessedAttachmentDetail, ProcessingError, ProcessingMetadata, @@ -45,10 +43,12 @@ from mxtoai.scripts.visual_qa import azure_visualizer from mxtoai.tools.attachment_processing_tool import AttachmentProcessingTool from mxtoai.tools.deep_research_tool import DeepResearchTool +from mxtoai.tools.external_data.linkedin import initialize_linkedin_data_api_tool, initialize_linkedin_fresh_tool +from mxtoai.tools.pdf_export_tool import PDFExportTool from mxtoai.tools.schedule_tool import ScheduleTool -# Import the refactored fallback search tool -from mxtoai.tools.search_with_fallback_tool import SearchWithFallbackTool +# Import the web search tools +from mxtoai.tools.web_search import BraveSearchTool, DDGSearchTool, GoogleSearchTool # Load environment variables load_dotenv(override=True) @@ -103,82 +103,106 @@ def __init__( self.visit_webpage_tool = VisitWebpageTool() self.python_tool = 
PythonInterpreterTool(authorized_imports=ALLOWED_PYTHON_IMPORTS) self.wikipedia_search_tool = WikipediaSearchTool() + self.pdf_export_tool = PDFExportTool() - # Initialize complex tools using helper methods - self.search_with_fallback_tool = self._initialize_search_tools() + # Initialize independent search tools + self.search_tools = self._initialize_independent_search_tools() self.research_tool = self._initialize_deep_research_tool(enable_deep_research) self.available_tools: list[Tool] = [ self.attachment_tool, self.schedule_tool, self.visit_webpage_tool, - self.search_with_fallback_tool, self.python_tool, self.wikipedia_search_tool, + self.pdf_export_tool, azure_visualizer, ] + + # Add all available search tools + self.available_tools.extend(self.search_tools) + if self.research_tool: self.available_tools.append(self.research_tool) + linkedin_fresh_tool = initialize_linkedin_fresh_tool() + if linkedin_fresh_tool: + self.available_tools.append(linkedin_fresh_tool) + + linkedin_data_api_tool = initialize_linkedin_data_api_tool() + if linkedin_data_api_tool: + self.available_tools.append(linkedin_data_api_tool) + logger.info(f"Agent tools initialized: {[tool.name for tool in self.available_tools]}") self._init_agent() logger.info("Email agent initialized successfully") def _init_agent(self): - """ - Initialize the ToolCallingAgent with Azure OpenAI. 
- """ + """Initialize the smolagents ToolCallingAgent.""" # Initialize the routed model with the default model group self.routed_model = RoutedLiteLLMModel() + # Create agent self.agent = ToolCallingAgent( model=self.routed_model, tools=self.available_tools, max_steps=12, - verbosity_level=2, + verbosity_level=2, # Increased back to 2 to capture detailed Rich console output planning_interval=4, name="email_processing_agent", description="An agent that processes emails, generates summaries, replies, and conducts research with advanced capabilities including web search, web browsing, and code execution.", provide_run_summary=True, ) - logger.debug("Agent initialized with routed model configuration") - def _initialize_search_tools(self) -> SearchWithFallbackTool: - """ - Initializes and configures the search tools, returning the SearchWithFallbackTool. + # Set up integrated Rich console that feeds into loguru/logfire pipeline + # This captures smolagents verbose output and integrates it with our unified logging + smolagents_console = get_smolagents_console() - Returns: - SearchWithFallbackTool: The configured search tool with Bing and DuckDuckGo as primary engines and Google as fallback. 
+ # Override agent's console with our loguru-integrated console + if hasattr(self.agent, "logger") and hasattr(self.agent.logger, "console"): + self.agent.logger.console = smolagents_console + if ( + hasattr(self.agent, "monitor") + and hasattr(self.agent.monitor, "logger") + and hasattr(self.agent.monitor.logger, "console") + ): + self.agent.monitor.logger.console = smolagents_console - """ - bing_search_tool = WebSearchTool(engine="bing", max_results=5) - logger.debug("Initialized WebSearchTool with Bing engine.") + logger.debug("Agent initialized with routed model configuration and loguru-integrated Rich console") - ddg_search_tool = WebSearchTool(engine="duckduckgo", max_results=5) - logger.debug("Initialized WebSearchTool with DuckDuckGo engine.") - - google_search_fallback_tool = self._initialize_google_search_tool() - - primary_search_engines: list[Tool] = [] - # Ensure tools are only added if successfully initialized (though WebSearchTool constructor doesn't typically fail here) - if bing_search_tool: # bing_search_tool is always initialized - primary_search_engines.append(bing_search_tool) - if ddg_search_tool: # ddg_search_tool is always initialized - primary_search_engines.append(ddg_search_tool) + def _initialize_independent_search_tools(self) -> list[Tool]: + """ + Initialize independent search tools for DDG, Brave, and Google. + The agent will be able to choose which search engine to use based on cost and quality needs. - if not primary_search_engines: # Should not happen with current WebSearchTool, but good practice - logger.warning( - "No primary search engines (Bing, DuckDuckGo) could be initialized for SearchWithFallbackTool." - ) + Returns: + list[Tool]: List of available search tools. 
+ """ + search_tools = [] + + # DDG Search - Always available (free) + ddg_tool = DDGSearchTool(max_results=10) + search_tools.append(ddg_tool) + logger.debug("Initialized DDG search tool (free, first choice)") + + # Brave Search - Available if API key is configured + if os.getenv("BRAVE_SEARCH_API_KEY"): + brave_tool = BraveSearchTool(max_results=5) + search_tools.append(brave_tool) + logger.debug("Initialized Brave search tool (moderate cost, better quality)") + else: + logger.warning("BRAVE_SEARCH_API_KEY not found. Brave search tool not initialized.") - search_tool = SearchWithFallbackTool( - primary_search_tools=primary_search_engines, fallback_search_tool=google_search_fallback_tool - ) + # Google Search - Available if API keys are configured + if os.getenv("SERPAPI_API_KEY") or os.getenv("SERPER_API_KEY"): + google_tool = GoogleSearchTool() + search_tools.append(google_tool) + logger.debug("Initialized Google search tool (premium cost, highest quality)") + else: + logger.warning("No Google Search API keys found. Google search tool not initialized.") - primary_names = [getattr(p, "engine", "UnknownEngine") for p in primary_search_engines] - fallback_name = getattr(google_search_fallback_tool, "name", "None") if google_search_fallback_tool else "None" - logger.info(f"Initialized SearchWithFallbackTool. Primary engines: {primary_names}, Fallback: {fallback_name}") - return search_tool + logger.info(f"Initialized {len(search_tools)} independent search tools: {[tool.name for tool in search_tools]}") + return search_tools def _get_required_actions(self, mode: str) -> list[str]: """ @@ -200,35 +224,6 @@ def _get_required_actions(self, mode: str) -> list[str]: actions.append("Conduct research") return actions - def _initialize_google_search_tool(self) -> Optional[GoogleSearchTool]: - """ - Initialize Google search tool with either SerpAPI or Serper provider. 
- - Returns: - Optional[GoogleSearchTool]: Initialized GoogleSearchTool instance or None if initialization fails - - """ - if os.getenv("SERPAPI_API_KEY"): - try: - tool = GoogleSearchTool(provider="serpapi") - logger.debug("Initialized GoogleSearchTool with SerpAPI for fallback.") - return tool - except ValueError as e: - logger.warning(f"Failed to initialize GoogleSearchTool with SerpAPI for fallback: {e}") - elif os.getenv("SERPER_API_KEY"): - try: - tool = GoogleSearchTool(provider="serper") - logger.debug("Initialized GoogleSearchTool with Serper for fallback.") - return tool - except ValueError as e: - logger.warning(f"Failed to initialize GoogleSearchTool with Serper for fallback: {e}") - else: - logger.warning( - "GoogleSearchTool (for fallback) not initialized. Missing SERPAPI_API_KEY or SERPER_API_KEY." - ) - - return None - def _initialize_deep_research_tool(self, enable_deep_research: bool) -> Optional[DeepResearchTool]: """ Initializes the DeepResearchTool if API key is available. 
@@ -378,7 +373,7 @@ def _create_task_template( output_template, RESPONSE_GUIDELINES, MARKDOWN_STYLE_GUIDE, - LIST_FORMATTING_REQUIREMENTS, + # LIST_FORMATTING_REQUIREMENTS, ] return "\n\n".join(filter(None, sections)) @@ -399,6 +394,8 @@ def _process_agent_result( research_output_findings: Union[str, None] = None research_output_metadata: Union[AgentResearchMetadata, None] = None + pdf_export_result: Union[PDFExportResult, None] = None + final_answer_from_llm: Union[str, None] = None email_text_content: Union[str, None] = None email_html_content: Union[str, None] = None @@ -425,7 +422,12 @@ def _process_agent_result( tool_output = action_out if action_out is not None else obs_out if tool_name and tool_output is not None: - needs_parsing = tool_name in ["schedule_generator", "attachment_processor", "deep_research"] + needs_parsing = tool_name in [ + "schedule_generator", + "attachment_processor", + "deep_research", + "pdf_export", + ] if isinstance(tool_output, str) and needs_parsing: try: tool_output = ast.literal_eval(tool_output) @@ -491,6 +493,27 @@ def _process_agent_result( else: error_msg = tool_output.get("message", "Schedule generator failed or missing ICS content.") errors_list.append(ProcessingError(message="Schedule Tool Error", details=error_msg)) + + elif tool_name == "pdf_export" and isinstance(tool_output, dict): + if tool_output.get("success"): + pdf_export_result = PDFExportResult( + filename=tool_output.get("filename", "document.pdf"), + file_path=tool_output.get("file_path", ""), + file_size=tool_output.get("file_size", 0), + title=tool_output.get("title", "Document"), + pages_estimated=tool_output.get("pages_estimated", 1), + mimetype=tool_output.get("mimetype", "application/pdf"), + temp_dir=tool_output.get("temp_dir"), + ) + logger.info(f"PDF export successful: {pdf_export_result.filename}") + else: + error_msg = tool_output.get("error", "PDF export failed") + details = tool_output.get("details", "") + errors_list.append( + 
ProcessingError(message="PDF Export Error", details=f"{error_msg}. {details}") + ) + logger.error(f"PDF export failed: {error_msg}") + else: logger.debug( f"[Memory Step {i + 1}] Tool '{tool_name}' output processed (no specific handler). Output: {str(tool_output)[:200]}..." @@ -592,6 +615,7 @@ def _process_agent_result( ) if research_output_findings or research_output_metadata else None, + pdf_export=pdf_export_result, ) except Exception as e: @@ -649,6 +673,7 @@ def _process_agent_result( ) if research_output_findings or research_output_metadata else None, + pdf_export=pdf_export_result, ) def process_email( @@ -722,4 +747,5 @@ def process_email( attachments=AttachmentsProcessingResult(processed=[]), calendar_data=None, research=None, + pdf_export=None, ) diff --git a/mxtoai/api.py b/mxtoai/api.py index be1f8d7..be06f9e 100644 --- a/mxtoai/api.py +++ b/mxtoai/api.py @@ -13,6 +13,7 @@ from fastapi import Depends, FastAPI, File, Form, HTTPException, Response, UploadFile, status from fastapi.security import APIKeyHeader +from mxtoai import validators from mxtoai._logging import get_logger from mxtoai.agents.email_agent import EmailAgent from mxtoai.config import ATTACHMENTS_DIR, SKIP_EMAIL_DELIVERY @@ -33,7 +34,6 @@ validate_email_whitelist, validate_rate_limits, ) -from mxtoai import validators # Load environment variables load_dotenv() @@ -467,6 +467,29 @@ async def process_email( Response: FastAPI Response object with JSON content """ + # Skip processing for AWS SES system emails + if from_email.endswith("@amazonses.com") or ".amazonses.com" in from_email: + logger.info(f"Skipping processing for AWS SES system email: {from_email} (subject: {subject})") + logger.info(f"AWS SES email content - Text: {textContent}") + logger.info(f"AWS SES email content - HTML: {htmlContent}") + if rawHeaders: + try: + parsed_headers = json.loads(rawHeaders) + logger.info(f"AWS SES email headers: {json.dumps(parsed_headers, indent=2)}") + except json.JSONDecodeError: + 
logger.warning(f"Could not parse AWS SES email headers: {rawHeaders}") + return Response( + content=json.dumps( + { + "message": "Skipped processing AWS SES system email", + "email": from_email, + "status": "skipped", + } + ), + status_code=status.HTTP_200_OK, + media_type="application/json", + ) + # Validate API key if response := await validate_api_key(api_key): return response diff --git a/mxtoai/email_handles.py b/mxtoai/email_handles.py index f8a775e..9b029ec 100644 --- a/mxtoai/email_handles.py +++ b/mxtoai/email_handles.py @@ -18,7 +18,7 @@ process_attachments=True, deep_research_mandatory=True, add_summary=True, - target_model="gpt-4-reasoning", + target_model="gpt-4", task_template=template_prompts.RESEARCH_TEMPLATE, output_template=output_prompts.RESEARCH_OUTPUT_GUIDELINES, ), @@ -45,7 +45,7 @@ aliases=["factcheck", "verify"], process_attachments=True, deep_research_mandatory=False, - target_model="gpt-4-reasoning", + target_model="gpt-4", task_template=template_prompts.FACT_TEMPLATE, output_template=output_prompts.FACT_CHECK_OUTPUT_GUIDELINES, ), @@ -54,7 +54,7 @@ aliases=["background-check", "background"], process_attachments=True, deep_research_mandatory=False, - target_model="gpt-4-reasoning", + target_model="gpt-4", task_template=template_prompts.BACKGROUND_RESEARCH_TEMPLATE, output_template=output_prompts.BACKGROUND_OUTPUT_GUIDELINES, ), @@ -77,4 +77,13 @@ task_template=template_prompts.SCHEDULE_TEMPLATE, output_template=output_prompts.SCHEDULE_OUTPUT_GUIDELINES, ), + ProcessingInstructions( + handle="pdf", + aliases=["export", "convert", "document", "export-pdf"], + process_attachments=True, + deep_research_mandatory=False, + target_model="gpt-4", + task_template=template_prompts.PDF_EXPORT_TEMPLATE, + output_template=output_prompts.PDF_EXPORT_OUTPUT_GUIDELINES, + ), ] diff --git a/mxtoai/email_provider_domains.txt b/mxtoai/email_provider_domains.txt index eefd17b..4e0afe2 100644 --- a/mxtoai/email_provider_domains.txt +++ 
b/mxtoai/email_provider_domains.txt
@@ -2172,7 +2172,7 @@
 justicemail.com
 justmail.de
 justmailz.com
 justmarriedmail.com
-jwspamspy
+jwspamspy
 k.ro
 kaazoo.com
 kabissa.org
@@ -6101,4 +6101,4 @@
 zybermail.com
 zydecofan.com
 zzn.com
 zzom.co.uk
-zzz.com
\ No newline at end of file
+zzz.com
diff --git a/mxtoai/prompts/base_prompts.py b/mxtoai/prompts/base_prompts.py
index 088066f..aa9484b 100644
--- a/mxtoai/prompts/base_prompts.py
+++ b/mxtoai/prompts/base_prompts.py
@@ -6,7 +6,7 @@
 MARKDOWN FORMATTING REQUIREMENTS:
 - **bold** for emphasis
 - _italics_ for quotes
-- ### for section headers (if needed)
+- Strictly use `###` for section headers
 - Proper bullet points and numbered lists
 - Clear paragraph spacing
 """
@@ -18,32 +18,42 @@
 - Include only relevant information
 - Maintain appropriate tone and style
 - Use proper spacing and formatting
-- ALWAYS Indent each nested level with two spaces
+- Try to maintain visual hierarchy of the response using section headers and lists
+- NEVER add numbers to section headers
 - DO NOT add any signature - it will be added automatically
+- If web search tools were used, create a 'References' section at the end of your response. List the titles and URLs of the web pages used, formatted as markdown links (e.g., `1. [Page Title](URL)`).
+
+SEARCH TOOL SELECTION GUIDELINES:
+- **ddg_search**: Use first for most queries (free and fast)
+- **brave_search**: Use when DDG results are insufficient or you need better quality/more comprehensive information (moderate API cost)
+- **google_search**: Use only when DDG and Brave are insufficient (premium API cost, highest quality)
+- Choose search tools based on the importance, complexity and quality of search results received
+- Whenever you use a search tool, keep track of the links you visited in memory and later add them as references.
 """

 # Formatting requirements for HTML conversion
-LIST_FORMATTING_REQUIREMENTS = """
-NESTED LIST OUTPUT FORMAT GUIDELINES (for Markdown to HTML conversion):
-
-1. 
Always begin with a **numbered list** (use `1.`). -2. **Alternate between numbered and bullet lists** at each level of nesting: - - Level 1: `1.`, `2.`, `3.` (numbered) - 1. Level 2: `-` (bullet) - - Level 3: `1.`, `2.`, `3.` (numbered) - 1. Level 4: `-` (bullet) - - And so on... -3. Use **blank lines** between paragraphs and between different list levels. - -Example: - -1. Main point - - Sub-point - 1. Sub-sub-point - - Sub-sub-sub-point - -All list sections **must follow this structure exactly**. Improper nesting or use of list styles will break the HTML conversion. -""" +# Not needed anymore, still keeping it for a while just in case +# LIST_FORMATTING_REQUIREMENTS = """ +# NESTED LIST OUTPUT FORMAT GUIDELINES (for Markdown to HTML conversion): + +# 1. Always begin with a **numbered list** (use `1.`). +# 2. **Alternate between numbered and bullet lists** at each level of nesting: +# - Level 1: `1.`, `2.`, `3.` (numbered) +# 1. Level 2: `-` (bullet) +# - Level 3: `1.`, `2.`, `3.` (numbered) +# 1. Level 4: `-` (bullet) +# - And so on... +# 3. Use **blank lines** between paragraphs and between different list levels. + +# Example: + +# 1. Main point +# - Sub-point +# 1. Sub-sub-point +# - Sub-sub-sub-point + +# All list sections **must follow this structure exactly**. Improper nesting or use of list styles will break the HTML conversion. 
+# """ # Research guidelines RESEARCH_GUIDELINES = { @@ -53,11 +63,13 @@ - Ensure comprehensive research before responding - Include citations and sources in your response - Synthesize findings with the email content +- Use appropriate search tools based on cost/quality needs (ddg_search > brave_search > google_search) """, "optional": """ RESEARCH GUIDELINES: -- Deep research is NOT allowed for this handle - Only use basic tools and provided information - Focus on addressing the direct content of the email +- If web search is needed, start with ddg_search for cost-effectiveness +- Escalate to brave_search or google_search only if better results are needed """, } diff --git a/mxtoai/prompts/output_prompts.py b/mxtoai/prompts/output_prompts.py index 06d4080..5436b39 100644 --- a/mxtoai/prompts/output_prompts.py +++ b/mxtoai/prompts/output_prompts.py @@ -22,6 +22,7 @@ 3. Detailed Analysis: In-depth exploration with subheadings 4. Supporting Evidence: Data, quotes, statistics 5. References: Numbered citations with links when available +6. Have separate sections for each of the above mentioned. """ # Simplify handler output guidelines @@ -37,7 +38,7 @@ # Ask handler output guidelines ASK_OUTPUT_GUIDELINES = """ Output Format Guidelines: -1. Begin with acknowledgment of the question +1. Begin with acknowledgment of the question at the top of the response. Then begin any section. 2. Structure response with clear sections 3. Use examples to illustrate complex points 4. Include actionable recommendations when applicable @@ -47,11 +48,12 @@ # Fact-check handler output guidelines FACT_CHECK_OUTPUT_GUIDELINES = """ Output Format Guidelines: -1. Present each claim in this format: +1. Present a short summary of the original email to setup the context. +2. 
Present each claim in this format:
   - **Claim**: [Original statement]
   - **Status**: [Verified ✓ / Not verified ❌ / Partially verified ⚠️]
   - **Evidence**: [Supporting information]
-  - **Sources**: [Citations with links]
+  - **Sources**: [Citations with links, make sure the links are valid]
-2. Use consistent status symbols throughout
+3. Use consistent status symbols throughout
 """
@@ -61,7 +63,7 @@
 1. Start with executive summary of key findings
 2. Organize information by entity (person, organization, domain)
 3. Use tables for comparative information
-4. Flag any security concerns prominently
+4. Flag any concerns prominently
 """

 # Translation handler output guidelines
@@ -84,3 +86,20 @@
 - **Notes**: Any assumptions or clarifications
 2. Format times consistently with timezone
 """
+
+# PDF Export handler output guidelines
+PDF_EXPORT_OUTPUT_GUIDELINES = """
+Output Format Guidelines:
+1. Begin with a brief confirmation of PDF generation
+2. Include document details:
+   - **PDF Title**: Clear, descriptive document name
+   - **Content Summary**: What was included in the export
+   - **File Size/Pages**: Approximate document metrics
+   - **Attachment Notice**: Confirmation that PDF is attached
+3. Content processing notes:
+   - What content was included/excluded and why
+   - Any assumptions made during processing
+   - Quality of source material for export
+4. Professional tone acknowledging the export request
+5. Keep response concise - let the PDF be the main deliverable
+"""
diff --git a/mxtoai/prompts/template_prompts.py b/mxtoai/prompts/template_prompts.py
index 042f8bb..34bf452 100644
--- a/mxtoai/prompts/template_prompts.py
+++ b/mxtoai/prompts/template_prompts.py
@@ -4,19 +4,71 @@

 # Summarize email handler template
 SUMMARIZE_TEMPLATE = """
-Provide a concise, direct summary of the key points from the email and attachments.
+Systematically analyze and summarize content from all available sources with clear structure and action focus.

-Content Guidelines:
-1. Get straight to the key points
-2. 
No redundant introductions -3. Include only relevant information -4. Keep it concise but complete -5. Use a natural, conversational tone - -Remember: -- If the user has specific intent, then focus on what the user asked about -- Skip unnecessary formality -- Ensure proper markdown formatting +# Summarization Process + +## STEP 1: Content Analysis +- **Process ALL sources**: Email content, attachments, embedded links, external references(if asked) +- **Assess complexity**: Determine appropriate detail level (concise/detailed/executive summary) +- **Identify priorities**: Key messages, action items, deadlines, stakeholder impact + +## STEP 2: Structured Summary Format +``` +## Executive Summary +[2-3 sentences capturing core message and significance] + +## Main Points +[Organized breakdown of key information from all sources] + +## Action Items +- [Specific actions required with deadlines] +- [Responsible parties if mentioned] + +## Additional Context +[Important background, implications, supporting details] +``` + +## STEP 3: Quality Standards +- **Process all content sources** before summarizing +- **Highlight action items** clearly +- **Note any inaccessible content** transparently +- **Match detail level** to content complexity +- **Maintain context** while being concise + +**Example Output:** +``` +## Executive Summary +Q3 sales report shows 12% revenue growth with West region leading performance, requiring strategy review meeting. 
+ +## Key Information +- **From**: Sales Director Sarah Chen +- **Topic**: Q3 2024 Sales Performance Review +- **Urgency**: Standard quarterly review +- **Stakeholders**: Management team, regional leads + +## Main Points +**Sales Performance (from Excel attachment):** +- Total revenue: $4.2M (12% increase from Q2) +- West region: 23% growth, exceeding targets +- Product line A: 18% growth, strongest performer +- Customer acquisition: 156 new accounts + +## Action Items +- Review West region strategies for replication +- Address East region performance decline +- Quarterly review meeting: Next Friday + +## Additional Context +Strong Q3 performance driven by West region success and Product A growth. East region needs attention. +``` + +**Critical Requirements:** +- Process ALL available content sources (email, attachments, links) +- Structure information for easy scanning +- Clearly identify action items and deadlines +- Note any content processing limitations +- Adapt detail level to content complexity """ # Research handler template @@ -31,7 +83,7 @@ - ### Detailed Analysis - ### Supporting Evidence - ### References -2. Include proper citations [1], [2], etc. +2. Include proper citations [1], [2], etc. if the deep_research tool provides them. For web_search results, extract the title and URL for each source and list them under the 'References' section using markdown link format (e.g., 1. [Page Title](URL)). 3. Format tables using markdown table syntax 4. Use proper paragraph spacing @@ -45,152 +97,1013 @@ # Simplify handler template SIMPLIFY_TEMPLATE = """ -Explain the content in simple, easy-to-understand terms without technical jargon, like you're explaining to a 5-year-old. +Transform complex content into clear, accessible explanations using simple language and relatable examples. -Content Guidelines: -1. Use simple language -2. Avoid technical terms -3. Give everyday examples -4. Keep explanations short -5. 
Use bullet points for clarity +# Simplification Process + +## STEP 1: Complexity Assessment +- **Identify complexity sources**: Technical jargon, abstract concepts, complex processes, dense information +- **Determine target level**: General public understanding (assume no specialized knowledge) +- **Preserve core truth**: Maintain essential accuracy while removing complexity + +## STEP 2: Simplification Strategy +**Language Techniques:** +- Replace technical terms with everyday language(if replacement is not possible add dictionary at the end) +- Break complex sentences into shorter, clearer ones +- Use active voice and concrete examples +- Add helpful analogies from familiar experiences + +**Structure Format:** +``` +## The Simple Version +[One clear sentence explaining the core concept] + +## What This Means +[2-3 sentences expanding on the main idea] + +## Here's How It Works +[Step-by-step breakdown in simple terms] + +## Think of It Like This +[Relatable analogy or real-world example] + +## Why This Matters +[Practical significance in everyday terms] + +## The Bottom Line +[Key takeaway anyone can remember] +``` + +## STEP 3: Quality Check +- Could a 12-year-old understand the main point? +- Are technical terms explained or replaced? +- Do analogies help rather than confuse? +- Is the essential message preserved? + +**Requirements:** +- Use simple, everyday language +- Include helpful analogies and examples +- Preserve accuracy while removing jargon +- Make content accessible to general audiences +- Maintain respectful tone (not condescending) """ # Ask handler template ASK_TEMPLATE = """ -Provide a complete response addressing all aspects of the query. +Execute custom tasks and workflows systematically with research, analysis, and professional presentation. -Content Guidelines: -1. Brief summary of understanding -2. Detailed response -3. Additional insights if relevant -4. 
Next steps or recommendations +# General Task Execution Process + +## STEP 1: Task Analysis & Planning +- **Understand the request**: Break down what the user wants accomplished +- **Identify components**: Research needs, data gathering, analysis, formatting requirements +- **Determine approach**: What tools and steps are needed to complete this task +- **Set quality standards**: How should the final output be structured and presented + +## STEP 2: Systematic Execution +**Research & Data Gathering:** +- Use web search for current information and trends +- Visit relevant websites and sources +- Process any attachments or provided materials(if needed) +- Gather comprehensive data before analysis + +**Analysis & Curation:** +- Filter and prioritize information based on relevance and quality +- Identify key insights, patterns, or important details +- Apply criteria for selection (trending, popularity, importance) +- Add context and explanatory information + +**Content Creation:** +- Structure information logically and professionally +- Create engaging and informative content +- Include proper citations and links +- Format for easy reading and comprehension + +## STEP 3: Professional Presentation +**Standard Output Structure:** +``` +## [Task Title/Summary] +[Brief overview of what was accomplished] + +## [Main Content Sections] +[Organized, formatted content with clear headers] + +### [Subsections as needed] +- [Bullet points, lists, or structured information] +- [Include links, sources, and references] + +## Key Insights/Summary +[Important takeaways or conclusions] + +## Sources & References +[All sources used with proper attribution] +``` + +## STEP 4: Quality Standards +- **Comprehensive research** using available tools +- **Professional formatting** with clear structure +- **Accurate information** with proper source attribution +- **Engaging presentation** that's easy to read and understand +- **Complete execution** of all requested components + +**Example Task: 
"Prepare a newsletter with top 10 trending HN posts"** + +``` +## Hacker News Top 10 Trending Posts Newsletter +Daily digest of the most engaging discussions and innovations from the HN community. + +## Today's Top Trending Posts + +### 1. [Post Title](HN-link) +**Summary**: Brief description of the post content and significance +**Why It's Trending**: Key reasons for community engagement +**Discussion Highlights**: Notable comments or insights from HN users +**Relevance**: Why this matters to the tech community + +### 2. [Next Post Title](HN-link) +[Same format structure] + +[Continue for all 10 posts] + +## Key Themes Today +- [Pattern 1]: Multiple posts about [topic] +- [Pattern 2]: Community interest in [area] +- [Pattern 3]: Emerging trends in [field] + +## Community Insights +Notable discussions, debates, or expert opinions from today's conversations. + +## Sources +- Hacker News front page and trending algorithms +- Individual post discussions and comment threads +- Community engagement metrics and voting patterns +``` + +**Requirements:** +- Execute any custom task or workflow systematically +- Use all available tools for research and analysis +- Present results professionally with proper structure +- Include comprehensive sources and attribution +- Adapt format and approach to specific task requirements """ # Fact-check handler template FACT_TEMPLATE = """ -Validate and fact-check the content thoroughly. Use web search tool to find reliable sources alongside deep search tool. -Do not use deep search directly, use web search and page visit tool, if you're not satisfied with results, then only try deep search. +Systematically verify claims and statements with comprehensive source validation and transparent uncertainty handling. -Response Requirements: -1. 
Use proper markdown formatting: - - **Claim**: for stating each claim - - _Source_: for source citations - - ✓ or ❌ for verification status - - Bullet points for supporting evidence - - [text](url) for reference links -2. Structure each fact-check: - - Original claim - - Verification status - - Supporting evidence - - Source citations -3. Use clear paragraph breaks between checks +# Fact-Checking Methodology - SYSTEMATIC VERIFICATION PROCESS -Content Guidelines: -1. State each claim clearly -2. Provide verification status -3. Include supporting evidence -4. Cite reliable sources -5. Note any uncertainties -6. Always give a disclaimer that sometimes links may be outdated or incorrect depending on age of the source +## STEP 1: CLAIM EXTRACTION & CATEGORIZATION +**Extract ALL verifiable claims from the content:** +- **Factual Claims**: Statistics, dates, events, scientific facts +- **Attribution Claims**: Quotes, statements attributed to people/organizations +- **Causal Claims**: "X causes Y", "Due to X, Y happened" +- **Comparative Claims**: Rankings, comparisons, "better/worse than" +- **Current Status Claims**: Current prices, status, availability + +**Claim Prioritization:** +- **High Priority**: Core claims central to the message +- **Medium Priority**: Supporting details and context +- **Low Priority**: Tangential or well-established facts + +## STEP 2: SYSTEMATIC VERIFICATION STRATEGY +**Verification Hierarchy:** +1. **Primary Sources**: Official websites, government data, organization statements +2. **Academic Sources**: Peer-reviewed research, institutional studies, wikipedia +3. **Established News Sources**: Reuters, AP, BBC, established newspapers +4. **Industry Sources**: Trade publications, industry reports +5. **Secondary Analysis**: Expert commentary, analysis pieces + +**Search Strategy:** +1. **Direct Claim Search**: Search exact claim or paraphrased version +2. **Source Verification**: Search for original source of claimed information +3. 
**Counter-Evidence Search**: Actively search for contradicting information +4. **Recent Updates**: Check for more recent information that might contradict +5. **Context Search**: Understand broader context around the claim + +## STEP 3: SOURCE QUALITY ASSESSMENT +**Evaluate each source on:** + +✓ Authority: Is the source authoritative on this topic? +✓ Recency: How current is the information? +✓ Bias Assessment: Any obvious political, commercial, or ideological bias? +✓ Corroboration: Do multiple independent sources agree? +✓ Original vs. Secondary: Is this the original source or reporting on it? +✓ Methodology: For studies/surveys, is methodology sound? + + +## STEP 4: VERIFICATION STATUS DETERMINATION +**Classification System:** +- ✅ **VERIFIED**: Multiple reliable sources confirm +- ⚠️ **PARTIALLY VERIFIED**: Some aspects confirmed, others unclear +- ❌ **FALSE**: Reliable sources contradict the claim +- 🔍 **UNVERIFIABLE**: Insufficient reliable sources available +- 📅 **OUTDATED**: Was true but circumstances have changed +- 🤔 **DISPUTED**: Reliable sources disagree + +## STEP 5: UNCERTAINTY & LIMITATION HANDLING +**When verification is unclear:** +- **Multiple conflicting sources**: Present different perspectives with source quality +- **Insufficient information**: Clearly state limitations and what's unknown +- **Rapidly changing situations**: Note information currency and change potential +- **Complex claims**: Break down into verifiable components + +**Fallback Strategy:** +``` +Unable to fully verify this claim due to: + +**Verification Challenges:** +- [Specific challenge, e.g., "Limited reliable sources available"] +- [Another challenge, e.g., "Conflicting expert opinions"] + +**What We Found:** +- [Partial information available] +- [Related verified information] + +**Recommendation:** +- [Suggested approach for the user] +- [When to check for updates] +``` + +## STEP 6: COMPREHENSIVE REPORTING FORMAT + +**For Each Claim:** +``` +**Claim**: [Original 
statement] +**Status**: [Verification symbol + status] +**Evidence**: +- [Supporting evidence with source quality] +- [Contradicting evidence if any] +**Source Quality**: [Assessment of primary sources used] +**Last Updated**: [When information was verified] +**Notes**: [Important context, limitations, or nuances] +``` + +# FACT-CHECKING EXAMPLES + +## Example 1: Statistical Claim +**Original**: "Carbon emissions increased by 15% in 2023 globally" + +### Verification Process: +1. **Search Strategy**: "global carbon emissions 2023 statistics" +2. **Sources Found**: IEA Global Energy Review, UN Environment Programme +3. **Cross-verification**: Check multiple climate monitoring organizations + +### Result: +**Claim**: Carbon emissions increased by 15% in 2023 globally +**Status**: ❌ **FALSE** +**Evidence**: +- IEA data shows 1.1% increase in 2023, not 15% +- Multiple climate organizations report similar ~1% increase +**Source Quality**: High (authoritative international organizations) +**Last Updated**: Based on 2023 year-end data +**Notes**: The 15% figure appears to be confused with a different metric or time period + +## Example 2: Attribution Claim with Uncertainty +**Original**: "Elon Musk said Tesla will achieve full self-driving by end of 2024" + +### Verification Process: +1. **Quote Search**: Search for exact or similar statements +2. **Timeline Search**: Check recent Musk statements on FSD timeline +3. 
**Context Search**: Understand history of similar predictions + +### Result: +**Claim**: Elon Musk said Tesla will achieve full self-driving by end of 2024 +**Status**: 🔍 **UNVERIFIABLE** (specific quote) +**Evidence**: +- Musk has made multiple FSD timeline predictions +- No exact quote found for "end of 2024" +- Pattern of similar predictions that were later revised +**Source Quality**: Mixed (social media posts, interviews, earnings calls) +**Last Updated**: [Current date] +**Notes**: Musk frequently revises FSD timelines; recommend checking recent official Tesla communications + +## Example 3: Complex Multi-Part Claim +**Original**: "The new AI regulation will cost businesses $50B annually and reduce innovation by 30%" + +### Verification Breakdown: +1. **Cost Component**: Search for economic impact studies +2. **Innovation Component**: Look for innovation metrics and projections +3. **Regulation Specificity**: Identify which specific regulation + +### Result: +**Claim**: The new AI regulation will cost businesses $50B annually and reduce innovation by 30% +**Status**: ⚠️ **PARTIALLY VERIFIED** +**Evidence**: +- Cost estimates vary widely ($20B-$80B across different studies) +- No specific studies found supporting 30% innovation reduction +- Impact highly dependent on implementation details +**Source Quality**: Medium (industry estimates, some academic analysis) +**Last Updated**: [Current date] +**Notes**: Economic projections for new regulations are inherently uncertain; multiple scenarios exist + +**SYSTEMATIC REQUIREMENTS:** +- **ALWAYS search for counter-evidence** to avoid confirmation bias +- **ALWAYS assess source quality** and note limitations +- **ALWAYS distinguish between** "not verified" and "false" +- **ALWAYS provide context** for complex or nuanced claims +- **ALWAYS note information currency** and potential for updates +- **ALWAYS break down complex claims** into verifiable components + +**Content Guidelines:** +1. 
**Transparent methodology** - explain verification approach +2. **Source transparency** - clearly cite sources and assess quality +3. **Uncertainty acknowledgment** - be honest about limitations +4. **Actionable results** - provide clear verification status +5. **Context preservation** - maintain nuance and complexity +6. **Update recommendations** - suggest when to re-verify +7. **Bias awareness** - acknowledge potential verification biases + +**Critical Verification Standards:** +- Multiple independent sources for verification +- Active search for contradictory evidence +- Clear distinction between correlation and causation +- Recognition of context-dependent claims +- Transparent limitations and uncertainty acknowledgment """ # Background research handler template BACKGROUND_RESEARCH_TEMPLATE = """ -Research identities mentioned in email including names, email addresses, and domains. Focus on finding background information about the sender and other parties mentioned. -Do not use deep search directly, use web search and page visit tool, if you're not satisfied with results, then only try deep search. +Conduct comprehensive business intelligence research on individuals and organizations mentioned in the email. +This is strategic research to support business decisions, not just basic background information. -Response Requirements: -1. Structure with clear sections: - - ### Executive Summary - - ### Key Findings - - ### Detailed Analysis - - ### Supporting Evidence - - ### References -2. Include proper citations [1], [2], etc. -3. Format tables using markdown table syntax -4. Use proper paragraph spacing +# Research Methodology - CRITICAL PROCESS -Content Guidelines: -1. Focus on relevant background -2. Include verifiable information -3. Note information sources -4. Maintain professional tone -5. Flag any concerns -6. 
Always give a disclaimer that sometimes links may be outdated or incorrect depending on age of the source +## STEP 1: INFORMATION EXTRACTION & VERIFICATION +**Extract ALL available identifiers from the email content:** +- Full names (first, middle, last) +- Email addresses +- Company names and variations +- Job titles or roles +- Any additional context clues (locations, mutual connections, etc.) + +**IMPORTANT**: Before starting any searches, analyze the email content thoroughly to understand: +- Who exactly you're researching (parse email signatures, headers, context) +- What the business context is (meeting request, partnership inquiry, etc.) +- Any specific details that could help distinguish the right person/company + +## STEP 2: SYSTEMATIC SEARCH STRATEGY +**Phase 1 - Targeted Combined Searches:** +1. **Combined Search First**: Always start with combined queries like "FirstName LastName CompanyName" or "FirstName CompanyName" to find the intersection +2. **Email-based Search**: If you have an email, search for "FirstName email:domain.com" or "CompanyName email:domain.com" +3. **Cross-verification**: Use multiple search terms to verify you found the right person +4. 
**Typo Resilience**: If exact name searches fail, try common variations: + - **Missing/extra letters**: "Maxx Henlay" → try "Max Henley", "Max Henlay" + - **Common substitutions**: "ph/f", "c/k", "i/y", "ou/u" + - **Double letters**: "Connor" → try "Conor", "Connors" + - **Similar sounding**: Use phonetic variations if initial search fails + - **CRITICAL**: When you find a match with corrected spelling, clearly highlight this in your response + +**Phase 2 - LinkedIn Strategic Search:** +- Use `linkedin_data_api` for SEARCHING and FINDING profiles/companies when you don't have LinkedIn URLs: + - **Action: search_people** - Primary tool for finding people by: + - `first_name` + `last_name` + `company` (most targeted) + - If exact match fails, try spelling variations systematically + - `keywords` (combined name and company terms) + - `keyword_title` (job title keywords) + - Multiple parameters can be combined for precision + - **Action: search_companies** - For finding companies by: + - `keyword` (company name or description) + - Can add location, industry filters if needed + - **Action: get_profile_by_url** - If you have a LinkedIn URL from web search + - **Action: get_profile_data** - If you have a LinkedIn username (not common) + +- Use `linkedin_fresh_data` for getting DETAILED profile data from confirmed LinkedIn URLs: + - **Action: get_linkedin_profile** - For detailed individual profile data when you have confirmed LinkedIn URL + - Include optional sections: `include_skills=true`, `include_certifications=true` for comprehensive research + - **Action: get_company_by_linkedin_url** - For detailed company data when you have confirmed LinkedIn company URL + - **CRITICAL**: Only use this tool AFTER you have confirmed LinkedIn URLs from previous searches + +**Phase 3 - Validation Searches:** +- Search for the person/company name + recent news/updates +- Look for any conflicting information that might indicate wrong identification +- Cross-reference details across 
multiple sources +- **Spelling Verification**: If you used a corrected spelling, verify this is the correct name through multiple sources + +## STEP 3: IDENTITY CONFIRMATION PROTOCOL +**Before proceeding with detailed research, confirm you have the RIGHT person/company:** + + +✓ Does the company association match? (email domain, mentioned company, etc.) +✓ Do the role/title indicators align with context? +✓ Are there geographical indicators that match? +✓ Do any mutual connections or context clues align? +✓ Is there recent activity that supports this identification? + + +**If ANY verification point fails or is uncertain:** +- **STOP the research process** +- **DO NOT proceed with fabricated or uncertain information** +- **Request clarification from user** (see Step 5) + +## STEP 4: COMPREHENSIVE RESEARCH (Only After Confirmed Identity) + +### Research Strategy & Tool Usage: +- Start with web search to identify LinkedIn profiles, company pages, and recent news +- Use LinkedIn tools strategically: + - **linkedin_data_api**: For SEARCHING and finding profiles/companies + - search_people: Find people by name + company combination + - search_companies: Find companies by name/keyword + - get_profile_by_url: Get profile data if you have LinkedIn URL + - **linkedin_fresh_data**: For DETAILED data extraction from confirmed URLs + - get_linkedin_profile: Comprehensive profile data (with skills, certifications, etc.) 
+ - get_company_by_linkedin_url: Detailed company information +- Cross-reference information across multiple sources for accuracy +- Focus on business relevance - what matters for the decision at hand +- **MANDATORY**: Keep detailed notes of ALL links visited and sources used for references section + +## STEP 5: FALLBACK STRATEGY - REQUEST CLARIFICATION + +**When to use the fallback strategy:** +- Multiple profiles found with same name but unclear which is correct +- Company association unclear or contradictory +- Insufficient unique identifiers to confirm identity +- Any doubt about accuracy of identification + +**Fallback Response Format:** +``` +I found multiple potential matches for [name/company] but need clarification to ensure accuracy: + +**Potential Matches Found:** +1. [Name] at [Company] - [brief description] +2. [Name] at [Company] - [brief description] + +**To provide accurate research, could you please clarify:** +- [Specific question about company/role/location] +- [Any additional identifying information] +- [Context that would help distinguish the right person] + +This will help me provide reliable business intelligence rather than potentially incorrect information. +``` + +# SUCCESSFUL RESEARCH EXAMPLE + +## Example Query: "Research background on sarah.chen@techstartup.io" + +### Step 1: Information Extraction +- Name: Sarah Chen +- Email domain: techstartup.io +- Company: TechStartup (from domain) + +### Step 2: Search Strategy +1. **Combined Search**: "Sarah Chen TechStartup" via Web Search tool +2. **Email Search**: "Sarah Chen techstartup.io" via Web Search tool +3. **LinkedIn Search**: Use linkedin_data_api with action "search_people": + - first_name: "Sarah" + - last_name: "Chen" + - company: "TechStartup" + +### Step 3: Verification +✓ Found Sarah Chen, CTO at TechStartup Inc. 
+✓ Email domain matches company +✓ Role aligns with technical email signature +✓ Location and timeline consistent + +## Example with Typo Correction: "Research background on Vetri Vellor at Stych India" + +### Step 1: Information Extraction +- Name: Vetri Vellor (potential typo) +- Company: Stych India + +### Step 2: Search Strategy with Typo Handling +1. **Initial Search**: "Vetri Vellor Stych India" - No clear matches +2. **Typo Variations**: Try "Vetri Vellore", "Vetri Veller" with Stych India +3. **LinkedIn Search**: linkedin_data_api "search_people": + - first_name: "Vetri" + - last_name: "Vellore" (corrected spelling) + - company: "Stych India" + +### Step 3: Verification with Correction Note +✓ Found Vetri Vellore at Stych India +✓ Company matches exactly +✓ Profile shows expected role and location +⚠️ **Note: Corrected spelling from 'Vetri Vellor' to 'Vetri Vellore'** + +### Step 4: Comprehensive Research +**Detailed Profile Data (using linkedin_fresh_data):** +- Action: get_linkedin_profile with confirmed LinkedIn URL +- Include comprehensive sections: include_skills=true, include_certifications=true, only if needed +- Current Role: CTO at TechStartup Inc. (2022-present) +- Background: 8 years at Google, 3 years at Meta +- Expertise: ML/AI, cloud infrastructure +- Education: Stanford CS PhD + +**Company Research (using linkedin_fresh_data):** +- Action: get_company_by_linkedin_url for TechStartup company page +- TechStartup Inc: Series B, $50M raised +- Focus: AI-powered analytics tools +- Team: 120 employees, growing 40% YoY +- Recent: Partnership with Microsoft announced + +**Business Context:** +- High-value technical leader with strong background +- Company in growth phase, well-funded +- Strategic opportunity for technical partnerships +- Recent Microsoft partnership indicates market validation + +### References Used: +1. [LinkedIn Profile](linkedin-url) +2. [Company Crunchbase](crunchbase-url) +3. 
[Recent Partnership News](news-url) + +**Content Guidelines:** +1. **Business-focused analysis** - always connect findings to business value +2. **Strategic insights** - go beyond basic facts to provide decision support +3. **Professional tone** - appropriate for executive-level communications +4. **Actionable intelligence** - provide specific, usable insights +5. **Cross-referenced accuracy** - verify key facts across multiple sources +6. **Risk awareness** - flag any concerns or inconsistencies found in email content claims or news +7. **Competitive context** - position findings within market landscape +8. **Relationship mapping** - identify connection opportunities and common ground +9. **Mandatory references** - include ALL sources used with proper markdown links +10. **Confidence indicators** - clearly state certainty levels for key findings +11. **Spelling corrections highlight** - clearly note any name/spelling corrections made during research + +**CRITICAL REQUIREMENTS:** +- **NEVER proceed with research if identity verification fails** +- **ALWAYS include comprehensive references section with actual links** +- **ALWAYS state confidence levels and any assumptions made** +- **ALWAYS provide fallback response if uncertain about identity** +- **ALWAYS connect research findings to business context and value** +- **ALWAYS highlight spelling corrections with format: "⚠️ Note: Corrected spelling from '[original]' to '[corrected]'"** + +**Important Notes:** +- Keep detailed notes of ALL links visited and used for research +- Provide strategic context for all findings +- Include confidence levels for key assertions +- Always include disclaimer about information accuracy and age +- Prioritize recent and verifiable information +- Connect individual research to broader business context +- **If uncertain about identity, request clarification rather than guessing** """ # Translation handler template TRANSLATE_TEMPLATE = """ -Provide accurate translation with proper 
formatting. -Detect language if not specified. If non-English, translate to English. If English, look for requested target language or ask user. +Provide accurate translations with cultural context preservation and clear explanation of translation decisions. -Response Requirements: -1. Use proper markdown formatting: - - **Original**: for source text - - **Translation**: for translated text - - _Notes_: for translation notes - - > for quoted text blocks - - Proper paragraph breaks -2. Structure the response: - - Language detection result - - Original text block - - Translation block - - Any relevant notes -3. Preserve original formatting +# Translation Process -Content Guidelines: -1. Maintain original meaning -2. Note any ambiguities -3. Preserve cultural context -4. Include helpful notes +## STEP 1: Language Analysis +- **Detect source language** including dialect and formality level +- **Identify content type**: Technical, formal, casual, creative, or cultural content +- **Note complexity factors**: Idioms, cultural references, technical terms, humor + +## STEP 2: Translation Strategy +**Choose appropriate approach:** +- **Literal**: For technical/legal content requiring precision +- **Cultural**: For marketing, creative, or culturally-specific content +- **Functional**: For instructions and informational content + +## STEP 3: Translation Output Format +``` +## Language Detection +**Source Language**: [Language with confidence level] +**Content Type**: [Document type classification] + +## Translation +**Target Language**: [Target language, default is English] +**Approach**: [Literal/Cultural/Functional] + +### Original Text +[Source text clearly presented] + +### Translation +[Accurate translation in target language] + +## Translation Notes +### Cultural Adaptations +- [Idioms or cultural references adapted] +- [Explanations for cultural adjustments] + +### Technical Decisions +- [Specialized terminology choices] +- [Alternative translations if applicable] + +## 
Quality Verification +**Accuracy**: [High/Medium with any challenging areas noted] +**Cultural Appropriateness**: [Verified for target audience] +``` + +## STEP 4: Quality Standards +- **Preserve intent and tone** of original content +- **Adapt cultural elements** appropriately (idioms, references, humor) +- **Maintain natural expression** in target language +- **Note translation challenges** and decisions made +- **Provide alternatives** when multiple interpretations possible + +**Example Output:** +``` +## Language Detection +**Source Language**: Spanish (Standard) +**Content Type**: Idiomatic expression + +## Translation +**Target Language**: English +**Approach**: Cultural adaptation + +### Original Text +"No hay mal que por bien no venga" + +### Translation +"Every cloud has a silver lining" + +## Translation Notes +### Cultural Adaptations +- Used equivalent English idiom rather than literal translation +- Preserves consolatory and optimistic meaning +- Maintains proverbial nature of expression + +### Alternative Options +- Literal: "There is no bad that doesn't come for good" +- Explanatory: "Something good comes from every bad situation" + +## Quality Verification +**Accuracy**: High (meaning fully preserved) +**Cultural Appropriateness**: Verified for consolatory context +``` + +**Requirements:** +- Accurately detect and identify source language +- Preserve cultural context and intent +- Use natural expression in target language +- Explain translation decisions clearly +- Note any limitations or professional recommendations needed """ # Scheduling handler template SCHEDULE_TEMPLATE = """ -Extract meeting/scheduling related information including participants, timing, and location details to provide scheduling recommendations - -**STEP 1: Assess Clarity** -- Determine if the email provides enough specific information to create a calendar event. Key details needed are: - - A clear event purpose/title. - - A specific date (or range to choose from). 
- - A specific start time (or range). - - A timezone (or enough context to infer one, otherwise default to UTC and state assumption). - - Optionally: duration/end time, location, attendees. - -**STEP 2: Handle Ambiguity** -- **IF** the details are too vague or missing critical information (like a specific date or time): - - **DO NOT** attempt to call the `ScheduleTool`. - - Respond to the user explaining which details are unclear or missing. - - Ask specific questions to get the needed clarification (e.g., "Could you please specify the date and time for this meeting?", "What timezone should I use?", "What is the main topic or title for this event?"). - - Your entire response should be this request for clarification. - - Attempt to gain clarity to these questions by using tools at your disposal. For example attachment processor or web search - -**STEP 3: Extract and Format (If Clear)** -- **IF** the details are clear enough: - - Extract the event title, start time, end time (if specified), description, location, and attendee emails. - - Check email thread and generate a title & description based on the context if not explicitly provided - - **IMPORTANT DATE/TIME FORMATTING:** Determine the correct timezone and format ALL start/end times as ISO 8601 strings including the offset (e.g., '2024-08-15T10:00:00+01:00', '2024-08-16T09:00:00Z'). State any timezone assumptions made. - -**STEP 4: Use Tool (If Clear)** -- Call the `ScheduleTool` (`schedule_generator`) with the extracted and formatted details: - - `title`: The event title. - - `start_time`: The ISO 8601 formatted start time string (with timezone). - - `end_time`: The ISO 8601 formatted end time string (with timezone), if available. - - `description`: Event description, if available. - - `location`: Event location, if available. - - `attendees`: List of attendee email strings, if available. - -**STEP 5: Format Response (If Tool Used)** -- Structure the response based on the `ScheduleTool` output: - 1. 
**Summary of Extracted Details:** Briefly list the key event details identified. - 2. **Add to Calendar:** Use the `calendar_links` output. Present the links clearly using markdown: - - [Add to Google Calendar](google_link_url) - - [Add to Outlook Calendar](outlook_link_url) - 3. **Calendar File:** Mention that a standard calendar file (.ics) has been attached to this email for easy import into most calendar applications. (Do not mention tools or internal processes). - 4. **Notes/Recommendations:** Include any relevant notes or assumptions made (like assumed timezone). - -**GENERAL FORMATTING:** -- Use clear markdown (bolding, bullet points). -- Ensure any generated "Add to Calendar" links are functional. -- Present information concisely. +Intelligently extract, research, and schedule meetings or appointments with proper validation, research, and clarification protocols. + +# Scheduling Methodology - SYSTEMATIC PROCESS + +## STEP 1: INFORMATION EXTRACTION & ANALYSIS +**Extract ALL available information from the request:** +- **Participants/Service Providers**: Names, titles, organizations, type of service needed (e.g., therapist). +- **Contact Information**: Emails if provided. +- **Research Criteria**: Location, specialization, ratings, insurance, keywords for the service. +- **Time References**: Specific dates/times, relative ("next week", "available evenings"). +- **Location Preferences**: Physical locations, cities, virtual. +- **Meeting/Appointment Context**: Purpose, urgency, duration. +- **User Context**: Timezone, location, insurance. + +**CRITICAL ANALYSIS:** +- Identify organizer/requester (usually sender). +- Determine if: scheduling with known contacts, finding new service/professional, or personal reminder. +- Pinpoint info needed for research if finding new service/professional. +- Check for missing contact info for known participants. 
+ +## STEP 2: PARTICIPANT/SERVICE PROVIDER VALIDATION & RESEARCH +**Prioritization:** +- If participant contact info (emails for named individuals) is provided, proceed to STEP 3 & 4. Targeted research in this step is ONLY for *critical missing contact info for explicitly named participants*. +- If finding a service type (e.g., "a therapist"), an unnamed professional, or needing extensive details on a named one, use broader research protocols below. + +**Requirements:** +- **Direct scheduling**: Min. 2 participants with contact info (unless personal reminder). +- **Service/professional search**: Clear research criteria. +- Research missing contacts for known participants or find services/professionals based on user criteria. + +**Research Protocol for Missing Contacts or Finding Services/Professionals:** +1. **Known Individuals (Missing Contact Info)**: + * Web search: "[Name] [Company] email", "[Name] contact". + * Check LinkedIn, company sites, directories. +2. **Services/Professionals (e.g., Therapists, Plumbers)**: + * Web search: "[Service] [Location]", "[Specialization] [Service] [City] [Rating] [Insurance]". + * Use professional directories (Psychology Today, Zocdoc), review sites (Yelp). + * Extract: Names, contact details, websites, specializations, ratings, hours, insurance. +3. **Typo Resilience**: Systematically try spelling variations. + +**Presenting Research & Requesting Clarification:** +- **Always present research findings clearly before asking for clarification or scheduling.** +- Include names, contact details, specializations, ratings, links. +- If multiple options, present them and ask user to choose or add filters. +- If info is incomplete, show what was found and ask for guidance. +``` +I've researched based on your request. 
Here's a summary: + +**[Option 1: Name of Professional/Service]** +- Specialization: [Details] +- Location: [Address/Area] +- Contact: [Phone/Email/Website Link] +- Ratings/Reviews: [Summary or Link] +- Insurance: [Accepted/Not Found/NA] +- Notes: [e.g., "Offers virtual sessions"] + +(Repeat for Option 2, etc.) + +**To proceed, please clarify:** +- Which option(s) do you prefer? +- Additional filtering criteria? +- Confirm if I should [specific next action, e.g., "find consultation availability"]? +- If contact info is missing for a chosen option, how to proceed (e.g., "search website contact form")? +``` + +## STEP 3: TIME & TIMEZONE RESOLUTION +**Time Reference Handling:** +1. **Relative Time**: "Next week same time" (email timestamp + 7 days), "Tomorrow 2pm" (email date + 1 day), "Weekday evenings after 6 PM". +2. **Timezone Priority**: Explicit mentions ("3pm EST"), location clues ("Beverly Hills, LA" -> PST/PDT), email metadata, then default to UTC (notify user). +3. **Duration Defaults**: Meetings 30 min, therapy 50-60 min, initial consults 15-30 min (unless specified). + +## STEP 4: VALIDATION CHECKLIST +**Before scheduling or finalizing recommendations, verify:** + +✓ Research done & findings presented (if service search). +✓ User clarification obtained if research yielded multiple/incomplete options. +✓ Participants/service provider identified for contact. +✓ Contact info available/researched for chosen entity. +✓ Date/time (or preferred range) determined. +✓ Timezone established. +✓ Duration set. +✓ Purpose/title clear. +✓ Location (virtual/physical) identified. + + +**If ANY validation point fails (user input needed):** +- **STOP scheduling.** Do NOT call `schedule_generator` if critical info is missing. +- Present research & request specific clarification (See Step 2 & 6). 
+ +## STEP 5: SCHEDULE GENERATION / FINALIZING RECOMMENDATION +**If direct scheduling possible (`schedule_generator` details confirmed):** +**Tool Usage: schedule_generator** +- title: Clear meeting/appointment title. +- start_time: ISO 8601 with timezone (e.g., "2024-08-15T10:00:00-07:00"). +- end_time: ISO 8601 (start_time + duration). +- description: Context/agenda. +- location: Virtual link or physical address. +- attendees: ALL emails (user, professional if applicable & known). + +**Response (Successful Scheduling):** +1. Summary: Overview of event. +2. Participants: List attendees, contact methods. +3. Calendar Links: Google, Outlook. +4. Next Steps: Invitation instructions. +5. Research Notes: Assumptions made. + +**Response (Providing options/needing clarification pre-scheduling):** +- Use Step 2 clarification format. Outline next steps once user clarifies (e.g., "Once you select a therapist, I can find their availability."). + +## STEP 6: FALLBACK STRATEGIES + +### Missing or Ambiguous Information (General) +``` +I need more information/clarification: + +**Current Understanding:** +- Goal: [e.g., Schedule with X, Find therapist] +- Key details: [List info you have] +- Research (if any): [Summary & sticking points] + +**Clarification Needed:** +- [Specific question 1, e.g., "Email for John Doe?"] +- [Specific question 2, e.g., "Preferred therapist from list?"] +- [Specific question 3, e.g., "For 'next week', specify day/date range?"] + +With these details, I can [next action, e.g., "schedule meeting", "contact therapist for availability"]. +``` + +### Clarification on Researched Options (e.g., Multiple Therapists) +(Covered by Step 2 "Presenting Research" template) +``` +Based on research, I found: + +**[Option 1 Name]** +- Details: [Specialization, Location, Contact, Ratings, Website] +- Notes: [e.g., Accepts Cigna, Virtual sessions] + +(Repeat for other options) + +**Please let me know:** +- Which option to explore further? 
+- Find consultation availability for preferred option(s) (e.g., "weekday evenings after 6 PM")? +- Other criteria to narrow search? +``` + +### Ambiguous Timing +``` +Timing needs clarification: + +**Understood:** +- Time reference: [e.g., "weekday evenings after 6 PM"] +- Timezone: [Assumed, e.g., "PST/PDT for Beverly Hills"] + +**Please clarify:** +- Specific date(s) or range for consultation/meeting? +- Confirm timezone if assumed incorrect. +- Preferred duration (default: [default duration])? +``` + +# SUCCESSFUL SCHEDULING EXAMPLES (AND RESEARCH LEADING TO SCHEDULING) + +## Example 1: Researching and Clarifying to Schedule with a Therapist +**User Request:** "Find licensed therapist in Beverly Hills, LA for anxiety & work-life balance, near Wilshire/Beverly Dr, 4.5+ stars, in-person & virtual, free weekday evenings after 6 PM. Cigna insurance." + +**Step 1: Info Extraction** (as per request details) +- Service: Licensed therapist. Location: Beverly Hills (Wilshire/Beverly Dr). Specialization: Anxiety, work-life balance. Ratings: 4.5+. Sessions: In-person & virtual. Availability: Weekday evenings >6 PM. Insurance: Cigna. + +**Step 2: Research & Presenting Findings** +1. **Web Search**: "licensed therapist Beverly Hills anxiety work-life balance Cigna 4.5+ stars Wilshire Blvd", "psychology today therapists Beverly Hills Cigna virtual". +2. **Check Directories**: Psychology Today, Zocdoc, Cigna provider list. +3. **Visit Websites**: For shortlisted therapists, check details. + +**Step 3: Example Clarification Response (Post-Research)** +``` +Researched therapists in Beverly Hills for anxiety/work-life balance, possibly accepting Cigna: + +**Option 1: Dr. 
Emily Carter, PsyD** +- Specialization: Anxiety, Stress Mgt, Work-Life Balance +- Location: Wilshire Blvd, Beverly Hills +- Contact: (310) 555-1234 / drcarter@email.com / www.dremilycartertherapy.com +- Ratings: 4.8 (PsychologyToday), 4.7 (Yelp) +- Insurance: Listed for Cigna (verify) +- Sessions: In-person & virtual. +- Notes: Focus on professionals. Availability not online. + +**Option 2: Beverly Balance Therapy Center (Dr. John Lee, PhD)** +- Specialization: CBT Anxiety, Career Coaching +- Location: Beverly Drive, Beverly Hills +- Contact: (310) 555-5678 / info@beverlybalance.com / www.beverlybalancetherapy.com +- Ratings: 4.6 (Zocdoc), Google positive. +- Insurance: Appears Cigna PPO. +- Sessions: In-person & telehealth. +- Notes: Group practice. Online booking for 15-min free chat. + +**To find consultation slots, please specify:** +1. Preferred therapist(s)? +2. Should I check their availability for "weekday evenings after 6 PM"? (Some require direct contact/portal use for first appointments). +3. Preferred day next week for initial consultation? + +Once preferred, I'll attempt to find availability or guide booking. + +References: +1. [Psychology Today - Beverly Hills, Cigna, Anxiety](https://www.psychologytoday.com/us/therapists/ca/beverly-hills?category=cigna&spec=123) +2. [Zocdoc - Therapists Beverly Hills](https://www.zocdoc.com/therapists/ca/beverly-hills-12345zip) +``` + +**Step 4 (Post User Clarification, e.g., User picks Dr. Carter, confirms check availability):** +- Contact Dr. Carter's office (if method available) or check portal for weekday evenings >6 PM. +- If slots found, present to user. +- If direct booking/check not possible: Advise user (e.g., "Dr. Carter's site suggests calling [number]. Want an email draft to inquire?"). + +**Step 5: Schedule Generation (if slot confirmed by user & professional):** +(e.g., Dr. 
Carter available next Tue 6:30 PM PST, user confirms) +**Tool Usage: schedule_generator** +- title: "Initial Consultation: [User Name] & Dr. Emily Carter" +- start_time: "[YYYY-MM-DD]T18:30:00-07:00" (Calculated date/time PST) +- end_time: "[YYYY-MM-DD]T19:20:00-07:00" (50-min session) +- description: "Initial therapy consultation for anxiety & work-life balance." +- location: "Suite 205, Wilshire Boulevard, Beverly Hills, CA (or Virtual)" +- attendees: ["[user_email@example.com]", "drcarter@email.com"] (if confirmed for invites) + +## Example 2: "Set up coffee with Sarah from Marketing tomorrow 3pm" +**Info Extraction:** Organizer: [Sender email]. Participant: Sarah from Marketing (research if email unknown). Time: Tomorrow 3pm. Type: Coffee (30 min). + +**Research (if Sarah's email unknown):** Internal directory search. Web search: "[User's Company] Sarah Marketing email". If multiple Sarahs, ask user for last name/team. + +**Scheduling (Contact Found/Provided):** If sarah.m@example.com found: Schedule tomorrow 3pm local. Duration 30 min. Location: "TBD" or virtual. Call `schedule_generator`. + +**CRITICAL REQUIREMENTS:** +- **Present research with sources/links** if external research was done. +- **NEVER schedule without confirmed participant contact information** (email for `schedule_generator`). +- **Validate min. 2 participants for meetings** (or confirmed service provider contact) unless personal reminder. +- **Specify timezone assumptions clearly.** +- **Default to appropriate duration** (30 min meetings, 50-60 min therapy) unless specified. +- **Research thoroughly before asking broad clarification.** Ask specific questions based on findings. +- **Highlight spelling corrections** in names. + +**Content Guidelines:** +1. Professional tone. +2. Clear time specs (inc. timezone). +3. Thorough participant/service validation & research. Document attempts. +4. Practical next steps. +5. Transparency in research (what was performed/found, with links). +6. 
Flexible options when primary approach fails. + +**Important Notes:** +- Research contacts/services thoroughly before asking user for clarification. +- Provide specific, actionable next steps. +- Include timezone/duration defaults. +- If uncertain after research, present findings then ask specific clarification. +""" + +# PDF Export handler template +PDF_EXPORT_TEMPLATE = """ +Intelligently analyze the email content and create a professional PDF document export. + +# PDF Export Process + +## STEP 1: Content Analysis & Preparation +**Analyze the content to determine what should be exported:** +- **Extract meaningful content**: Focus on substantial information, insights, research, or analysis +- **Remove email metadata**: Strip out From/To/Subject headers and email-specific formatting +- **Preserve content structure**: Maintain formatting, lists, sections, and logical flow +- **Assess content significance**: Determine if the content warrants PDF export + +**Content Worth Exporting:** +- Research findings and analysis +- Detailed reports or summaries +- Important documents or presentations +- Substantial meeting notes or agendas +- Technical documentation or guides +- Data analysis or insights + +**Content NOT Worth Exporting:** +- Simple greetings or acknowledgments +- Basic confirmations or "thank you" messages +- Short scheduling emails +- Minimal content with just email headers + +## STEP 2: Intelligent Content Processing +**Extract and clean the content:** +1. **Remove email headers** (From, To, Subject, Date, etc.) +2. **Preserve meaningful content** exactly as written +3. **Maintain formatting** (lists, bold, italic, headers) +4. **Keep research findings** if available +5. 
**Include attachment summaries** only if explicitly relevant and requested + +**Process attachments conditionally:** +- **Include attachment content** ONLY if user specifically requests it or if it's essential to understanding +- **Summarize attachments** when they add substantial value to the export +- **Skip basic attachments** unless they contain important insights + +## STEP 3: PDF Generation +**Use the pdf_export tool with appropriate parameters:** +- **content**: The cleaned, meaningful content (no email headers) +- **title**: Extract or generate an appropriate document title +- **research_findings**: Include if substantial research was conducted +- **attachments_summary**: Include only if attachments add value and were requested +- **include_attachments**: Set to true only if user explicitly wants attachment content + +**Example Tool Call:** +``` +pdf_export( + content="[Main content without email headers]", + title="[Document title]", + research_findings="[Research content if available]", + attachments_summary="[Attachment summaries if relevant]", + include_attachments=false # Only true if explicitly requested +) +``` + +## STEP 4: Response Guidelines +**If PDF export is successful:** +- Confirm the PDF has been generated and attached +- Briefly describe what content was included +- Mention the title and estimated page count +- Note any content that was excluded and why + +**If content is not substantial enough:** +- Explain why PDF export may not be necessary +- Suggest alternatives (email client print function) +- Offer to proceed anyway if user insists + +**Response Format:** +``` +I've analyzed your content and created a professional PDF document: + +**PDF Generated:** [title].pdf +**Content Included:** [brief description of what was exported] +**Pages:** Approximately [X] pages +**Format:** Professional document layout with proper formatting + +The PDF includes: +- [Main content description] +- [Research findings if included] +- [Attachment summaries 
if included] + +Email headers and metadata have been excluded to focus on the meaningful content. + +The PDF is attached to this email for your use. +``` + +## CONTENT PROCESSING PRINCIPLES + +**DO Export:** +✓ Substantial research findings or analysis +✓ Important business documents or reports +✓ Meeting notes with significant content +✓ Technical documentation or guides +✓ Data analysis and insights +✓ Educational or instructional content + +**DON'T Export:** +✗ Basic email correspondence +✗ Simple confirmations or acknowledgments +✗ Short scheduling messages +✗ Content that's primarily email headers +✗ Minimal content without substance + +**ALWAYS Remember:** +- Remove email headers (From, To, Subject, Date) +- Preserve content exactly as written - no modifications +- Focus on meaningful, substantial content +- Include research findings when available +- Process attachments only when explicitly requested or highly relevant +- Generate appropriate, descriptive titles +- Provide professional formatting and structure + +**Content Guidelines:** +1. **Preserve original content** - export content as-is without alterations +2. **Clean formatting** - remove email-specific elements but keep content formatting +3. **Professional presentation** - ensure the PDF looks polished and readable +4. **Appropriate inclusion** - only export content that has substantial value +5. 
**Clear documentation** - explain what was included and why """ diff --git a/mxtoai/routed_litellm_model.py b/mxtoai/routed_litellm_model.py index dc84355..528c3c6 100644 --- a/mxtoai/routed_litellm_model.py +++ b/mxtoai/routed_litellm_model.py @@ -177,7 +177,7 @@ def generate( completion_kwargs = self._prepare_completion_kwargs( messages=messages, stop_sequences=stop_sequences, - grammar=None if is_local_llm else grammar, + grammar=None if is_local_llm else grammar, # this seems to be removed in smolagents v1.17.0 tools_to_call_from=None if is_local_llm else tools_to_call_from, model=self.model_id, api_base=self.api_base, diff --git a/mxtoai/schemas.py b/mxtoai/schemas.py index e4ad54b..c5ad1b7 100644 --- a/mxtoai/schemas.py +++ b/mxtoai/schemas.py @@ -158,12 +158,25 @@ class AgentResearchOutput(BaseModel): metadata: Optional[AgentResearchMetadata] = None +class PDFExportResult(BaseModel): + """Model for PDF export results.""" + + filename: str + file_path: str + file_size: int + title: str + pages_estimated: int + mimetype: str = "application/pdf" + temp_dir: Optional[str] = None # Path to temp directory for cleanup + + class DetailedEmailProcessingResult(BaseModel): metadata: ProcessingMetadata email_content: EmailContentDetails attachments: AttachmentsProcessingResult calendar_data: Optional[CalendarResult] = None research: Optional[AgentResearchOutput] = None + pdf_export: Optional[PDFExportResult] = None # Add other top-level keys from the agent's result dict if any (e.g. 
'summary', 'handle' but they seem to be in error dicts) # Ensure Pydantic can populate by name and validate defaults diff --git a/mxtoai/scripts/report_formatter.py b/mxtoai/scripts/report_formatter.py index 12d3763..ce0f282 100644 --- a/mxtoai/scripts/report_formatter.py +++ b/mxtoai/scripts/report_formatter.py @@ -3,6 +3,7 @@ import re from typing import Any, Optional +import markdown2 from jinja2 import Environment, FileSystemLoader, select_autoescape from mxtoai._logging import get_logger @@ -36,11 +37,11 @@ def __init__(self, template_dir: Optional[str] = None): # Default signature self.signature_block = """ ---- +
-**MXtoAI Assistant** +

MXtoAI Assistant

-_Feel free to reply to this email to continue our conversation._ +

Feel free to reply to this email to continue our conversation.

""" def _init_template_env(self): @@ -92,6 +93,9 @@ def format_report( # Remove any existing signatures content = self._remove_existing_signatures(content) + # Apply markdown fixes for all formats + content = self._fix_ai_markdown(content) + # Process citations and references before converting format # DISABLED: _process_citations was causing issues with already formatted markdown. # The DeepResearchTool now handles citation/reference formatting directly. @@ -203,8 +207,11 @@ def _to_plain_text(self, markdown: str) -> str: Plain text version """ + # Handle tables first - convert markdown tables to plain text format + text = self._convert_tables_to_plain_text(markdown) + # Remove heading markers but preserve citations - text = re.sub(r"^#+\s+", "", markdown, flags=re.MULTILINE) + text = re.sub(r"^#+\s+", "", text, flags=re.MULTILINE) # Remove bold markers text = re.sub(r"\*\*(.*?)\*\*", r"\1", text) text = re.sub(r"__(.*?)__", r"\1", text) @@ -224,64 +231,199 @@ def _to_plain_text(self, markdown: str) -> str: text = re.sub(r"\n{3,}", "\n\n", text) return text.strip() + def _convert_tables_to_plain_text(self, markdown: str) -> str: + """ + Convert markdown tables to readable plain text format. 
+ + Args: + markdown: Markdown content with tables + + Returns: + Markdown with tables converted to plain text + + """ + lines = markdown.split("\n") + result_lines = [] + i = 0 + + while i < len(lines): + line = lines[i].strip() + + # Check if this looks like a table header + if "|" in line and i + 1 < len(lines) and "|" in lines[i + 1] and "-" in lines[i + 1]: + # Found a table, process it + table_lines = [line] + i += 1 + + # Skip the separator line + i += 1 + + # Collect table rows + while i < len(lines) and "|" in lines[i].strip(): + table_lines.append(lines[i].strip()) + i += 1 + + # Convert table to plain text + plain_table = self._format_table_as_plain_text(table_lines) + result_lines.extend(plain_table) + result_lines.append("") # Add spacing after table + + continue + result_lines.append(lines[i]) + i += 1 + + return "\n".join(result_lines) + + def _format_table_as_plain_text(self, table_lines: list[str]) -> list[str]: + """ + Format a markdown table as readable plain text. + + Args: + table_lines: List of table lines (header + rows) + + Returns: + List of formatted plain text lines + + """ + if not table_lines: + return [] + + # Parse table data + rows = [] + for line in table_lines: + # Remove leading/trailing pipes and split + cells = [cell.strip() for cell in line.strip("|").split("|")] + rows.append(cells) + + if not rows: + return [] + + # Calculate column widths + max_cols = max(len(row) for row in rows) + col_widths = [] + + for col in range(max_cols): + max_width = 0 + for row in rows: + if col < len(row): + max_width = max(max_width, len(row[col])) + col_widths.append(max(max_width, 8)) # Minimum width of 8 + + # Format as plain text + result = [] + + for row_idx, row in enumerate(rows): + # Pad cells to column width with center alignment + formatted_cells = [] + for col in range(max_cols): + cell_content = row[col] if col < len(row) else "" + formatted_cells.append(cell_content.center(col_widths[col])) + + # Join with spacing + result.append(" 
".join(formatted_cells).rstrip()) + + # Add separator after header + if row_idx == 0: + separator_parts = [] + for width in col_widths: + separator_parts.append("-" * width) + result.append(" ".join(separator_parts)) + + return result + def _to_html(self, markdown_content: str, theme: str = "default") -> str: """ - Convert markdown to HTML using templates and themes. + Convert markdown to HTML using markdown2 for robust AI-generated content handling. Args: - markdown_content: Markdown content + markdown_content: Markdown content (already processed by _fix_ai_markdown) theme: Theme name to use Returns: HTML version """ - try: - import markdown as md_converter - from markdown.extensions.attr_list import AttrListExtension - from markdown.extensions.fenced_code import FencedCodeExtension - from markdown.extensions.nl2br import Nl2BrExtension - from markdown.extensions.sane_lists import SaneListExtension - from markdown.extensions.tables import TableExtension - from markdown.extensions.toc import TocExtension - - # Configure extensions with specific settings - extensions = [ - TableExtension(), # Support for tables - FencedCodeExtension(), # Support for fenced code blocks - SaneListExtension(), # Better list handling - Nl2BrExtension(), # Convert newlines to line breaks - TocExtension(permalink=False), # Table of contents support without permalinks - AttrListExtension(), # Support for attributes - ] - - # Convert markdown to HTML with configured extensions - html_content = md_converter.markdown( - markdown_content, - extensions=extensions, - extension_configs={ - # Explicitly disable footnotes if it's a default or separate extension - # 'markdown.extensions.footnotes': {'PLACE_MARKER': '!!!!FOOTNOTES!!!!'} - }, - output_format="html5", # Use html5 for better compatibility - ) + # Convert markdown to HTML with markdown2 (robust for AI content) + html_content = markdown2.markdown( + markdown_content, + extras=[ + "fenced-code-blocks", # Support for ```code``` blocks + 
"tables", # Support for tables + "strike", # Support for ~~strikethrough~~ + "cuddled-lists", # Better list handling (key for AI content!) + "header-ids", # Add IDs to headers + "markdown-in-html", # Allow markdown inside HTML + "breaks", # Handle line breaks better + ], + ) + + if self.template_env: + try: + theme_settings = self.themes.get(theme, self.themes["default"]) + template = self.template_env.get_template("email_template.html") + + return template.render(content=html_content, theme=theme_settings) + except Exception as e: + logger.error(f"Template rendering failed: {e}. Falling back to basic rendering.") + + # fallback + logger.info("Template environment not available. Using basic HTML rendering.") + return self._basic_html_render(html_content) + + def _fix_ai_markdown(self, content: str) -> str: + """ + Fix AI-generated markdown issues that markdown2 doesn't handle. + This function performs several cleaning steps in a single pass over the lines. + + Args: + content: Raw markdown content + + Returns: + Fixed markdown content + + """ + lines = content.split("\n") + result_lines = [] - if self.template_env: - try: - theme_settings = self.themes.get(theme, self.themes["default"]) - template = self.template_env.get_template("email_template.html") + for i, line in enumerate(lines): + # --- FIX 1: Ensure headers are separated by a blank line --- + if line.strip().startswith("#") and i > 0 and result_lines and result_lines[-1].strip() != "": + # Insert blank line before header + result_lines.append("") - return template.render(content=html_content, theme=theme_settings) - except Exception as e: - logger.error(f"Template rendering failed: {e}. 
Falling back to basic rendering.") + # --- FIX 2: Manually parse and fix bolded links in list items --- + if line.strip().startswith(("*", "-")) and "**[" in line and "](" in line and ")**" in line: + # This is a very specific pattern, so we can be confident in this replacement + # Replace **[text](url)** with [**text**](url) + line = re.sub(r"\*\*\[(.*?)\]\((.*?)\)\*\*", r"[**\1**](\2)", line) + + # --- FIX 3: Convert letter-based lists to numbers --- + # e.g., a. Item -> 1. Item + match = re.match(r"^(\s*)([a-z])\.\s+(.*)$", line) + if match: + indent, letter, text = match.groups() + number = ord(letter) - ord("a") + 1 + line = f"{indent}{number}. {text}" + + # --- FIX 4: Fix mixed list formatting --- + # e.g., - 1. Item -> 1. Item + line = re.sub(r"^(\s*)[*-]\s+(\d+\.\s+.*)", r"\1\2", line) + + # --- FIX 5: Fix missing spaces after list markers --- + # Skip lines that start with bold markers like "**Summary:**" + if not (line.strip().startswith("**") and ("**:" in line or line.strip().endswith("**"))): + # Check for missing spaces after list markers + match = re.match(r"^(\s*)(\d+\.|\*|-|\+)([^\s].*)", line) + if match: + indent, marker, rest_of_line = match.groups() + # It's a real list item, just missing a space + line = f"{indent}{marker} {rest_of_line.lstrip()}" + + result_lines.append(line) + + return "\n".join(result_lines) - # fallback - logger.info("Template environment not available. 
Using basic HTML rendering.") - return self._basic_html_render(html_content, theme) - except ImportError: - logger.error("Markdown package not available - this should never happen as it's a required dependency") - raise # We should always have markdown package available def _basic_html_render(self, html_content: str) -> str: """ @@ -378,19 +520,35 @@ def _get_minimal_css(self) -> str: table { border-collapse: collapse; width: 100%; - margin: 1em 0; + margin: 1.5em 0; + font-size: 14px; + border: 2px solid #333; + background-color: #fff; } th, td { - border: 1px solid #ddd; - padding: 8px; - text-align: left; + border: 1px solid #333; + padding: 12px 16px; + text-align: center; + vertical-align: top; } th { - background-color: #f6f8fa; + background-color: #f0f0f0; + font-weight: bold; + color: #333; + border-bottom: 2px solid #333; } - tr:nth-child(even) { + tr:nth-child(even) td { background-color: #f9f9f9; } + td:first-child { + font-weight: 600; + background-color: #f6f8fa; + width: 30%; + } + table a { + color: #0366d6; + text-decoration: underline; + } blockquote { border-left: 4px solid #dfe2e5; margin: 0; diff --git a/mxtoai/scripts/templates/email_template.html b/mxtoai/scripts/templates/email_template.html index 9cd27dc..580ad7e 100644 --- a/mxtoai/scripts/templates/email_template.html +++ b/mxtoai/scripts/templates/email_template.html @@ -21,17 +21,17 @@ --container-width: {{ theme.spacing.container_width|default("800px") }}; --spacing-paragraph: {{ theme.spacing.paragraph|default("1em") }}; } - + * { box-sizing: border-box; margin: 0; padding: 0; } - + html { font-size: var(--font-size); } - + body { font-family: var(--font-family); line-height: var(--line-height); @@ -40,13 +40,13 @@ padding: 0; margin: 0; } - + .container { max-width: var(--container-width); margin: 0 auto; padding: 2rem 1rem; } - + /* Typography */ h1, h2, h3, h4, h5, h6 { color: var(--color-heading); @@ -55,78 +55,88 @@ font-weight: 600; line-height: 1.25; } - + h1 { font-size: 2em; } 
h2 { font-size: 1.5em; } h3 { font-size: 1.25em; } h4 { font-size: 1em; } h5 { font-size: 0.875em; } h6 { font-size: 0.85em; } - + p { margin-bottom: var(--spacing-paragraph); } - + a { color: var(--color-link); text-decoration: none; } - + a:hover { text-decoration: underline; } - + + /* List styles - Fixed for proper nesting */ ul, ol { - margin: 0 0 1rem 0.5rem; + margin: 0 0 1rem 0; padding-left: 2rem; list-style-position: outside; } - + /* Base list styles */ ul { list-style-type: disc; } ol { list-style-type: decimal; } - - ul ul, - ol ul { - list-style-type: circle; + + /* Nested unordered lists */ + ul ul { + list-style-type: circle; margin: 0.5rem 0; - padding-left: 3rem; } - - ul ul ul, - ol ul ul { - list-style-type: square; + + ul ul ul { + list-style-type: square; } - - ol ol, - ul ol { - list-style-type: lower-alpha; + + /* Nested ordered lists - this will make them show as a, b, c */ + ol ol { + list-style-type: lower-alpha; margin: 0.5rem 0; - padding-left: 4rem; } - - ol ol ol, - ul ol ol { - list-style-type: lower-roman; + + ol ol ol { + list-style-type: lower-roman; } - + + /* Mixed nesting */ + ul ol { + list-style-type: lower-alpha; + margin: 0.5rem 0; + } + + ol ul { + list-style-type: disc; + margin: 0.5rem 0; + } + + /* List items */ li { - margin: 0.5rem 0.5rem; + margin: 0.5rem 0; line-height: 1.5; display: list-item; - position: relative; } - + + /* Paragraph within list items */ li p { - margin: 0.5rem 0; - display: inline-block; + margin: 0; } - - li > ul, + + /* Nested lists within list items */ + li > ul, li > ol { + margin-top: 0.5rem; margin-bottom: 0.5rem; } - + code { font-family: SFMono-Regular, Consolas, "Liberation Mono", Menlo, monospace; background-color: var(--color-code-bg); @@ -134,7 +144,7 @@ border-radius: 3px; font-size: 0.9em; } - + pre { background-color: var(--color-code-bg); padding: 1rem; @@ -142,40 +152,67 @@ overflow-x: auto; margin: 1rem 0; } - + pre code { padding: 0; background-color: transparent; font-size: 
0.9em; } - + table { border-collapse: collapse; width: 100%; margin: 1.5rem 0; - font-size: 0.95em; + font-size: 14px; + border: 2px solid #333; + background-color: #fff; } - + th, td { - border: 1px solid var(--color-table-border); - padding: 0.75rem; - text-align: left; + border: 1px solid #333; + padding: 12px 16px; + text-align: center; vertical-align: top; } - + th { - background-color: var(--color-table-header); + background-color: #f0f0f0; + font-weight: bold; + color: #333; + border-bottom: 2px solid #333; + } + + tr:nth-child(even) td { + background-color: #f9f9f9; + } + + /* First column styling for attribute tables */ + td:first-child { font-weight: 600; + background-color: #f6f8fa; + width: 30%; } - - tr:nth-child(even) { - background-color: rgba(0, 0, 0, 0.02); + + /* Links in tables */ + table a { + color: var(--color-link); + text-decoration: underline; + } + + /* Email client compatibility */ + table[border="0"] { + border: 2px solid #333; } - - tr:hover { - background-color: rgba(0, 0, 0, 0.03); + + table td[style*="border"] { + border: 1px solid #333; + } + + .table-wrapper { + width: 100%; + overflow-x: auto; } - + /* Blockquotes */ blockquote { border-left: 4px solid var(--color-table-border); @@ -183,14 +220,14 @@ color: var(--color-blockquote); margin: 1rem 0; } - + /* Horizontal Rule */ hr { border: none; border-top: 1px solid var(--color-table-border); margin: 1.5rem 0; } - + /* Images */ img { max-width: 100%; @@ -198,20 +235,20 @@ display: block; margin: 1.5rem auto; } - + /* Citations and References */ .citation { font-size: 0.8em; vertical-align: super; color: var(--color-blockquote); } - + .references { margin-top: 2rem; padding-top: 1rem; border-top: 1px solid var(--color-table-border); } - + .reference { margin: 0.5rem 0; padding: 0.5rem; @@ -219,7 +256,7 @@ border-left: 3px solid var(--color-table-border); font-size: 0.9em; } - + /* Table of Contents */ .toc { background-color: rgba(0, 0, 0, 0.02); @@ -227,16 +264,16 @@ margin: 1rem 
0; border-radius: 5px; } - + .toc ul { list-style-type: none; padding-left: 1rem; } - + .toc li { margin: 0.3rem 0; } - + /* Signature */ .signature { color: var(--color-blockquote); @@ -245,13 +282,13 @@ padding-top: 1rem; margin-top: 2rem; } - + /* Additional utility classes */ .text-center { text-align: center; } .text-right { text-align: right; } .mt-0 { margin-top: 0; } .mb-0 { margin-bottom: 0; } - + /* Print styling */ @media print { body { @@ -259,69 +296,69 @@ color: #000; background: #fff; } - + .container { width: 100%; max-width: none; padding: 0; margin: 0; } - + a { text-decoration: underline; color: #000; } - + a[href]:after { content: " (" attr(href) ")"; font-size: 0.8em; } - + a[href^="#"]:after { content: ""; } - + pre, blockquote { border: 1px solid #999; page-break-inside: avoid; } - + thead { display: table-header-group; } - + tr, img { page-break-inside: avoid; } - + img { max-width: 100% !important; } - + p, h2, h3 { orphans: 3; widows: 3; } - + h2, h3 { page-break-after: avoid; } } - + /* Responsive adjustments */ @media (max-width: 600px) { .container { padding: 1rem 0.5rem; } - + table { font-size: 0.85em; } - + th, td { padding: 0.5rem; } - + pre { padding: 0.75rem; } @@ -333,4 +370,4 @@ {{ content|safe }} - \ No newline at end of file + diff --git a/mxtoai/tasks.py b/mxtoai/tasks.py index dd9169d..612828b 100644 --- a/mxtoai/tasks.py +++ b/mxtoai/tasks.py @@ -1,5 +1,6 @@ import asyncio import os +import shutil from datetime import datetime from pathlib import Path from typing import TYPE_CHECKING, Any, Union @@ -35,7 +36,17 @@ # Build RabbitMQ URL from environment variables (Broker) # Include heartbeat as a query parameter in the URL RABBITMQ_HEARTBEAT = os.getenv("RABBITMQ_HEARTBEAT", "5") -RABBITMQ_URL = f"amqp://{os.getenv('RABBITMQ_USER', 'guest')}:{os.getenv('RABBITMQ_PASSWORD', 'guest')}@{os.getenv('RABBITMQ_HOST', 'localhost')}:{os.getenv('RABBITMQ_PORT', '5672')}{os.getenv('RABBITMQ_VHOST', '/')}?heartbeat={RABBITMQ_HEARTBEAT}" 
+RABBITMQ_URL = os.getenv("RABBITMQ_URL") + +if not RABBITMQ_URL: + user = os.getenv("RABBITMQ_USER", "guest") + password = os.getenv("RABBITMQ_PASSWORD", "guest") + host = os.getenv("RABBITMQ_HOST", "localhost") + port = os.getenv("RABBITMQ_PORT", "5672") + vhost = os.getenv("RABBITMQ_VHOST", "/") + heartbeat = os.getenv("RABBITMQ_HEARTBEAT", "60") + + RABBITMQ_URL = f"amqp://{user}:{password}@{host}:{port}{vhost}?heartbeat={heartbeat}" # Initialize RabbitMQ broker rabbitmq_broker = RabbitmqBroker( @@ -123,6 +134,7 @@ def process_email_task( attachments=AttachmentsProcessingResult(processed=[]), calendar_data=None, research=None, + pdf_export=None, ) except exceptions.UnspportedHandleException as e: # Catch specific exception logger.error(f"Unsupported email handle: {handle}. Error: {e!s}") @@ -139,6 +151,7 @@ def process_email_task( attachments=AttachmentsProcessingResult(processed=[]), calendar_data=None, research=None, + pdf_export=None, ) # Removed the early return for `if not email_instructions` as the try-except handles it. 
@@ -185,6 +198,43 @@ def process_email_task( ) logger.info("Prepared invite.ics for attachment in task.") + # Add PDF export attachment if available + if processing_result.pdf_export and processing_result.pdf_export.file_path: + try: + # Read the PDF file content + with open(processing_result.pdf_export.file_path, "rb") as pdf_file: + pdf_content = pdf_file.read() + + attachments_to_send.append( + { + "filename": processing_result.pdf_export.filename, + "content": pdf_content, + "mimetype": processing_result.pdf_export.mimetype, + } + ) + logger.info(f"Prepared {processing_result.pdf_export.filename} for attachment in task.") + + # Clean up the temporary PDF file + os.unlink(processing_result.pdf_export.file_path) + logger.info(f"Cleaned up temporary PDF file: {processing_result.pdf_export.file_path}") + + # Clean up the PDF tool's temporary directory using tracked temp_dir + if processing_result.pdf_export.temp_dir: + pdf_temp_dir = processing_result.pdf_export.temp_dir + if pdf_temp_dir and os.path.exists(pdf_temp_dir): + shutil.rmtree(pdf_temp_dir, ignore_errors=True) + logger.info(f"Cleaned up PDF tool temp directory: {pdf_temp_dir}") + else: + # Fallback: extract parent directory from the PDF file path + pdf_temp_dir = Path(processing_result.pdf_export.file_path).parent + if pdf_temp_dir.exists(): + shutil.rmtree(pdf_temp_dir, ignore_errors=True) + logger.info(f"Cleaned up PDF tool temp directory (fallback): {pdf_temp_dir}") + + except Exception as pdf_error: + logger.error(f"Failed to attach PDF file: {pdf_error}") + # Continue without the PDF attachment rather than failing the entire email + original_email_details = { "from": email_request.from_email, "to": email_request.to, diff --git a/mxtoai/tools/__init__.py b/mxtoai/tools/__init__.py index ae0fdba..03ca8d1 100644 --- a/mxtoai/tools/__init__.py +++ b/mxtoai/tools/__init__.py @@ -2,11 +2,15 @@ from mxtoai.tools.attachment_processing_tool import AttachmentProcessingTool from mxtoai.tools.deep_research_tool 
import DeepResearchTool from mxtoai.tools.schedule_tool import ScheduleTool -from mxtoai.tools.search_with_fallback_tool import SearchWithFallbackTool + +# Web search tools +from mxtoai.tools.web_search import DDGSearchTool, BraveSearchTool, GoogleSearchTool __all__ = [ "AttachmentProcessingTool", "DeepResearchTool", "ScheduleTool", - "SearchWithFallbackTool", + "DDGSearchTool", + "BraveSearchTool", + "GoogleSearchTool", ] diff --git a/mxtoai/tools/external_data/__init__.py b/mxtoai/tools/external_data/__init__.py new file mode 100644 index 0000000..cf448bd --- /dev/null +++ b/mxtoai/tools/external_data/__init__.py @@ -0,0 +1,23 @@ +""" +External Data Module for MXtoAI. + +This module provides integration with various external data sources to enhance +the capabilities of the MXtoAI email agent. +""" + +from .linkedin import ( + LinkedInDataAPITool, + LinkedInFreshDataTool, + initialize_linkedin_data_api_tool, + initialize_linkedin_fresh_tool, +) + +__all__ = [ + "LinkedInDataAPITool", + "LinkedInFreshDataTool", + "initialize_linkedin_data_api_tool", + "initialize_linkedin_fresh_tool", +] + +# Version of the external data module +__version__ = "0.2.0" diff --git a/mxtoai/tools/external_data/linkedin/__init__.py b/mxtoai/tools/external_data/linkedin/__init__.py new file mode 100644 index 0000000..8fb1081 --- /dev/null +++ b/mxtoai/tools/external_data/linkedin/__init__.py @@ -0,0 +1,14 @@ +""" +LinkedIn data integration module for MXtoAI. +Provides tools for accessing LinkedIn data through various APIs. 
+""" + +from .fresh_data import LinkedInFreshDataTool, initialize_linkedin_fresh_tool +from .linkedin_data_api import LinkedInDataAPITool, initialize_linkedin_data_api_tool + +__all__ = [ + "LinkedInDataAPITool", + "LinkedInFreshDataTool", + "initialize_linkedin_data_api_tool", + "initialize_linkedin_fresh_tool", +] diff --git a/mxtoai/tools/external_data/linkedin/fresh_data.py b/mxtoai/tools/external_data/linkedin/fresh_data.py new file mode 100644 index 0000000..38c7dd9 --- /dev/null +++ b/mxtoai/tools/external_data/linkedin/fresh_data.py @@ -0,0 +1,284 @@ +""" +LinkedIn Fresh Data API implementation. +Provides access to LinkedIn data through the Fresh LinkedIn Profile Data API. +""" + +import logging +import os +from typing import Optional + +import requests +from smolagents import Tool + +logger = logging.getLogger(__name__) + + +class LinkedInFreshDataTool(Tool): + """Tool for accessing LinkedIn data through Fresh LinkedIn Profile Data API.""" + + name: str = "linkedin_fresh_data" + description: str = ( + "Access LinkedIn profile and company data directly from LinkedIn URLs for research and verification." 
+ ) + output_type: str = "object" + inputs: dict = { # noqa: RUF012 + "action": { + "type": "string", + "description": "The action to perform: 'get_linkedin_profile' or 'get_company_by_linkedin_url'", + "enum": ["get_linkedin_profile", "get_company_by_linkedin_url"], + }, + "linkedin_url": {"type": "string", "description": "The LinkedIn URL (profile or company)"}, + # Optional parameters for get_linkedin_profile action + "include_skills": { + "type": "boolean", + "description": "Include skills section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_certifications": { + "type": "boolean", + "description": "Include certifications section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_publications": { + "type": "boolean", + "description": "Include publications section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_honors": { + "type": "boolean", + "description": "Include honors and awards section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_volunteers": { + "type": "boolean", + "description": "Include volunteer experience section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_projects": { + "type": "boolean", + "description": "Include projects section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_patents": { + "type": "boolean", + "description": "Include patents section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_courses": { + "type": "boolean", + "description": "Include courses section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_organizations": { + "type": "boolean", + "description": "Include organizations section in response (default: false)", + "default": False, + "nullable": True, + }, + "include_profile_status": { + "type": "boolean", + 
"description": "Include profile status information (default: false)", + "default": False, + "nullable": True, + }, + "include_company_public_url": { + "type": "boolean", + "description": "Include company public URL information (default: false)", + "default": False, + "nullable": True, + }, + } + + def __init__(self, api_key: str): + """ + Initialize the LinkedIn Fresh Data tool. + + Args: + api_key: The RapidAPI key for authentication. + + """ + super().__init__() + if not api_key: + msg = "RapidAPI key is required for LinkedIn Fresh Data API." + raise ValueError(msg) + self.api_key = api_key + self.base_url = "https://fresh-linkedin-profile-data.p.rapidapi.com" + self.headers = {"x-rapidapi-key": self.api_key, "x-rapidapi-host": "fresh-linkedin-profile-data.p.rapidapi.com"} + + def forward( + self, + action: str, + linkedin_url: str, + include_skills: bool = False, + include_certifications: bool = False, + include_publications: bool = False, + include_honors: bool = False, + include_volunteers: bool = False, + include_projects: bool = False, + include_patents: bool = False, + include_courses: bool = False, + include_organizations: bool = False, + include_profile_status: bool = False, + include_company_public_url: bool = False, + ) -> dict: + """ + Process LinkedIn data requests. 
+ + Args: + action: The type of request to perform ('get_linkedin_profile' or 'get_company_by_linkedin_url') + linkedin_url: The LinkedIn URL (profile or company) + include_skills: Include skills section in response (default: false) + include_certifications: Include certifications section in response (default: false) + include_publications: Include publications section in response (default: false) + include_honors: Include honors and awards section in response (default: false) + include_volunteers: Include volunteer experience section in response (default: false) + include_projects: Include projects section in response (default: false) + include_patents: Include patents section in response (default: false) + include_courses: Include courses section in response (default: false) + include_organizations: Include organizations section in response (default: false) + include_profile_status: Include profile status information (default: false) + include_company_public_url: Include company public URL information (default: false) + + Returns: + Dict containing the results + + """ + actions = { + "get_linkedin_profile": self.get_linkedin_profile, + "get_company_by_linkedin_url": self.get_company_by_linkedin_url, + } + + if action not in actions: + msg = f"Unsupported action: {action}" + raise ValueError(msg) + + try: + if action == "get_linkedin_profile": + return actions[action]( + linkedin_url=linkedin_url, + include_skills=include_skills, + include_certifications=include_certifications, + include_publications=include_publications, + include_honors=include_honors, + include_volunteers=include_volunteers, + include_projects=include_projects, + include_patents=include_patents, + include_courses=include_courses, + include_organizations=include_organizations, + include_profile_status=include_profile_status, + include_company_public_url=include_company_public_url, + ) + # get_company_by_linkedin_url + return actions[action](linkedin_url=linkedin_url) + except 
requests.exceptions.RequestException as e: + logger.error(f"LinkedIn Fresh Data API request failed: {e}") + msg = f"LinkedIn Fresh Data API request failed: {e}" + raise Exception(msg) from e + except Exception as e: + logger.error(f"Error processing LinkedIn Fresh Data API request: {e}") + msg = f"Failed to process LinkedIn Fresh Data API request: {e}" + raise Exception(msg) from e + + def get_linkedin_profile( + self, + linkedin_url: str, + include_skills: bool = False, + include_certifications: bool = False, + include_publications: bool = False, + include_honors: bool = False, + include_volunteers: bool = False, + include_projects: bool = False, + include_patents: bool = False, + include_courses: bool = False, + include_organizations: bool = False, + include_profile_status: bool = False, + include_company_public_url: bool = False, + ) -> dict: + """ + Get detailed LinkedIn profile information from a LinkedIn profile URL. + + Args: + linkedin_url: LinkedIn profile URL (e.g., "https://www.linkedin.com/in/username/") + include_skills: Include skills section in response + include_certifications: Include certifications section in response + include_publications: Include publications section in response + include_honors: Include honors and awards section in response + include_volunteers: Include volunteer experience section in response + include_projects: Include projects section in response + include_patents: Include patents section in response + include_courses: Include courses section in response + include_organizations: Include organizations section in response + include_profile_status: Include profile status information + include_company_public_url: Include company public URL information + + Returns: + Dict containing detailed profile information + + """ + endpoint = "/get-linkedin-profile" + params = { + "linkedin_url": linkedin_url, + "include_skills": str(include_skills).lower(), + "include_certifications": str(include_certifications).lower(), + 
"include_publications": str(include_publications).lower(), + "include_honors": str(include_honors).lower(), + "include_volunteers": str(include_volunteers).lower(), + "include_projects": str(include_projects).lower(), + "include_patents": str(include_patents).lower(), + "include_courses": str(include_courses).lower(), + "include_organizations": str(include_organizations).lower(), + "include_profile_status": str(include_profile_status).lower(), + "include_company_public_url": str(include_company_public_url).lower(), + } + + response = requests.get(f"{self.base_url}{endpoint}", headers=self.headers, params=params) + response.raise_for_status() + return response.json() + + def get_company_by_linkedin_url(self, linkedin_url: str) -> dict: + """ + Get company information from a LinkedIn company URL. + + Args: + linkedin_url: LinkedIn company URL (e.g., "https://www.linkedin.com/company/apple/") + + Returns: + Dict containing company information + + """ + endpoint = "/get-company-by-linkedinurl" + params = {"linkedin_url": linkedin_url} + + response = requests.get(f"{self.base_url}{endpoint}", headers=self.headers, params=params) + response.raise_for_status() + return response.json() + + +def initialize_linkedin_fresh_tool() -> Optional[LinkedInFreshDataTool]: + """ + Initializes the LinkedInFreshDataTool if the API key is available. + + Returns: + Optional[LinkedInFreshDataTool]: Initialized tool instance or None if initialization fails + + """ + api_key = os.getenv("RAPIDAPI_KEY") + if api_key: + try: + tool = LinkedInFreshDataTool(api_key=api_key) + logger.debug("Initialized LinkedInFreshDataTool.") + return tool # noqa: TRY300 + except ValueError as e: + logger.warning(f"Failed to initialize LinkedInFreshDataTool: {e}") + return None + else: + logger.warning("LinkedInFreshDataTool not initialized. 
Missing RAPIDAPI_KEY environment variable.") + return None diff --git a/mxtoai/tools/external_data/linkedin/linkedin_data_api.py b/mxtoai/tools/external_data/linkedin/linkedin_data_api.py new file mode 100644 index 0000000..4ed3c18 --- /dev/null +++ b/mxtoai/tools/external_data/linkedin/linkedin_data_api.py @@ -0,0 +1,458 @@ +""" +LinkedIn Data API implementation. +Provides access to LinkedIn data through the LinkedIn Data API (different from Fresh Data API). +""" + +import logging +import os +from typing import Optional + +import requests +from smolagents import Tool + +logger = logging.getLogger(__name__) + + +class LinkedInDataAPITool(Tool): + """Tool for accessing LinkedIn data through LinkedIn Data API.""" + + name: str = "linkedin_data_api" + description: str = "Access LinkedIn profile and company data using LinkedIn Data API for research and verification." + output_type: str = "object" + inputs: dict = { # noqa: RUF012 + "action": { + "type": "string", + "description": "The action to perform", + "enum": [ + "get_profile_data", + "get_profile_by_url", + "search_people", + "search_people_by_url", + "get_company_details", + "search_companies", + ], + }, + # Parameters for get_profile_data and get_company_details + "username": { + "type": "string", + "description": "LinkedIn username (for get_profile_data and get_company_details actions)", + "nullable": True, + }, + # Parameters for get_profile_by_url and search_people_by_url + "profile_url": { + "type": "string", + "description": "LinkedIn profile URL (for get_profile_by_url action)", + "nullable": True, + }, + "search_url": { + "type": "string", + "description": "LinkedIn search URL (for search_people_by_url action)", + "nullable": True, + }, + # Parameters for search_people action + "keywords": {"type": "string", "description": "Search keywords for people search (optional)", "nullable": True}, + "start": { + "type": "string", + "description": "Pagination start position for people search - could be one of: 0, 
10, 20, 30, etc. (optional)", + "nullable": True, + }, + "geo": { + "type": "string", + "description": "Geographic location codes for people search, comma-separated (e.g., '103644278,101165590') (optional)", + "nullable": True, + }, + "school_id": { + "type": "string", + "description": "School identifier for education filter in people search (optional)", + "nullable": True, + }, + "first_name": { + "type": "string", + "description": "First name filter for people search (optional)", + "nullable": True, + }, + "last_name": { + "type": "string", + "description": "Last name filter for people search (optional)", + "nullable": True, + }, + "keyword_school": { + "type": "string", + "description": "School-related keywords for people search (optional)", + "nullable": True, + }, + "keyword_title": { + "type": "string", + "description": "Job title keywords for people search (optional)", + "nullable": True, + }, + "company": {"type": "string", "description": "Company filter for people search (optional)", "nullable": True}, + # Parameters for search_companies action + "keyword": { + "type": "string", + "description": "Search keyword for company name/description in company search (optional)", + "nullable": True, + }, + "locations": { + "type": "array", + "items": {"type": "integer"}, + "description": "List of location codes for company search (e.g., [103644278]) (optional)", + "nullable": True, + }, + "company_sizes": { + "type": "array", + "items": {"type": "string"}, + "description": "List of company size codes for company search (e.g., ['D', 'E', 'F', 'G']) where D=1001-5000, E=5001-10000, F=10001+, etc. 
(optional)", + "nullable": True, + }, + "has_jobs": { + "type": "boolean", + "description": "Whether the company has active job postings in company search (optional)", + "nullable": True, + }, + "industries": { + "type": "array", + "items": {"type": "integer"}, + "description": "List of industry codes for company search (e.g., [96, 4]) (optional)", + "nullable": True, + }, + "page": { + "type": "integer", + "description": "Page number for pagination in company search (default: 1)", + "default": 1, + "nullable": True, + }, + } + + def __init__(self, api_key: str): + """ + Initialize the LinkedIn Data API tool. + + Args: + api_key: The RapidAPI key for authentication. + + """ + super().__init__() + if not api_key: + msg = "RapidAPI key is required for LinkedIn Data API." + raise ValueError(msg) + self.api_key = api_key + self.base_url = "https://linkedin-data-api.p.rapidapi.com" + self.headers = { + "x-rapidapi-key": self.api_key, + "x-rapidapi-host": "linkedin-data-api.p.rapidapi.com", + "Content-Type": "application/json", + } + + def forward( + self, + action: str, + username: Optional[str] = None, + profile_url: Optional[str] = None, + search_url: Optional[str] = None, + keywords: Optional[str] = None, + start: Optional[str] = None, + geo: Optional[str] = None, + school_id: Optional[str] = None, + first_name: Optional[str] = None, + last_name: Optional[str] = None, + keyword_school: Optional[str] = None, + keyword_title: Optional[str] = None, + company: Optional[str] = None, + keyword: Optional[str] = None, + locations: Optional[list[int]] = None, + company_sizes: Optional[list[str]] = None, + has_jobs: Optional[bool] = None, + industries: Optional[list[int]] = None, + page: int = 1, + ) -> dict: + """ + Process LinkedIn data requests. 
+ + Args: + action: The type of search to perform + username: LinkedIn username (for get_profile_data and get_company_details actions) + profile_url: LinkedIn profile URL (for get_profile_by_url action) + search_url: LinkedIn search URL (for search_people_by_url action) + keywords: Search keywords for people search (optional) + start: Pagination start position for people search (optional) + geo: Geographic location codes for people search (optional) + school_id: School identifier for education filter in people search (optional) + first_name: First name filter for people search (optional) + last_name: Last name filter for people search (optional) + keyword_school: School-related keywords for people search (optional) + keyword_title: Job title keywords for people search (optional) + company: Company filter for people search (optional) + keyword: Search keyword for company name/description in company search (optional) + locations: List of location codes for company search (optional) + company_sizes: List of company size codes for company search (optional) + has_jobs: Whether the company has active job postings in company search (optional) + industries: List of industry codes for company search (optional) + page: Page number for pagination in company search (default: 1) + + Returns: + Dict containing the search results + + """ + actions = { + "get_profile_data": self.get_profile_data, + "get_profile_by_url": self.get_profile_by_url, + "search_people": self.search_people, + "search_people_by_url": self.search_people_by_url, + "get_company_details": self.get_company_details, + "search_companies": self.search_companies, + } + + if action not in actions: + msg = f"Unsupported action: {action}" + raise ValueError(msg) + + try: + if action == "get_profile_data": + if not username: + msg = "username is required for get_profile_data action" + raise ValueError(msg) + return actions[action](username=username) + if action == "get_profile_by_url": + if not profile_url: + msg = 
"profile_url is required for get_profile_by_url action" + raise ValueError(msg) + return actions[action](profile_url=profile_url) + if action == "search_people_by_url": + if not search_url: + msg = "search_url is required for search_people_by_url action" + raise ValueError(msg) + return actions[action](search_url=search_url) + if action == "get_company_details": + if not username: + msg = "username is required for get_company_details action" + raise ValueError(msg) + return actions[action](username=username) + if action == "search_people": + return actions[action]( + keywords=keywords, + start=start, + geo=geo, + school_id=school_id, + first_name=first_name, + last_name=last_name, + keyword_school=keyword_school, + keyword_title=keyword_title, + company=company, + ) + if action == "search_companies": + return actions[action]( + keyword=keyword, + locations=locations, + company_sizes=company_sizes, + has_jobs=has_jobs, + industries=industries, + page=page, + ) + msg = f"Action '{action}' not implemented in forward method" + raise ValueError(msg) + except requests.exceptions.RequestException as e: + logger.error(f"LinkedIn Data API request failed: {e}") + msg = f"LinkedIn Data API request failed: {e}" + raise Exception(msg) from e + except Exception as e: + logger.error(f"Error processing LinkedIn Data API request: {e}") + msg = f"Failed to process LinkedIn Data API request: {e}" + raise Exception(msg) from e + + def get_profile_data(self, username: str) -> dict: + """ + Get profile data by LinkedIn username. + + Args: + username: LinkedIn username + + Returns: + Dict containing profile data + + """ + endpoint = "/get-profile-data" + params = {"username": username} + response = requests.post(f"{self.base_url}{endpoint}", params=params, headers=self.headers) + response.raise_for_status() + return response.json() + + def get_profile_by_url(self, profile_url: str) -> dict: + """ + Get profile data by URL (alternative endpoint). 
+ + Args: + profile_url: LinkedIn profile URL + + Returns: + Dict containing profile data + + """ + endpoint = "/get-profile-data-by-url" + payload = {"url": profile_url} + + response = requests.post(f"{self.base_url}{endpoint}", json=payload, headers=self.headers) + response.raise_for_status() + return response.json() + + def search_people( + self, + keywords: Optional[str] = None, + start: Optional[str] = None, + geo: Optional[str] = None, + school_id: Optional[str] = None, + first_name: Optional[str] = None, + last_name: Optional[str] = None, + keyword_school: Optional[str] = None, + keyword_title: Optional[str] = None, + company: Optional[str] = None, + ) -> dict: + """ + Search for people on LinkedIn. + + Args: + keywords: Search keywords (optional) + start: Pagination start position - could be one of: 0, 10, 20, 30, etc. (optional) + geo: Geographic location codes, comma-separated (e.g., "103644278,101165590") (optional) + school_id: School identifier for education filter (optional) + first_name: First name filter (optional) + last_name: Last name filter (optional) + keyword_school: School-related keywords (optional) + keyword_title: Job title keywords (optional) + company: Company filter (optional) + + Returns: + Dict containing search results + + """ + endpoint = "/search-people" + params = {} + + # Add parameters only if they are provided + if keywords: + params["keywords"] = keywords + if start: + params["start"] = start + if geo: + params["geo"] = geo + if school_id: + params["schoolId"] = school_id + if first_name: + params["firstName"] = first_name + if last_name: + params["lastName"] = last_name + if keyword_school: + params["keywordSchool"] = keyword_school + if keyword_title: + params["keywordTitle"] = keyword_title + if company: + params["company"] = company + + response = requests.get(f"{self.base_url}{endpoint}", headers=self.headers, params=params) + response.raise_for_status() + return response.json() + + def search_people_by_url(self, 
search_url: str) -> dict: + """ + Search people using a LinkedIn search URL. + + Args: + search_url: LinkedIn search URL + + Returns: + Dict containing search results + + Example Payload: + { + "url": "https://www.linkedin.com/search/results/people/?currentCompany=%5B%221035%22%5D&geoUrn=%5B%22103644278%22%5D&keywords=max&origin=FACETED_SEARCH&sid=%3AB5" + } + + """ + endpoint = "/search-people-by-url" + payload = {"url": search_url} + + response = requests.post(f"{self.base_url}{endpoint}", json=payload, headers=self.headers) + response.raise_for_status() + return response.json() + + def get_company_details(self, username: str) -> dict: + """ + Get company details by LinkedIn company username. + + Args: + username: LinkedIn company username + + Returns: + Dict containing company details + + """ + endpoint = "/get-company-details" + params = {"username": username} + + response = requests.post(f"{self.base_url}{endpoint}", params=params, headers=self.headers) + response.raise_for_status() + return response.json() + + def search_companies( + self, + keyword: Optional[str] = None, + locations: Optional[list[int]] = None, + company_sizes: Optional[list[str]] = None, + has_jobs: Optional[bool] = None, + industries: Optional[list[int]] = None, + page: int = 1, + ) -> dict: + """ + Search for companies on LinkedIn. + + Args: + keyword: Search keyword for company name/description + locations: List of location codes (e.g., [103644278]) + company_sizes: List of company size codes (e.g., ["D", "E", "F", "G"]) + where D=1001-5000, E=5001-10000, F=10001+, etc. 
+ has_jobs: Whether the company has active job postings + industries: List of industry codes (e.g., [96, 4]) + page: Page number for pagination (default: 1) + + Returns: + Dict containing search results + + """ + endpoint = "/search-companies" + payload = {"keyword": keyword or "", "page": page} + + # Add optional parameters only if provided + if locations: + payload["locations"] = locations + if company_sizes: + payload["companySizes"] = company_sizes + if has_jobs is not None: + payload["hasJobs"] = has_jobs + if industries: + payload["industries"] = industries + + response = requests.post(f"{self.base_url}{endpoint}", json=payload, headers=self.headers) + response.raise_for_status() + return response.json() + + +def initialize_linkedin_data_api_tool() -> Optional[LinkedInDataAPITool]: + """ + Initializes the LinkedInDataAPITool if the API key is available. + + Returns: + Optional[LinkedInDataAPITool]: Initialized tool instance or None if initialization fails + + """ + api_key = os.getenv("RAPIDAPI_KEY") + if api_key: + try: + tool = LinkedInDataAPITool(api_key=api_key) + logger.debug("Initialized LinkedInDataAPITool.") + return tool # noqa: TRY300 + except ValueError as e: + logger.warning(f"Failed to initialize LinkedInDataAPITool: {e}") + return None + else: + logger.warning("LinkedInDataAPITool not initialized. 
Missing RAPIDAPI_KEY environment variable.") + return None diff --git a/mxtoai/tools/pdf_export_tool.py b/mxtoai/tools/pdf_export_tool.py new file mode 100644 index 0000000..7235317 --- /dev/null +++ b/mxtoai/tools/pdf_export_tool.py @@ -0,0 +1,601 @@ +import re +import shutil +import tempfile +from datetime import datetime +from pathlib import Path +from typing import Any, Optional + +from smolagents import Tool + +from mxtoai._logging import get_logger +from mxtoai.scripts.report_formatter import ReportFormatter + +logger = get_logger("pdf_export_tool") + +# Constants for filename handling +MAX_FILENAME_LENGTH = 50 # Maximum length for filename before ".pdf" extension + +class PDFExportTool(Tool): + """Tool for exporting email content and research findings to PDF format.""" + + name = "pdf_export" + description = "Export email content, research findings, and attachment summaries to a professionally formatted PDF document" + + inputs = { + "content": { + "type": "string", + "description": "The main content to export (email body, research findings, etc.)" + }, + "title": { + "type": "string", + "description": "Title for the PDF document", + "nullable": True + }, + "research_findings": { + "type": "string", + "description": "Additional research findings to include", + "nullable": True + }, + "attachments_summary": { + "type": "string", + "description": "Summary of processed attachments", + "nullable": True + }, + "include_attachments": { + "type": "boolean", + "description": "Whether to include attachment summaries in the PDF", + "default": False, + "nullable": True + } + } + output_type = "object" + + def __init__(self): + super().__init__() + self.temp_dir = Path(tempfile.mkdtemp()) + self.temp_dir.mkdir(parents=True, exist_ok=True) + self.report_formatter = ReportFormatter() + logger.debug(f"PDFExportTool initialized with temp directory: {self.temp_dir}") + + @property + def temp_directory(self) -> Path: + """Get the temporary directory path for external 
cleanup.""" + return self.temp_dir + + def __del__(self): + """Cleanup temporary directory when object is destroyed.""" + self.cleanup() + + def cleanup(self): + """Explicitly cleanup the temporary directory and all its contents.""" + try: + if hasattr(self, "temp_dir") and self.temp_dir and self.temp_dir.exists(): + shutil.rmtree(self.temp_dir, ignore_errors=True) + logger.debug(f"Cleaned up PDFExportTool temp directory: {self.temp_dir}") + except Exception as e: + logger.warning(f"Failed to cleanup PDFExportTool temp directory: {e}") + + def forward( + self, + content: str, + title: Optional[str] = None, + research_findings: Optional[str] = None, + attachments_summary: Optional[str] = None, + include_attachments: bool = False + ) -> dict[str, Any]: + """ + Export content to PDF format. + + Args: + content: Main content to export + title: Document title (auto-generated if not provided) + research_findings: Additional research content + attachments_summary: Attachment summaries to include + include_attachments: Whether to include attachment section + + Returns: + Dict containing export results + + """ + try: + # Import WeasyPrint here to avoid import errors if not installed + from weasyprint import CSS, HTML + except ImportError as e: + logger.error(f"WeasyPrint not available: {e}") + return { + "error": "PDF generation library not available. 
Please install WeasyPrint.", + "details": str(e) + } + + try: + # Clean and prepare content + cleaned_content = self._clean_content(content) + doc_title = title or self._extract_title(cleaned_content) + + # Build complete markdown document + markdown_document = self._build_markdown_document( + content=cleaned_content, + title=doc_title, + research_findings=research_findings, + attachments_summary=attachments_summary if include_attachments else None + ) + + # Convert to HTML using existing ReportFormatter + html_content = self.report_formatter._to_html(markdown_document, theme="default") + + # Enhance HTML for PDF with custom CSS + pdf_html = self._enhance_html_for_pdf(html_content, doc_title) + + # Generate PDF + filename = self._sanitize_filename(doc_title) + ".pdf" + pdf_path = self.temp_dir / filename + + HTML(string=pdf_html).write_pdf( + pdf_path, + stylesheets=[CSS(string=self._get_pdf_styles())] + ) + + file_size = pdf_path.stat().st_size + + return { + "success": True, + "filename": filename, + "file_path": str(pdf_path), + "file_size": file_size, + "mimetype": "application/pdf", + "title": doc_title, + "pages_estimated": max(1, len(cleaned_content) // 2000), # Rough estimate + "temp_dir": str(self.temp_dir) # Include temp directory for cleanup + } + + except Exception as e: + logger.error(f"PDF export failed: {e}") + return { + "error": f"PDF export failed: {e!s}", + "details": "Please check the content format and try again" + } + + def _clean_content(self, content: str) -> str: + """ + Clean content by removing email headers and unnecessary formatting. 
+ + Args: + content: Raw content to clean + + Returns: + Cleaned content suitable for PDF export + + """ + if not content: + return "" + + # Remove common email headers patterns + email_header_patterns = [ + r"^From:.*$", + r"^To:.*$", + r"^Subject:.*$", + r"^Date:.*$", + r"^CC:.*$", + r"^BCC:.*$", + r"^Reply-To:.*$", + r"^Message-ID:.*$", + r"^In-Reply-To:.*$", + r"^References:.*$", + r"^Received:.*$", + r"^Return-Path:.*$" + ] + + lines = content.split("\n") + cleaned_lines = [] + + for line in lines: + # Skip lines that match email header patterns + is_header = any(re.match(pattern, line.strip(), re.IGNORECASE) for pattern in email_header_patterns) + if not is_header: + cleaned_lines.append(line) + + cleaned_content = "\n".join(cleaned_lines).strip() + + # Remove excessive whitespace but preserve paragraph breaks + cleaned_content = re.sub(r"\n\s*\n\s*\n+", "\n\n", cleaned_content) + return re.sub(r"[ \t]+", " ", cleaned_content) + + + def _extract_title(self, content: str) -> str: + """ + Extract a meaningful title from content. 
+ + Args: + content: Content to extract title from + + Returns: + Extracted title + + """ + if not content: + return "Document" + + # Look for markdown headers first + lines = content.split("\n") + for line in lines[:10]: # Check first 10 lines + line = line.strip() + if line.startswith("# "): + return line[2:].strip()[:60] # Remove # and limit length + if line.startswith("## "): + return line[3:].strip()[:60] # Remove ## and limit length + + # Look for lines that could be titles (short, meaningful lines) + for line in lines[:5]: # Check first 5 lines + line = line.strip() + if line and len(line) < 100 and len(line) > 5: + # Check if it looks like a title (no common body text indicators) + if not any(indicator in line.lower() for indicator in + ["the", "this", "that", "with", "from", "email", "message"]): + return line[:60] + + # Fallback: use first meaningful sentence + sentences = re.split(r"[.!?]+", content) + for sentence in sentences[:3]: + sentence = sentence.strip() + if 10 < len(sentence) < 80: + return sentence[:60] + + return f"Document - {datetime.now().strftime('%B %d, %Y')}" + + def _sanitize_filename(self, filename: str) -> str: + """ + Sanitize filename for file system compatibility. + + Args: + filename: Original filename + + Returns: + Sanitized filename + + """ + # Remove or replace invalid characters + filename = re.sub(r'[<>:"/\\|?*]', "", filename) + filename = re.sub(r"\s+", "_", filename) + filename = filename[:MAX_FILENAME_LENGTH] # Limit length using constant + return filename if filename else "document" + + def _build_markdown_document( + self, + content: str, + title: str, + research_findings: Optional[str] = None, + attachments_summary: Optional[str] = None + ) -> str: + """ + Build a complete markdown document for PDF conversion. 
+ + Args: + content: Main content + title: Document title + research_findings: Research content + attachments_summary: Attachment summaries + + Returns: + Complete markdown document + + """ + # Start with title + markdown_parts = [f"# {title}\n"] + + # Add generation date + markdown_parts.append(f"*Generated on {datetime.now().strftime('%B %d, %Y at %I:%M %p')}*\n") + + # Add main content + if content: + markdown_parts.append(content) + + # Add research findings section + if research_findings: + markdown_parts.append("\n---\n") + markdown_parts.append("## Research Findings\n") + markdown_parts.append(research_findings) + + # Add attachments section + if attachments_summary: + markdown_parts.append("\n---\n") + markdown_parts.append("## Attachments Summary\n") + markdown_parts.append(attachments_summary) + + # Add professional watermark with link + markdown_parts.append('\n\n
') + markdown_parts.append('
') + markdown_parts.append('
') + markdown_parts.append('📄 Document generated via ') + markdown_parts.append('mxtoai.com') + markdown_parts.append(' • Email: ') + markdown_parts.append('pdf@mxtoai.com') + markdown_parts.append("
") + markdown_parts.append("
") + + return "\n".join(markdown_parts) + + def _enhance_html_for_pdf(self, html_content: str, title: str) -> str: + """ + Enhance HTML content for better PDF generation. + + Args: + html_content: HTML content from ReportFormatter + title: Document title + + Returns: + Enhanced HTML suitable for PDF + + """ + # Extract the body content if it's a full HTML document + if "" in html_content: + # Extract content between body tags + import re + body_match = re.search(r"]*>(.*?)", html_content, re.DOTALL) + body_content = body_match.group(1) if body_match else html_content + else: + body_content = html_content + + # Create a clean PDF-optimized HTML document + return f""" + + + + + {self._html_escape(title)} + + +
+ {body_content} +
+ + + """ + + def _html_escape(self, text: str) -> str: + """Escape HTML special characters.""" + if not text: + return "" + return (text.replace("&", "&") + .replace("<", "<") + .replace(">", ">") + .replace('"', """) + .replace("'", "'")) + + def _get_pdf_styles(self) -> str: + """ + Get CSS styles for PDF generation. + + Returns: + CSS stylesheet string optimized for PDF + + """ + return """ + @page { + margin: 0.75in; + size: letter; + @bottom-right { + content: "Page " counter(page) " of " counter(pages); + font-size: 10px; + color: #666; + margin-bottom: 0.5in; + } + } + + * { + box-sizing: border-box; + } + + body { + font-family: 'Times New Roman', Times, serif; + font-size: 11pt; + line-height: 1.6; + color: #333; + margin: 0; + padding: 0; + } + + .pdf-document { + max-width: 100%; + } + + /* Override existing styles for PDF optimization */ + .container { + max-width: none !important; + margin: 0 !important; + padding: 0 !important; + } + + h1 { + color: #2563eb; + font-size: 24pt; + font-weight: bold; + margin: 0 0 20px 0; + line-height: 1.3; + border-bottom: 2px solid #2563eb; + padding-bottom: 15px; + } + + h2 { + color: #1f2937; + font-size: 18pt; + font-weight: bold; + margin: 30px 0 15px 0; + border-bottom: 1px solid #e5e7eb; + padding-bottom: 5px; + } + + h3 { + color: #374151; + font-size: 14pt; + font-weight: bold; + margin: 25px 0 12px 0; + } + + h4 { + color: #4b5563; + font-size: 12pt; + font-weight: bold; + margin: 20px 0 10px 0; + } + + p { + margin: 12px 0; + text-align: justify; + } + + ul, ol { + margin: 12px 0; + padding-left: 30px; + } + + li { + margin: 6px 0; + } + + strong, b { + font-weight: bold; + color: #1f2937; + } + + em, i { + font-style: italic; + } + + /* Table styling */ + table { + border-collapse: collapse; + width: 100%; + margin: 1.5em 0; + font-size: 10pt; + border: 2px solid #333; + } + + th, td { + border: 1px solid #333; + padding: 8px 12px; + text-align: left; + vertical-align: top; + } + + th { + background-color: 
#f0f0f0; + font-weight: bold; + border-bottom: 2px solid #333; + } + + /* Code styling */ + code { + background-color: #f6f8fa; + padding: 2px 4px; + border-radius: 3px; + font-family: 'Courier New', monospace; + font-size: 10pt; + } + + pre { + background-color: #f6f8fa; + padding: 12px; + border-radius: 6px; + overflow: auto; + font-family: 'Courier New', monospace; + font-size: 9pt; + line-height: 1.4; + } + + /* Links */ + a { + color: #2563eb; + text-decoration: underline; + } + + /* Blockquotes */ + blockquote { + border-left: 4px solid #dfe2e5; + margin: 1em 0; + padding: 0 1em; + color: #6a737d; + font-style: italic; + } + + /* Horizontal rules */ + hr { + border: none; + border-top: 1px solid #d1d5db; + margin: 2em 0; + } + + /* Ensure good page breaks */ + h1, h2, h3, h4 { + page-break-after: avoid; + } + + /* Watermark styling */ + .watermark { + margin-top: 40px; + padding: 20px 0; + page-break-inside: avoid; + } + + .watermark-divider { + border: none; + border-top: 2px solid #e5e7eb; + margin: 20px 0 15px 0; + } + + .watermark-content { + text-align: center; + font-size: 10pt; + color: #6b7280; + background: linear-gradient(135deg, #f9fafb 0%, #f3f4f6 100%); + border: 1px solid #e5e7eb; + border-radius: 8px; + padding: 12px 20px; + margin: 0 auto; + max-width: 500px; + } + + .watermark-text { + font-weight: normal; + color: #4b5563; + } + + .watermark-email { + color: #6b7280; + font-weight: normal; + } + + .watermark-link { + color: #2563eb; + text-decoration: none; + font-weight: 600; + transition: color 0.2s ease; + } + + .watermark-link:hover { + color: #1d4ed8; + text-decoration: underline; + } + + .watermark-link:visited { + color: #2563eb; + } + + /* Print-friendly styles */ + @media print { + body { + font-size: 10pt; + } + + h1 { + font-size: 20pt; + } + + h2 { + font-size: 16pt; + } + + .watermark-content { + background: #f9fafb; + } + } + """ diff --git a/mxtoai/tools/search_with_fallback_tool.py b/mxtoai/tools/search_with_fallback_tool.py 
deleted file mode 100644 index 4eb6b12..0000000 --- a/mxtoai/tools/search_with_fallback_tool.py +++ /dev/null @@ -1,98 +0,0 @@ -import logging -from typing import Optional - -from smolagents import Tool - -logger = logging.getLogger(__name__) - - -class SearchWithFallbackTool(Tool): - """ - A web search tool that attempts a sequence of primary search tools - and falls back to another tool if all primary attempts fail. - """ - - name = "web_search" # Consistent name for the agent to use - description = ( - "Performs a web search using a sequence of search engines. " - "It first attempts searches with primary engines (e.g., Bing, DuckDuckGo). " - "If all primary searches fail or yield no results, it attempts a fallback engine (e.g., Google Search)." - "If everything fails, rephrase the query and try again." - ) - inputs = { - "query": {"type": "string", "description": "The search query to perform."}, - } - output_type = "string" - - def __init__( - self, - primary_search_tools: list[Tool], - fallback_search_tool: Optional[Tool] = None, - ): - """ - Initializes the SearchWithFallbackTool. - - Args: - primary_search_tools: A list of Tool instances to try in order as primary searchers. - fallback_search_tool: An optional Tool instance to use if all primary tools fail. - - """ - if not primary_search_tools and not fallback_search_tool: - msg = "SearchWithFallbackTool requires at least one primary or fallback search tool." - raise ValueError(msg) - - self.primary_search_tools = primary_search_tools if primary_search_tools else [] - if not self.primary_search_tools: - logger.warning( - "SearchWithFallbackTool initialized without any primary search tools. " - "It will only use the fallback tool if available and primary attempts are implicitly skipped." 
- ) - - self.fallback_search_tool = fallback_search_tool - super().__init__() - - def _get_tool_identifier(self, tool_instance: Tool, default_name: str) -> str: - """Helper to get a descriptive name for a tool instance for logging.""" - base_name = getattr(tool_instance, "name", default_name) - if hasattr(tool_instance, "engine"): # Specifically for WebSearchTool - return f"{base_name} (engine: {tool_instance.engine})" - return base_name - - def forward(self, query: str) -> str: - """ - Execute the search, trying primary tools first, then the fallback tool. - """ - # Try primary search tools in order - for i, tool_instance in enumerate(self.primary_search_tools): - tool_identifier = self._get_tool_identifier(tool_instance, f"PrimaryTool_{i + 1}") - try: - logger.debug(f"Attempting search with primary tool: {tool_identifier}") - result = tool_instance.forward(query=query) - # Underlying smolagents tools typically raise exceptions if no results are found. - # So, a successful return here implies results were found. - logger.info(f"Primary search tool {tool_identifier} succeeded.") - return result - except Exception as e: - logger.warning( - f"Primary search tool {tool_identifier} failed: {e!s}. Trying next primary tool or fallback." 
- ) - - # If all primary tools failed, try the fallback tool - if self.fallback_search_tool: - fallback_tool_instance = self.fallback_search_tool - tool_identifier = self._get_tool_identifier(fallback_tool_instance, "FallbackTool") - try: - logger.debug(f"Attempting search with fallback tool: {tool_identifier}") - result = fallback_tool_instance.forward(query=query) - logger.info(f"Fallback search tool {tool_identifier} succeeded.") - return result - except Exception as e: - logger.error(f"Fallback search tool ({tool_identifier}) also failed: {e!s}") - # Ensure the original exception 'e' from the fallback tool is part of the new exception context - msg = f"All primary search tools failed, and the fallback search tool ({tool_identifier}) also failed. Last error: {e!s}" - raise SearchFailureException(msg) from e - else: - logger.error("All primary search tools failed and no fallback tool is configured.") - # It's important to raise an exception here if no tools succeeded and no fallback was available or fallback also failed. - msg = "All primary search tools failed and no fallback tool is configured or the fallback also failed." - raise SearchFailureException(msg) diff --git a/mxtoai/tools/web_search/__init__.py b/mxtoai/tools/web_search/__init__.py new file mode 100644 index 0000000..f8093f8 --- /dev/null +++ b/mxtoai/tools/web_search/__init__.py @@ -0,0 +1,14 @@ +""" +Web search tools module. +Independent search tools for different engines with cost/quality tradeoffs. 
+""" + +from .ddg_search import DDGSearchTool +from .brave_search import BraveSearchTool +from .google_search import GoogleSearchTool + +__all__ = [ + "DDGSearchTool", + "BraveSearchTool", + "GoogleSearchTool", +] \ No newline at end of file diff --git a/mxtoai/tools/web_search/brave_search.py b/mxtoai/tools/web_search/brave_search.py new file mode 100644 index 0000000..b460898 --- /dev/null +++ b/mxtoai/tools/web_search/brave_search.py @@ -0,0 +1,131 @@ +""" +Brave search tool - Better quality results with moderate API cost. +""" + +import logging +import os +from typing import Optional +from smolagents import Tool + +logger = logging.getLogger(__name__) + + +class BraveSearchTool(Tool): + """ + Brave search tool - Better quality results than DDG, moderate API cost. + Use when DDG results are insufficient or when you need more comprehensive information. + """ + + name = "brave_search" + description = ( + "Performs a web search using Brave Search API. You can do Web search, Images, Videos, News, and more." + "It might give better results than DuckDuckGo but has moderate API costs. Use this when DDG results are insufficient " + "or when you need more detailed, current, or specialized information. Good for research and detailed queries." + ) + inputs = { + "query": {"type": "string", "description": "The user's search query term. Max 400 chars, 50 words."}, + "country": {"type": "string", "description": "2-char country code for results (e.g., 'US', 'DE'). Default: 'US'.", "nullable": True}, + "search_lang": {"type": "string", "description": "Language code for search results (e.g., 'en', 'es'). Default: 'en'.", "nullable": True}, + "ui_lang": {"type": "string", "description": "UI language for response (e.g., 'en-US'). Default: 'en-US'.", "nullable": True}, + "safesearch": {"type": "string", "description": "Filter adult content: 'off', 'moderate', 'strict'. 
Default: 'moderate'.", "nullable": True}, + "freshness": {"type": "string", "description": "Filter by discovery date: 'pd' (day), 'pw' (week), 'pm' (month), 'py' (year), or 'YYYY-MM-DDtoYYYY-MM-DD'. Default: None.", "nullable": True}, + "result_filter": {"type": "string", "description": "Comma-separated result types (e.g., 'web,news'). Default: 'web'.", "nullable": True}, + } + output_type = "string" + + def __init__(self, max_results: int = 5): + """ + Initialize Brave search tool. + + Args: + max_results: Maximum number of results to return + """ + self.max_results = max_results + self.api_key = os.getenv("BRAVE_SEARCH_API_KEY") + super().__init__() + + if not self.api_key: + logger.warning("BRAVE_SEARCH_API_KEY not found. Brave search will not be available.") + else: + logger.debug(f"BraveSearchTool initialized with max_results={max_results}") + + def forward( + self, + query: str, + country: str = "US", + search_lang: str = "en", + ui_lang: str = "en-US", + safesearch: str = "moderate", + freshness: Optional[str] = None, + result_filter: str = "web", + ) -> str: + """Execute Brave search.""" + if not self.api_key: + raise ValueError("Brave Search API key not configured. 
Cannot perform search.") + + try: + log_params = { + "query": query, + "country": country, + "search_lang": search_lang, + "ui_lang": ui_lang, + "safesearch": safesearch, + "freshness": freshness, + "result_filter": result_filter, + } + logger.info(f"Performing Brave search with params: {log_params}") + + import requests + + headers = { + "Accept": "application/json", + "Accept-Encoding": "gzip", + "X-Subscription-Token": self.api_key, + } + + params = { + "q": query, + "count": self.max_results, + "country": country, + "search_lang": search_lang, + "ui_lang": ui_lang, + "safesearch": safesearch, + "result_filter": result_filter, + "text_decorations": False, + "spellcheck": True, + } + if freshness: + params["freshness"] = freshness + + response = requests.get( + "https://api.search.brave.com/res/v1/web/search", + headers=headers, + params=params, + timeout=10, + ) + response.raise_for_status() + + data = response.json() + web_results = data.get("web", {}).get("results", []) + + if not web_results: + logger.warning(f"Brave search returned no results for query: {query}") + return f"No results found for query: {query}" + + # Format results + formatted_results = [] + for i, result in enumerate(web_results[:self.max_results], 1): + title = result.get("title", "No title") + url = result.get("url", "") + description = result.get("description", "No description") + + formatted_result = f"{i}. **{title}**\n URL: {url}\n {description}\n" + formatted_results.append(formatted_result) + + result_text = "\n".join(formatted_results) + logger.info("Brave search completed successfully") + return result_text + + except Exception as e: + logger.error(f"Brave search failed: {e}") + raise \ No newline at end of file diff --git a/mxtoai/tools/web_search/ddg_search.py b/mxtoai/tools/web_search/ddg_search.py new file mode 100644 index 0000000..daae004 --- /dev/null +++ b/mxtoai/tools/web_search/ddg_search.py @@ -0,0 +1,50 @@ +""" +DuckDuckGo search tool - Free and fast search option. 
+""" + +import logging +from smolagents import Tool +from smolagents.default_tools import WebSearchTool + +logger = logging.getLogger(__name__) + + +class DDGSearchTool(Tool): + """ + DuckDuckGo search tool - Free and fast, but may have limited results. + Use this first for most queries as it's cost-effective. + """ + + name = "ddg_search" + description = ( + "Performs a web search using DuckDuckGo. This is the most cost-effective search option " + "and should be tried first for most queries. It's free but may have limited or less comprehensive results " + "compared to premium search engines. Good for general information and quick searches." + ) + inputs = { + "query": {"type": "string", "description": "The search query to perform."}, + } + output_type = "string" + + def __init__(self, max_results: int = 5): + """ + Initialize DDG search tool. + + Args: + max_results: Maximum number of results to return + """ + self.max_results = max_results + self.ddg_tool = WebSearchTool(engine="duckduckgo", max_results=max_results) + super().__init__() + logger.debug(f"DDGSearchTool initialized with max_results={max_results}") + + def forward(self, query: str) -> str: + """Execute DuckDuckGo search.""" + try: + logger.info(f"Performing DDG search for: {query}") + result = self.ddg_tool.forward(query=query) + logger.info("DDG search completed successfully") + return result + except Exception as e: + logger.error(f"DDG search failed: {e}") + raise \ No newline at end of file diff --git a/mxtoai/tools/web_search/google_search.py b/mxtoai/tools/web_search/google_search.py new file mode 100644 index 0000000..53ca6a4 --- /dev/null +++ b/mxtoai/tools/web_search/google_search.py @@ -0,0 +1,69 @@ +""" +Google search tool - Highest quality results with premium API cost. 
+""" + +import logging +import os +from typing import Optional +from smolagents import Tool +from smolagents.default_tools import GoogleSearchTool as SmolagentsGoogleSearchTool + +logger = logging.getLogger(__name__) + + +class GoogleSearchTool(Tool): + """ + Google search tool - Highest quality results, premium API cost. + Use only when DDG and Brave searches are insufficient for critical or complex queries. + """ + + name = "google_search" + description = ( + "Performs a web search using Google Search API (SerpAPI or Serper). Use it when google results are needed" + "It has premium API costs. Use this only when " + "DDG and Brave searches are insufficient, or for critical/complex queries that require the best available information." + "Ideal for authoritative sources, breaking news, and complex research topics." + ) + inputs = { + "query": {"type": "string", "description": "The search query to perform."}, + } + output_type = "string" + + def __init__(self): + """ + Initialize Google search tool. + """ + self.google_tool: Optional[SmolagentsGoogleSearchTool] = None + + # Try to initialize Google search tool with available providers + if os.getenv("SERPAPI_API_KEY"): + try: + self.google_tool = SmolagentsGoogleSearchTool(provider="serpapi") + logger.debug("GoogleSearchTool initialized with SerpAPI") + except ValueError as e: + logger.warning(f"Failed to initialize GoogleSearchTool with SerpAPI: {e}") + elif os.getenv("SERPER_API_KEY"): + try: + self.google_tool = SmolagentsGoogleSearchTool(provider="serper") + logger.debug("GoogleSearchTool initialized with Serper") + except ValueError as e: + logger.warning(f"Failed to initialize GoogleSearchTool with Serper: {e}") + + if not self.google_tool: + logger.warning("No Google Search API keys found. Google search will not be available.") + + super().__init__() + + def forward(self, query: str) -> str: + """Execute Google search.""" + if not self.google_tool: + raise ValueError("Google Search API not configured. 
Cannot perform search.") + + try: + logger.info(f"Performing Google search for: {query}") + result = self.google_tool.forward(query=query) + logger.info("Google search completed successfully") + return result + except Exception as e: + logger.error(f"Google search failed: {e}") + raise \ No newline at end of file diff --git a/mxtoai/validators.py b/mxtoai/validators.py index f44d465..ea4cebd 100644 --- a/mxtoai/validators.py +++ b/mxtoai/validators.py @@ -14,23 +14,27 @@ from mxtoai.dependencies import processing_instructions_resolver from mxtoai.email_sender import send_email_reply from mxtoai.schemas import RateLimitPlan -from mxtoai.whitelist import get_whitelist_signup_url, is_email_whitelisted +from mxtoai.whitelist import get_whitelist_signup_url, is_email_whitelisted, trigger_automatic_verification logger = get_logger(__name__) # Globals to be initialized from api.py redis_client: Optional[aioredis.Redis] = None -email_provider_domain_set: set[str] = set() # Still useful for the domain check logic +email_provider_domain_set: set[str] = set() # Still useful for the domain check logic # Rate limit settings RATE_LIMITS_BY_PLAN = { RateLimitPlan.BETA: { - "hour": {"limit": 20, "period_seconds": 3600, "expiry_seconds": 3600 * 2}, # 2hr expiry for 1hr window - "day": {"limit": 50, "period_seconds": 86400, "expiry_seconds": 86400 + 3600}, # 25hr expiry for 24hr window - "month": {"limit": 300, "period_seconds": 30 * 86400, "expiry_seconds": (30 * 86400) + 86400} # 31day expiry for 30day window + "hour": {"limit": 20, "period_seconds": 3600, "expiry_seconds": 3600 * 2}, # 2hr expiry for 1hr window + "day": {"limit": 50, "period_seconds": 86400, "expiry_seconds": 86400 + 3600}, # 25hr expiry for 24hr window + "month": { + "limit": 300, + "period_seconds": 30 * 86400, + "expiry_seconds": (30 * 86400) + 86400, + }, # 31day expiry for 30day window } } -RATE_LIMIT_PER_DOMAIN_HOUR = { # Consistent structure for domain limits +RATE_LIMIT_PER_DOMAIN_HOUR = { # Consistent 
structure for domain limits "hour": {"limit": 50, "period_seconds": 3600, "expiry_seconds": 3600 * 2} } @@ -38,7 +42,7 @@ PERIOD_EXPIRY = { "hour": 3600 * 2, # 2 hours "day": 86400 + 3600, # 25 hours - "month": 30 * 86400 + 86400, # 31 days + "month": 30 * 86400 + 86400, # 31 days } @@ -54,11 +58,13 @@ def get_current_timestamp_for_period(period_name: str, dt: datetime) -> str: async def check_rate_limit_redis( - key_type: str, # "email" or "domain" + key_type: str, # "email" or "domain" identifier: str, - plan_or_domain_limits: dict[str, dict[str, int]], # e.g., RATE_LIMITS_BY_PLAN[RateLimitPlan.BETA] or RATE_LIMIT_PER_DOMAIN_HOUR + plan_or_domain_limits: dict[ + str, dict[str, int] + ], # e.g., RATE_LIMITS_BY_PLAN[RateLimitPlan.BETA] or RATE_LIMIT_PER_DOMAIN_HOUR current_dt: datetime, - plan_name_for_key: str = "" # e.g. "beta" or "" for domain + plan_name_for_key: str = "", # e.g. "beta" or "" for domain ) -> Optional[str]: """ Checks and updates rate limits using Redis. @@ -77,7 +83,7 @@ async def check_rate_limit_redis( """ if redis_client is None: logger.error("Redis client not initialized for rate limiting.") - return None # Fail open if Redis is not ready + return None # Fail open if Redis is not ready for period_name, config in plan_or_domain_limits.items(): limit = config["limit"] @@ -86,7 +92,7 @@ async def check_rate_limit_redis( time_bucket = get_current_timestamp_for_period(period_name, current_dt) redis_key_parts = ["rate_limit", key_type, identifier] - if plan_name_for_key: # Add plan to key for email limits + if plan_name_for_key: # Add plan to key for email limits redis_key_parts.append(plan_name_for_key) redis_key_parts.extend([period_name, time_bucket]) redis_key = ":".join(redis_key_parts) @@ -110,10 +116,10 @@ async def check_rate_limit_redis( f"Rate limit EXCEEDED for {key_type} '{identifier}' (Plan: '{plan_name_for_key if plan_name_for_key else 'N/A'}'): " f"Period '{period_name}', Count: {current_count}/{limit}. 
Key: {redis_key}" ) - return period_name # e.g., "hour", "day", "month" + return period_name # e.g., "hour", "day", "month" except aioredis.RedisError as e: logger.error(f"Redis error during rate limit check for key {redis_key}: {e}") - return None # Fail open on Redis error to avoid blocking legitimate requests + return None # Fail open on Redis error to avoid blocking legitimate requests return None @@ -139,7 +145,7 @@ def get_domain_from_email(email_address: str) -> str: try: return email_address.split("@", 1)[1].lower() except IndexError: - return "" # Should not happen for valid emails + return "" # Should not happen for valid emails async def send_rate_limit_rejection_email( @@ -151,10 +157,10 @@ async def send_rate_limit_rejection_email( Please try again after some time. Best, -MX to AI Team""" +MXtoAI Team""" html_rejection_text = f"""

Your email could not be processed because the usage limit has been exceeded ({limit_type}).

Please try again after some time.

-

Best regards,
MX to AI Team

""" +

Best regards,
MXtoAI Team

""" email_dict = { "from": from_email, @@ -178,13 +184,13 @@ async def validate_rate_limits( """ Validate incoming email against defined rate limits based on the plan, using Redis. """ - if redis_client is None: # Should not happen if initialized correctly + if redis_client is None: # Should not happen if initialized correctly logger.warning("Redis client not initialized. Skipping rate limit check.") return None normalized_user_email = normalize_email(from_email) email_domain = get_domain_from_email(normalized_user_email) - current_dt = datetime.now(timezone.utc) # Use timezone-aware datetime + current_dt = datetime.now(timezone.utc) # Use timezone-aware datetime # 1. Per-email limits based on plan plan_email_limits_config = RATE_LIMITS_BY_PLAN.get(plan) @@ -196,7 +202,7 @@ async def validate_rate_limits( identifier=normalized_user_email, plan_or_domain_limits=plan_email_limits_config, current_dt=current_dt, - plan_name_for_key=plan.value + plan_name_for_key=plan.value, ) if email_limit_exceeded_period: limit_type_msg = f"email {email_limit_exceeded_period} for {plan.value} plan" @@ -218,10 +224,10 @@ async def validate_rate_limits( domain_limit_exceeded_period = await check_rate_limit_redis( key_type="domain", identifier=email_domain, - plan_or_domain_limits=RATE_LIMIT_PER_DOMAIN_HOUR, # This needs to be a dict of periods - current_dt=current_dt + plan_or_domain_limits=RATE_LIMIT_PER_DOMAIN_HOUR, # This needs to be a dict of periods + current_dt=current_dt, ) - if domain_limit_exceeded_period: # This will be "hour" if exceeded + if domain_limit_exceeded_period: # This will be "hour" if exceeded limit_type_msg = f"domain {domain_limit_exceeded_period}" await send_rate_limit_rejection_email(from_email, to, subject, messageId, limit_type_msg) return Response( @@ -264,6 +270,9 @@ async def validate_email_whitelist( """ Validate if the sender's email is whitelisted and verified. 
+ Major email providers are allowed, OR emails that exist and are verified in the Supabase whitelist. + For non-whitelisted emails, automatic verification is triggered and email processing is stopped. + Args: from_email: Sender's email address to: Recipient's email address @@ -274,46 +283,103 @@ async def validate_email_whitelist( Response if validation fails, None if validation succeeds """ + # Extract domain from sender's email + email_domain = get_domain_from_email(from_email) + + # Check if email is from major email provider + is_major_provider = email_domain in email_provider_domain_set + + # Check Supabase whitelist for all emails exists_in_whitelist, is_verified = await is_email_whitelisted(from_email) - if not exists_in_whitelist: - # Case 1: Email not in whitelist at all - signup_url = get_whitelist_signup_url() - rejection_msg = f"""Your email address is not whitelisted in our system. + # Allow if email is from major provider OR exists and is verified in whitelist + if is_major_provider: + logger.info(f"Email allowed from major email provider: {from_email} (domain: {email_domain})") + return None + if exists_in_whitelist and is_verified: + logger.info(f"Email allowed from Supabase whitelist: {from_email} (verified)") + return None + + # For non-major providers that are not verified, trigger automatic verification + # and STOP email processing until they verify + logger.info(f"Triggering automatic verification for {from_email} (exists={exists_in_whitelist}, verified={is_verified})") + + # Trigger automatic verification in the background + verification_triggered = False + try: + verification_triggered = await trigger_automatic_verification(from_email) + if verification_triggered: + logger.info(f"Successfully triggered automatic verification for {from_email}") + else: + logger.warning(f"Failed to trigger automatic verification for {from_email}") + except Exception as e: + logger.error(f"Error triggering automatic verification for {from_email}: {e}") + + # 
Determine rejection message based on verification status and outcome + if verification_triggered: + # Verification email was sent successfully + rejection_msg = f"""Your email could not be processed because your domain is not automatically whitelisted. -To use our email processing service, please visit {signup_url} to request access. +Major email providers (Gmail, Outlook, Yahoo, etc.) are automatically whitelisted, but custom domains require verification. -Once your email is added to the whitelist and verified, you can resend your email for processing. +🚀 GOOD NEWS: We've automatically started the verification process for you! + +📧 CHECK YOUR EMAIL: You should receive a verification email at {from_email} within the next few minutes. + +✅ NEXT STEPS: +1. Click the verification link in the email we just sent +2. Once verified, simply resend your original email to this address +3. Your email will then be processed normally + +⚠️ IMPORTANT: You must verify your email first, then resend your request for it to be processed. Best, -MX to AI Team""" +MXtoAI Team""" + + html_rejection = f"""

Your email could not be processed because your domain is not automatically whitelisted.

+

Major email providers (Gmail, Outlook, Yahoo, etc.) are automatically whitelisted, but custom domains require verification.

+ +
+ 🚀 GOOD NEWS: We've automatically started the verification process for you! +
+ +
+ 📧 CHECK YOUR EMAIL: You should receive a verification email at {from_email} within the next few minutes. +
- html_rejection = f"""

Your email address is not whitelisted in our system.

-

To use our email processing service, please visit {signup_url} to request access.

-

Once your email is added to the whitelist and verified, you can resend your email for processing.

-

Best regards,
MX to AI Team

""" +

✅ NEXT STEPS:

+
    +
  1. Click the verification link in the email we just sent
  2. +
  3. Once verified, simply resend your original email to this address
  4. +
  5. Your email will then be processed normally
  6. +
- elif not is_verified: - # Case 2: Email in whitelist but not verified +
+ ⚠️ IMPORTANT: You must verify your email first, then resend your request for it to be processed. +
+ +

Best regards,
MXtoAI Team

""" + else: + # Verification email failed to send - fallback to manual signup signup_url = get_whitelist_signup_url() - rejection_msg = f"""Your email is registered but not yet verified. + rejection_msg = f"""Your email could not be processed because your domain is not automatically whitelisted. + +Major email providers (Gmail, Outlook, Yahoo, etc.) are automatically whitelisted, but custom domains require manual approval. -Please check your email for a verification link we sent when you registered. If you can't find it, you can request a new verification link at {signup_url}. +We attempted to automatically send you a verification email, but it failed. Please visit {signup_url} to manually request access. -Once verified, you can resend your email for processing. +Once your email is verified, you can resend your email for processing. Best, -MX to AI Team""" +MXtoAI Team""" - html_rejection = f"""

Your email is registered but not yet verified.

-

Please check your email for a verification link we sent when you registered. If you can't find it, you can request a new verification link at {signup_url}.

-

Once verified, you can resend your email for processing.

-

Best regards,
MX to AI Team

""" - else: - # Email exists and is verified - return None + html_rejection = f"""

Your email could not be processed because your domain is not automatically whitelisted.

+

Major email providers (Gmail, Outlook, Yahoo, etc.) are automatically whitelisted, but custom domains require manual approval.

+

We attempted to automatically send you a verification email, but it failed. Please visit {signup_url} to manually request access.

+

Once your email is verified, you can resend your email for processing.

+

Best regards,
MXtoAI Team

""" - # Send rejection email for both unverified and non-existent cases + # Send rejection email email_dict = { "from": from_email, # Original sender becomes recipient "to": to, # Original recipient becomes sender @@ -327,21 +393,21 @@ async def validate_email_whitelist( try: await send_email_reply(email_dict, rejection_msg, html_rejection) logger.info( - f"Sent whitelist rejection email to {from_email} (exists={exists_in_whitelist}, verified={is_verified})" + f"Sent verification instruction email to {from_email} (verification_triggered={verification_triggered})" ) except Exception as e: - logger.error(f"Failed to send whitelist rejection email: {e}") + logger.error(f"Failed to send verification instruction email: {e}") - # Return appropriate error response - status_message = "Email not whitelisted" if not exists_in_whitelist else "Email not verified" + # Return error response to stop email processing return Response( content=json.dumps( { - "message": f"Email rejected - {status_message}", + "message": "Email verification required - check your email for verification instructions", "email": from_email, + "verification_triggered": verification_triggered, "exists_in_whitelist": exists_in_whitelist, "is_verified": is_verified, - "rejection_sent": True, + "next_action": "verify_email_then_resend", } ), status_code=status.HTTP_403_FORBIDDEN, @@ -425,13 +491,13 @@ async def validate_attachments( Please reduce the number of attachments and try again. Best, -MX to AI Team""" +MXtoAI Team""" html_rejection = f"""

Your email could not be processed due to too many attachments.

Maximum allowed attachments: {MAX_ATTACHMENTS_COUNT}
Number of attachments in your email: {len(attachments)}

Please reduce the number of attachments and try again.

-

Best regards,
MX to AI Team

""" +

Best regards,
MXtoAI Team

""" email_dict = { "from": from_email, @@ -475,13 +541,13 @@ async def validate_attachments( Please reduce the file size and try again. Best, -MX to AI Team""" +MXtoAI Team""" html_rejection = f"""

Your email could not be processed due to an oversized attachment.

Maximum allowed size per attachment: {MAX_ATTACHMENT_SIZE_MB}MB
Size of attachment '{attachment.get("filename", "unknown")}': {size_mb:.1f}MB

Please reduce the file size and try again.

-

Best regards,
MX to AI Team

""" +

Best regards,
MXtoAI Team

""" email_dict = { "from": from_email, @@ -522,13 +588,13 @@ async def validate_attachments( Please reduce the total size of attachments and try again. Best, -MX to AI Team""" +MXtoAI Team""" html_rejection = f"""

Your email could not be processed due to total attachment size exceeding the limit.

Maximum allowed total size: {MAX_TOTAL_ATTACHMENTS_SIZE_MB}MB
Total size of your attachments: {total_size_mb:.1f}MB

Please reduce the total size of attachments and try again.

-

Best regards,
MX to AI Team

""" +

Best regards,
MXtoAI Team

""" email_dict = { "from": from_email, diff --git a/mxtoai/whitelist.py b/mxtoai/whitelist.py index ff1a4f1..af3b6d0 100644 --- a/mxtoai/whitelist.py +++ b/mxtoai/whitelist.py @@ -1,4 +1,6 @@ import os +import uuid +from datetime import datetime, timezone from typing import Optional from supabase import Client, create_client @@ -66,6 +68,181 @@ async def is_email_whitelisted(email: str) -> tuple[bool, bool]: return False, False +async def trigger_automatic_verification(email: str) -> bool: + """ + Automatically trigger email verification for non-whitelisted users. + + This function: + 1. Generates a unique verification token + 2. Inserts/updates the email in whitelisted_emails table with verified=false + 3. Sends verification email using SES + + Args: + email: The email address to verify + + Returns: + bool: True if verification process was successfully triggered, False otherwise + + """ + try: + if not supabase: + init_supabase() + + # Generate unique verification token + verification_token = str(uuid.uuid4()) + current_time = datetime.now(timezone.utc).isoformat() + + # Check if email already exists in whitelist + existing_response = supabase.table("whitelisted_emails").select("*").eq("email", email).execute() + + if hasattr(existing_response, "data") and len(existing_response.data) > 0: + # Email exists, update with new verification token + update_response = supabase.table("whitelisted_emails").update({ + "verification_token": verification_token, + "verified": False, + "updated_at": current_time + }).eq("email", email).execute() + + if hasattr(update_response, "data") and len(update_response.data) > 0: + logger.info(f"Updated existing email {email} with new verification token") + else: + logger.error(f"Failed to update verification token for {email}") + return False + else: + # Email doesn't exist, insert new record + insert_response = supabase.table("whitelisted_emails").insert({ + "email": email, + "verified": False, + "verification_token": verification_token, 
+ "created_at": current_time, + "updated_at": current_time + }).execute() + + if hasattr(insert_response, "data") and len(insert_response.data) > 0: + logger.info(f"Inserted new email {email} with verification token") + else: + logger.error(f"Failed to insert verification record for {email}") + return False + + # Send verification email + verification_sent = await send_verification_email(email, verification_token) + + if verification_sent: + logger.info(f"Successfully triggered automatic verification for {email}") + return True + logger.error(f"Failed to send verification email to {email}") + return False + + except Exception as e: + logger.error(f"Error triggering automatic verification for {email}: {e}") + return False + + +async def send_verification_email(email: str, verification_token: str) -> bool: + """ + Send verification email using SES email sender. + + Args: + email: Recipient email address + verification_token: Unique verification token + + Returns: + bool: True if email was sent successfully, False otherwise + + """ + try: + # Import here to avoid circular imports + from mxtoai.email_sender import EmailSender + + # Get the origin URL for verification links + origin = os.getenv("FRONTEND_URL", "https://mxtoai.com") + verification_url = f"{origin}/verify?token={verification_token}" + + # Create email content + subject = "Verify your email for MXtoAI" + + text_content = f"""Welcome to MXtoAI! + +To complete your registration and start using our email processing service, please verify your email address by clicking the link below: + +{verification_url} + +This verification link will expire in 24 hours for security reasons. + +If you didn't request this verification, you can safely ignore this email. + +Best regards, +MXtoAI Team + +--- +MXtoAI - Transform your emails with AI +https://mxtoai.com""" + + html_content = f""" + + + + Verify your email - MXtoAI + + + +
+
+

Welcome to MXtoAI!

+
+
+

Verify your email address

+

To complete your registration and start using our email processing service, please verify your email address by clicking the button below:

+ +
+ Verify Email Address +
+ +

Or copy and paste this link into your browser:

+

{verification_url}

+ +
+ ⏰ Important: This verification link will expire in 24 hours for security reasons. +
+ +

If you didn't request this verification, you can safely ignore this email.

+ +

Best regards,
+ MXtoAI Team

+
+ +
+ +""" + + # Initialize email sender and send verification email + email_sender = EmailSender() + response = await email_sender.send_email( + to_address=email, + subject=subject, + body_text=text_content, + body_html=html_content + ) + + logger.info(f"Verification email sent successfully to {email}: {response.get('MessageId', 'Unknown')}") + return True + + except Exception as e: + logger.error(f"Error sending verification email to {email}: {e}") + return False + + def get_whitelist_signup_url() -> str: """ Get the URL where users can sign up to be whitelisted @@ -74,4 +251,4 @@ def get_whitelist_signup_url() -> str: str: The URL for whitelist signup """ - return os.getenv("WHITELIST_SIGNUP_URL", "https://mxtoai.com/whitelist-signup") + return os.getenv("WHITELIST_SIGNUP_URL", "https://mxtoai.com/whitelist") diff --git a/poetry.lock b/poetry.lock index 3f3dfd4..7f241f7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. 
[[package]] name = "aiofiles" @@ -26,98 +26,98 @@ files = [ [[package]] name = "aiohttp" -version = "3.12.0" +version = "3.12.8" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiohttp-3.12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91fca62b1a454a72c48345ad3af0327c87a7352598049fd9fd02b5c96deca456"}, - {file = "aiohttp-3.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4cd2ffa8cefce24305e573780d3d4a1bc8904bb76bc208509108bac04bc85c71"}, - {file = "aiohttp-3.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b011907c9f024d9b2017a2c12ca8abea571b919ebd85d42f16bd91a716dc7de2"}, - {file = "aiohttp-3.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9c8db3b45fb114739bc3daae85ceb03b2bcb11f11f2d1eae25b00b989cd306a"}, - {file = "aiohttp-3.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:976607ee1790d2e6d1666f89f64afd9397af2647b5a99a84dc664a3ac715754f"}, - {file = "aiohttp-3.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e76898841d4e655ac5e7e2f1e146d9e56ee1ffd2ce2dd31b41ab23bcfb29b209"}, - {file = "aiohttp-3.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ad61b28652898b25e7c8d42970b9f27f7eff068623704aad4424e2ee9409a80"}, - {file = "aiohttp-3.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba8d85d36edc6698ef94cf8f7fcf5992cc2d9b639de67a1112799d5020c91a63"}, - {file = "aiohttp-3.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5123488226a61df4a515dc3d5c3c0b578660ec3c22d2579599ce2e45335655db"}, - {file = "aiohttp-3.12.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a03fb47a954df7fb0d587053e2feafdd5306828fc8a764b456775fc00d2d82a9"}, - {file = "aiohttp-3.12.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = 
"sha256:bf9acc3914c921ea8fb0bcda3d07ece85d09eff035bd7c11cea826aa5dd827a5"}, - {file = "aiohttp-3.12.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c7cfb0b19e775143982e34a472f9da66067c22b66ce7a56e88f851752a467f15"}, - {file = "aiohttp-3.12.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:2eba07f1de9a02920f34761c8b8e375f91fd98304a80ff0287f8e9e2804decf7"}, - {file = "aiohttp-3.12.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:d46d99d028ad4a566f980bc8790099288824212c0f21a275d546be403cbcb7bc"}, - {file = "aiohttp-3.12.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0bd190e4c418c1072563dd998ae118dfb588101f60c1af396e5cd42023f29259"}, - {file = "aiohttp-3.12.0-cp310-cp310-win32.whl", hash = "sha256:709d823cc86d0c3ab4e9b449fefba47a1a8586fe65a00d5fbce393458be9da1c"}, - {file = "aiohttp-3.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:a44b25ade659f8736b0f2c32cfd2b59449defad41c5f1e514b94a338c777226f"}, - {file = "aiohttp-3.12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:38ab87bc3c2f2c3861438e537cbd6732d72b73f2b82ea9ba4b214b6aca170ad9"}, - {file = "aiohttp-3.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8862c9b190854c0ff3f5a3f25abee9ed7641aee6eccdc81aed2c3d427623d3dc"}, - {file = "aiohttp-3.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8cd1eb1d5498cc541ce40946e148371e23efefcf48afdaa68f49328d2849f393"}, - {file = "aiohttp-3.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07b7e64a7c325e2d87523e1f8210bdba0e2e159703ad00f75bff336134d8490a"}, - {file = "aiohttp-3.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:1accf0a2270e1e05b453d1dd0f51f176148eec81306c39da39b7af5b29e1d56b"}, - {file = "aiohttp-3.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c3aaae0180d804b4fe95cee7fe03c2ff362828c5ebb7a8370132957104b6311"}, - {file = "aiohttp-3.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:c0ab714799a6fd698715d9fc1d1a546a99288288939506fede60d133dc53328b"}, - {file = "aiohttp-3.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02b33c67d7db1a4b2df28e5c1e4d8c025db8e4432b3d054db3ea695063cbfc52"}, - {file = "aiohttp-3.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3718948668ae986d53bd2c443ffc82e6559de2bec1d66a215c1c5e059d80ff37"}, - {file = "aiohttp-3.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fc9f188d2b864f65b17cee23d7a1923285df0c7b978058b0e2006426700d4c93"}, - {file = "aiohttp-3.12.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:0851359eeb146690c19d368a1c86acf33dc17535ac8123e25a0eff5f5fa110e1"}, - {file = "aiohttp-3.12.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3fcc1ccd74c932ce6b6fad61e054baa23e6624db8f5a9ec462af023abe5c600d"}, - {file = "aiohttp-3.12.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:062eaf38763c6b22fcbd47a97ba06952ad7751ed7b054a690cddeed4f50547fe"}, - {file = "aiohttp-3.12.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b19f964b9130d572f0fed752417446ff6622fd1288e8c7860824a0dd57cd8dd5"}, - {file = "aiohttp-3.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b210c1cdc7f1a45714d17510b7e049ca7b15766b66f8c278a2c73a6021bbc389"}, - {file = "aiohttp-3.12.0-cp311-cp311-win32.whl", hash = "sha256:6859c7ecd01cbcc839476c7d9504a19bf334bbe45715df611d351103945a9d23"}, - {file = "aiohttp-3.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:0159620f09dd338bab29e7136efd51c971462a5bb69dcdace39a2c581e87c4af"}, - {file = "aiohttp-3.12.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71fe01ddea2673973f1958c3776da990106e33a02a4a5c708d4bb34717cae712"}, - {file = "aiohttp-3.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9ce499a7ea20925572d52f86cd42e16690f4db2ff56933710bf759cf1ec68212"}, - {file = "aiohttp-3.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:75a7d00e20221b1bb8a04e14dba850596cdafeac10fb112ce7b6ef0ad1f9bd42"}, - {file = "aiohttp-3.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f9cb8f69371d50ba61f061065d440edcbebf00cb4ef2141465a9e753a00ecb9"}, - {file = "aiohttp-3.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:037a53da5016e8fa33840ecddb2bdc20091d731e0fe866f4f9d9364a94504856"}, - {file = "aiohttp-3.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:851543bb8dd5db048c0b6a7454cae3fd0f618a592cbb70844ec0d548767b5763"}, - {file = "aiohttp-3.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2688fb204b07c2bffcb12795b6384ec051d927147e0ec542ba3518dd60a86f2f"}, - {file = "aiohttp-3.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cbc8604c21a163ee492542b344a4f02797d48d38d335af47490d77c0e15d2ed"}, - {file = "aiohttp-3.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:754d5fd1a47656592d3b11488652fba567d00c6492e9304ba59255dfee8b856f"}, - {file = "aiohttp-3.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2a613da41e577256d13929bbb4a95cadb570aebeab3914a24fc0056ae843d3c7"}, - {file = "aiohttp-3.12.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9c8f9e1de28529751345f1e55cb405f22ff09fb251a1bce7fc7e915d0ee49d1f"}, - {file = "aiohttp-3.12.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:32c1977f5354fef6b43b98ac830c87bddaafcfb6516c520e3241fef8f3e299e7"}, - {file = "aiohttp-3.12.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4ac3e360ab9c1b7893ae5c254a222986162bafa9f981fa85f09bad7b1527fed4"}, - {file = "aiohttp-3.12.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:b3e62337e0a24925fefe638f8dd91be4324ac7f2bbbe9d8d0ae992bd35b2dc45"}, - {file = "aiohttp-3.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:7285a756ba23e99f1a24cf41e8440f06a1d2cba595ee2cc1acb854e4262e2075"}, - {file = "aiohttp-3.12.0-cp312-cp312-win32.whl", hash = "sha256:b53cd833233a09d5a22481a7e936bfdce46845e3b09f1b936d383d5c14d39ba6"}, - {file = "aiohttp-3.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:68e4a94c3bf80e93340d4c9108f57b46b019ca88eddec18bf5c8e1ded463cbef"}, - {file = "aiohttp-3.12.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ab223f5d0bd30f1b419addc4aef37f8d7723027e3d92393281cba97f8529209"}, - {file = "aiohttp-3.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c5beab804eeff85cfae5c053e0d3bb7a7cdc2756ced50a586c56deb8b8ce16b9"}, - {file = "aiohttp-3.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bb157df65c18f4c84dd2a3b989975076d228866e6c4872220139c385bb0fea3b"}, - {file = "aiohttp-3.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9dff812b540fd31e08678fb1caed2498c294e0f75262829259588992ca59372"}, - {file = "aiohttp-3.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f4f06d93c08670b8deb6e965578c804eecd85450319f403ed5695e7105ca4f38"}, - {file = "aiohttp-3.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc77ef0cd57e669f4835ccced3e374c14a9890ef5b99427c5712d965b1a3dca3"}, - {file = "aiohttp-3.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16acea48107e36eb672530b155be727d701658c8e0132f5c38919431063df1aa"}, - {file = "aiohttp-3.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8314272c09dfb3424a3015222b950ca4a0845165fa43528f079a67dd0d98bd56"}, - {file = "aiohttp-3.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9b51e1f1fe9ef73e3dc23908586d7ea3aa928da1b44a38f0cb0c3f60cfcfa76"}, - {file = "aiohttp-3.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:471858b4cb972205fbc46b9485d8d532092df0189dd681869616bbbc7192ead3"}, - {file = "aiohttp-3.12.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47d30f5fc30bd9dfe8875374aa05f566719d82e9026839dd5c59f281fb94d302"}, - {file = "aiohttp-3.12.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c4ae2aced91b2e879f16d4f0225c7733e007367403a195c2f72d9c01dac4b68"}, - {file = "aiohttp-3.12.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c2d61673e3eec7f703419ae430417ac84305095220af11524f9496f7c0b81dc6"}, - {file = "aiohttp-3.12.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a08d1c18b588ddfd049f4ac082b9935ee68a3796dc7ad70c8317605a8bd7e298"}, - {file = "aiohttp-3.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33bb4ab2c7b86bf0ef19d426afcc3e60f08415b8e46b9cdb67b632c1d48931a3"}, - {file = "aiohttp-3.12.0-cp313-cp313-win32.whl", hash = "sha256:199bfe20aebba88c94113def5c5f7eb8abeca55caf4dab8060fa25f573f062e5"}, - {file = "aiohttp-3.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:9c24ce9ccfe2c24e391bdd72f3d5ff9c42ed1f8b15f813cb4b4c22e0d5930433"}, - {file = "aiohttp-3.12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:438c44e1d0b58b2fe0c02c0bc265ebd7908b84cf1788ab53afbda459f072ee00"}, - {file = "aiohttp-3.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:77f1ce28d4ed7c2aac4c23d290232771def64de95b10c06173a00be5d0ebf635"}, - {file = "aiohttp-3.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:796fd64d36d1b2a1c96fea93b4423a693494f7c5439cb6139f609ad1b26c6e55"}, - {file = "aiohttp-3.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d219bcc62e4470d91fea9ecff2c1563976d1363e294da8c4b79d8463f9abd60e"}, - {file = "aiohttp-3.12.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b5f6e7c74db7e88e1a6d62b2838f7cb9211d6786ae41b016faf0c1eae01fff18"}, - {file = "aiohttp-3.12.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:e898963e18f8734ee5f8772367beb29791ca861e0e1f7ddfeb90e75c7a6c7c83"}, - {file = "aiohttp-3.12.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bfcbca6abc6337cdb4b7d5329fb4bce598dcf72e67ceaafadd1cf46d95a90cc"}, - {file = "aiohttp-3.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:222fdf305be960495793b872ccebc64b03d761cdf7351533c87101529840ddee"}, - {file = "aiohttp-3.12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7931a21d80d1a2ba30d238b38228a7fcb3160a4a22eb2d555e80eb5962015f2"}, - {file = "aiohttp-3.12.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbcff7b1dce09bb430e0035cdbf734a69a0c87c753e6cd2c7e87dca46e3f2443"}, - {file = "aiohttp-3.12.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:d1bf5632b57266aeb3af47b6ab96ef6927d20add906815f15e20ab3b15ca3fd6"}, - {file = "aiohttp-3.12.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2241d73ff8d2fb429d446e1286c5fd9a6f38c34ce5d617a28be7345abc505168"}, - {file = "aiohttp-3.12.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:459196d2f86edd9588079a5c338007e744c4d02fe64231de005b47fa2d465d78"}, - {file = "aiohttp-3.12.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3a6785d737ad7acc95c28f945d58f65876b80715da9f9b312edb0e7a61b2a459"}, - {file = "aiohttp-3.12.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4744aa06674299040d7a560e79282df7e66769882cfc0a4f602d5d2e3b972660"}, - {file = "aiohttp-3.12.0-cp39-cp39-win32.whl", hash = "sha256:19ae1ca271325184153c51b08512758e4c8cf631e39f37cf96b9f39d8de9be2f"}, - {file = "aiohttp-3.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:243281975c4a4fa7af43f35c962ed15ecd3f45342ef2052dd1afcf55afa14902"}, - {file = "aiohttp-3.12.0.tar.gz", hash = "sha256:e3f0a2b4d7fb16c0d584d9b8860f1e46d39f7d93372b25a6f80c10015a7acdab"}, + {file = "aiohttp-3.12.8-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:4dbb967657a4e290cafaad79e4b6ae0bb04d44463214f552360ebc64e86521d0"}, + {file = "aiohttp-3.12.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b1ba9b08a99ccb409c4ede20265316085e7e28971984eae93277ed4e4a1d9690"}, + {file = "aiohttp-3.12.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b84f4e77372490d6c07a09f9982013e135e337453012f6f599fe3ec98c2eb486"}, + {file = "aiohttp-3.12.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99d6699a957b14f90489194e1c6215927ceb66b3ad5c41d4cc88eb83fba3aa56"}, + {file = "aiohttp-3.12.8-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:be80d260a9b8e41ef5c33189c7f28d877a637bc5dcb30054d6a26350a5667d1d"}, + {file = "aiohttp-3.12.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5c13363592704339a6ead1943c1492b123e11e1debdf764eb71fce8c9531ffc"}, + {file = "aiohttp-3.12.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:983a100232bd603e741d059fbe66ae4a6d15c824c0faadfcf76f912667248da6"}, + {file = "aiohttp-3.12.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08d108b9136b97f9abbe1fa24fbe343a77d63d7e1202e4910c656c79eb7f1e85"}, + {file = "aiohttp-3.12.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02266fb5818158ac5cb9e0360df1be8acc2035ed4703e0e4acd251cb11f0929e"}, + {file = "aiohttp-3.12.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71407580e2b6c7aed1d991ce3c372fc577812b1a91a9980f12395bf723bad9d7"}, + {file = "aiohttp-3.12.8-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:035feaafa9ebeb7fa609737f7dc4232403a0854abdb809aadf8a0eebde8da974"}, + {file = "aiohttp-3.12.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e2f79ef7e620a78a29af3e0e05b8ff74790461dfe794b98ad07543b835092d68"}, + {file = "aiohttp-3.12.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:2a2b9d9db25fff1eaebd8c5a1c83f901f6f3d3474ca8422dc2b3cfdd61aba25d"}, + {file = "aiohttp-3.12.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:d6eedc0407ead020b2ac7295c85ba71115c12ec70e2e80d460a738a0d1cd3c07"}, + {file = "aiohttp-3.12.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6164219acf66c8fee268008971843173e2fabedd6aa86531b6ea9b1211ec271c"}, + {file = "aiohttp-3.12.8-cp310-cp310-win32.whl", hash = "sha256:85ff9f7f04486504b0a46d9f570e15fa905804d39563685af44659023a98e62a"}, + {file = "aiohttp-3.12.8-cp310-cp310-win_amd64.whl", hash = "sha256:7dfb744fbf99da22bdd8f62bae54c9953201c3fc0f198f48951b8b15afba0cc8"}, + {file = "aiohttp-3.12.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:27bcd1bbbddd49e40b3a01c2c442881d1e4548487468f12b5ff13b45c55c169c"}, + {file = "aiohttp-3.12.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fe66d0bab9430b40278d5736ac59b52861783f74366449b74ef1e7feb37b98de"}, + {file = "aiohttp-3.12.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67ac2706fd0581717b626ef8c0e9db7840fed0d85b5dbe6523036f116a61c3c9"}, + {file = "aiohttp-3.12.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:418a9c19e9481ea744a40f0213db1132f8bc462013b79e4f7544e458a1995b38"}, + {file = "aiohttp-3.12.8-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b2ca5edbed995039b668c0640945bbb1683da6ffe853ee791a11997197b6826c"}, + {file = "aiohttp-3.12.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:18e46324b9dacc8c921b78825d968c2b465d1dd6a859558361ada2e21d21cf56"}, + {file = "aiohttp-3.12.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2cc417dd10b797f471016a8630ea3d7e8717a7c14a9b04487edfa5c885acd94"}, + {file = "aiohttp-3.12.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b5dd6dd7dbedcd037837edfd2686bda91366b477e2e01d016cc72245254c2cb"}, + {file = 
"aiohttp-3.12.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d80a144083ada630d1aba99eded7e877555af3f9065f92e946a72d94619d4dff"}, + {file = "aiohttp-3.12.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:516edb0a9476be7b3ea9b66320950c399a719404fbaa11353c190a98990a1f61"}, + {file = "aiohttp-3.12.8-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:408225b0f91b27d47d8b838ddc84595b12927e63031ec7e37130a0a15294b39a"}, + {file = "aiohttp-3.12.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1bdf5baa3f1874654923268f220340352e1bb1c054cd1538e0674e94bd2d9ab6"}, + {file = "aiohttp-3.12.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:51c7f01e50cec0a828a2dddaeb37c4f6e57882ddc7d5c91fdb955424dafac28e"}, + {file = "aiohttp-3.12.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:31484e637a4b34e7771d0a9bf14de655a8943041ff2981e825556f3fa6f51e4f"}, + {file = "aiohttp-3.12.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d53ce5059731e3f11f3245baec47620ad777ac336fc87d1cfd8b2393e384dff7"}, + {file = "aiohttp-3.12.8-cp311-cp311-win32.whl", hash = "sha256:2969c9b9d8312f01dbe8cfd96d31ce0ff912ae3653d437fa73d2a6cfb0aed20d"}, + {file = "aiohttp-3.12.8-cp311-cp311-win_amd64.whl", hash = "sha256:61cafeda35bfbabce4c38237485da9e31adae6a0a81ce351936cbe36acae0507"}, + {file = "aiohttp-3.12.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:10cf602de13ce84965d113094a7f88c8b393c58a40663b342f60c7e84472145a"}, + {file = "aiohttp-3.12.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c6fb7a89538475597e7635086ade991c7223c2aa34105c53dc3878a61e21ebcf"}, + {file = "aiohttp-3.12.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:443d29d6c382b8d5c932cf88db006cb8c12db1019618b228791a251e2c73490b"}, + {file = "aiohttp-3.12.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc7a47a7fcad1a396abe7b7951cd4e9c50824e50bb5ddd6de2d465ad7e9f40be"}, + {file = 
"aiohttp-3.12.8-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2fa21401a7fd516cafcf44511d356dbf15f77bdd2c53a92aa8760cf97af41c36"}, + {file = "aiohttp-3.12.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2edb5286a4c924974f12798863ef81568c45e1ce2191f483a981276f82ce64e9"}, + {file = "aiohttp-3.12.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db28ab4a5b409ae7e747575d6dcb5bff75aa98d750b8366fa036135b5446d3c8"}, + {file = "aiohttp-3.12.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c03c0a5a8169cec51b8447700fd96183c0917c4a29aed707d2775cda20abbed"}, + {file = "aiohttp-3.12.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee2821809ddfa441263f616c644c5ba8b6a0c9586555efc804c0cfabccc8a1e5"}, + {file = "aiohttp-3.12.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4fceb45ff7b539c8e5af00aaee38d13e50c9ac7a024da0f9ecc832347f77ed3e"}, + {file = "aiohttp-3.12.8-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:db7fd45e72947f555bd4c8a529f017a8d519f814d45a071f9da66d04166ed6ed"}, + {file = "aiohttp-3.12.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5da5aabbd47a95d7cfd65a9b6b201b628474471f0e49fc4d6d8af8c15d544957"}, + {file = "aiohttp-3.12.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:be1b38de5c846e2ea0589a5967f5d9d37e522d5ee4df8393af7550d038662ba1"}, + {file = "aiohttp-3.12.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e88efdb0e4373ac7040cb9009527f2f859e0035639aa57ff8281f0206a75bc4"}, + {file = "aiohttp-3.12.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:afd2e80e229ecc30c8b773910c2c381fd89b1f0662ecbf4fc4494a7a25788f8d"}, + {file = "aiohttp-3.12.8-cp312-cp312-win32.whl", hash = "sha256:b20d42b621287ac12bd3e627d401615c80397bd3a4ec3ece50654af5b2b30c58"}, + {file = "aiohttp-3.12.8-cp312-cp312-win_amd64.whl", hash = 
"sha256:58a90c26603a7ed89f2dcaeb8dbdf4805d55b363666870c71106a6f60ee93efd"}, + {file = "aiohttp-3.12.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0cfe91b12a86f1a1f292f46201f8801263f1df3b843c1f9020b98e6007838918"}, + {file = "aiohttp-3.12.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:326396e1c174e775ac1bd49d41689128a28434d86af2d5dd9adfb211686f16c8"}, + {file = "aiohttp-3.12.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e0e7de9631c3163997a34d8ed55a2165e2df2ec6082dc4bd16eea92d00145c4c"}, + {file = "aiohttp-3.12.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0759cbdca820a6a5a8562606052792ef9acb79633c3e79e6733a3c1c133d4c7d"}, + {file = "aiohttp-3.12.8-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:311060ebd0832074968dd7d1b204a3b155203f7f9159c5b0908b76c5430be50f"}, + {file = "aiohttp-3.12.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:268a7c20eb404e9154b77fa521f832bbfd876cf5ff7b3e0dcb744d5b46738149"}, + {file = "aiohttp-3.12.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0725406d0c512d589be41141391d11d56b447f4b56eee9bd9a17424b0e3e1d78"}, + {file = "aiohttp-3.12.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a8edf6c68eb4397136591d66ce4bda8cdb47ca585948223bc98e8492d37c71f"}, + {file = "aiohttp-3.12.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25aa7f26286769210938a159db2f2f65d05bcc06190e34fca03eeb643f20dfc7"}, + {file = "aiohttp-3.12.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:952d2fa762ba6a946f3498a3a62c19dadb066539f5ac0dfa20dfe58bb8f733b5"}, + {file = "aiohttp-3.12.8-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:c2b7f82193482c7778d2de25d67dcc29a65895f5281ef177609ed4ebfb015d68"}, + {file = "aiohttp-3.12.8-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:0a99972b368e370882994a72bd2e7b2d89a886288f0ff0ea09793452865b97a3"}, + {file = "aiohttp-3.12.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ae8c65d708a473fcfa5b972a1dc6bf31257d58a07f4e4c93d0327384deab5cae"}, + {file = "aiohttp-3.12.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a9e0be4d239794c065e1ba439d19de7e4773b4b4b3b9849af2c3c233f3e5b3eb"}, + {file = "aiohttp-3.12.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad754910e7acce5bd52326b308ca58d404e8ba1a5bcd5a0c8607ce3dee4a1d65"}, + {file = "aiohttp-3.12.8-cp313-cp313-win32.whl", hash = "sha256:3ca646524940dd62a96658ccc1e64dc548c633dd0468a0bf8525dd3163e2c60c"}, + {file = "aiohttp-3.12.8-cp313-cp313-win_amd64.whl", hash = "sha256:2f7e553bd4ff72d7e3b06ff60fc2d29457865ddb8cb7d7dc9287991c660151c5"}, + {file = "aiohttp-3.12.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:feeea724193df2fb795f3985785d4ea99e66b8db42812f73b1f99f96408ab79e"}, + {file = "aiohttp-3.12.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:265a2a6f6ea42dc712ff453fb6e66059a08ecc8a492c6e38109c0677f6239525"}, + {file = "aiohttp-3.12.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb93c41be6c20e934733a25f28171333dbf5029efe7531a412a1979fd4147e06"}, + {file = "aiohttp-3.12.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a0deeae01ff39b4a51be7353e74bde83d03c7394c9d8bea5696732fdd08327a"}, + {file = "aiohttp-3.12.8-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e4cac5a21b03c96caa5e9c88e8e4e4fa81b8a55e5988735875960a50d955cd27"}, + {file = "aiohttp-3.12.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8bae2b0801095fa5d747f71a51750b25ae5f99278e05fbb8e219e0142ae5a403"}, + {file = "aiohttp-3.12.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3783a87d1f704d65ba41470a12c2acc6f299ac856632b008b1361dfaefb1973c"}, + {file = 
"aiohttp-3.12.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:022e59f551b1e0022c13ab258770b230f9419e70ae30304344d39877730ff412"}, + {file = "aiohttp-3.12.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3b1cc723102f270aca976df732991736a2287d98a1f7537f34c3501e2f511e6"}, + {file = "aiohttp-3.12.8-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:79d83aae6ed60de95f3095590e45d3cb15d0ff8ab30c8b7c254e7343d01f3cc9"}, + {file = "aiohttp-3.12.8-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:93a4d00d9b08fbaeacd93a385177c0d39442789924e747bc636b26b448bc44fb"}, + {file = "aiohttp-3.12.8-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:622d3f2e50e623221429b05433f0cfef17494618b82d2f1ebf7c161aff7e4dad"}, + {file = "aiohttp-3.12.8-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2cefc341d25e4940081576b7e3666ecced80deb0651e186eb06fac4d029a5a1d"}, + {file = "aiohttp-3.12.8-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3075f67cbf7533af422983464b8d6af508795eb1da5ff318390de0127ae52f42"}, + {file = "aiohttp-3.12.8-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dcf9bb3f3d9aee06074067701971e07018f41ae3cf06d220471148c5b41cf644"}, + {file = "aiohttp-3.12.8-cp39-cp39-win32.whl", hash = "sha256:86262b58893701ec88acdc2fd9aa6b56d83ed244ad3dfe1f3642986a2172239c"}, + {file = "aiohttp-3.12.8-cp39-cp39-win_amd64.whl", hash = "sha256:42d54fc4e16f2f757a3081335988d4dc9277db34b001c718b7e25aedbc5af8f7"}, + {file = "aiohttp-3.12.8.tar.gz", hash = "sha256:3c0d2ca376b7cea6c1dfa1fc2479b02b3f482a249fc7020c69063b321080140a"}, ] [package.dependencies] @@ -244,7 +244,7 @@ description = "LTS Port of Python audioop" optional = false python-versions = ">=3.13" groups = ["main"] -markers = "python_version >= \"3.13\"" +markers = "python_version == \"3.13\"" files = [ {file = "audioop_lts-0.2.1-cp313-abi3-macosx_10_13_universal2.whl", hash = 
"sha256:fd1345ae99e17e6910f47ce7d52673c6a1a70820d78b67de1b7abb3af29c426a"}, {file = "audioop_lts-0.2.1-cp313-abi3-macosx_10_13_x86_64.whl", hash = "sha256:e175350da05d2087e12cea8e72a70a1a8b14a17e92ed2022952a4419689ede5e"}, @@ -345,18 +345,18 @@ files = [ [[package]] name = "boto3" -version = "1.38.23" +version = "1.38.29" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "boto3-1.38.23-py3-none-any.whl", hash = "sha256:70ab8364f1f6f0a7e0eaf97f62fbdacf9c1e4cc1de330faf1c146ef9ab01e7d0"}, - {file = "boto3-1.38.23.tar.gz", hash = "sha256:bcf73aca469add09e165b8793be18e7578db8d2604d82505ab13dc2495bad982"}, + {file = "boto3-1.38.29-py3-none-any.whl", hash = "sha256:90a9b1a08122b840216b0e33b7b0dbe4ef50f12d00a573bf7b030cddeda9c507"}, + {file = "boto3-1.38.29.tar.gz", hash = "sha256:0777a87e8d28ebae09a086017a53bcaf25ec7c094d8f7e4122b265aa48e273f5"}, ] [package.dependencies] -botocore = ">=1.38.23,<1.39.0" +botocore = ">=1.38.29,<1.39.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.13.0,<0.14.0" @@ -365,14 +365,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.38.23" +version = "1.38.29" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "botocore-1.38.23-py3-none-any.whl", hash = "sha256:a7f818672f10d7a080c2c4558428011c3e0abc1039a047d27ac76ec846158457"}, - {file = "botocore-1.38.23.tar.gz", hash = "sha256:29685c91050a870c3809238dc5da1ac65a48a3a20b4bca46b6057dcb6b39c72a"}, + {file = "botocore-1.38.29-py3-none-any.whl", hash = "sha256:4d623f54326eb66d1a633f0c1780992c80f3db317a91c9afe31d5c700290621e"}, + {file = "botocore-1.38.29.tar.gz", hash = "sha256:98c42b1bbb52f4086282e7db8aa724c9cb0f7278b7827d6736d872511c856e4f"}, ] [package.dependencies] @@ -389,7 +389,7 @@ version = "1.1.0" description = "Python bindings for the Brotli compression library" optional = false python-versions = "*" -groups = ["test"] +groups = ["main", "test"] files = [ {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752"}, {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9"}, @@ -517,6 +517,48 @@ files = [ {file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"}, {file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"}, ] +markers = {main = "platform_python_implementation == \"CPython\""} + +[[package]] +name = "brotlicffi" +version = "1.1.0.0" +description = "Python CFFI bindings to the Brotli library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_python_implementation != \"CPython\"" +files = [ + {file = "brotlicffi-1.1.0.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9b7ae6bd1a3f0df532b6d67ff674099a96d22bc0948955cb338488c31bfb8851"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:19ffc919fa4fc6ace69286e0a23b3789b4219058313cf9b45625016bf7ff996b"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9feb210d932ffe7798ee62e6145d3a757eb6233aa9a4e7db78dd3690d7755814"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84763dbdef5dd5c24b75597a77e1b30c66604725707565188ba54bab4f114820"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-win32.whl", hash = "sha256:1b12b50e07c3911e1efa3a8971543e7648100713d4e0971b13631cce22c587eb"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:994a4f0681bb6c6c3b0925530a1926b7a189d878e6e5e38fae8efa47c5d9c613"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e4aeb0bd2540cb91b069dbdd54d458da8c4334ceaf2d25df2f4af576d6766ca"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b7b0033b0d37bb33009fb2fef73310e432e76f688af76c156b3594389d81391"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54a07bb2374a1eba8ebb52b6fafffa2afd3c4df85ddd38fcc0511f2bb387c2a8"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7901a7dc4b88f1c1475de59ae9be59799db1007b7d059817948d8e4f12e24e35"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce01c7316aebc7fce59da734286148b1d1b9455f89cf2c8a4dfce7d41db55c2d"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:246f1d1a90279bb6069de3de8d75a8856e073b8ff0b09dcca18ccc14cec85979"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc4bc5d82bc56ebd8b514fb8350cfac4627d6b0743382e46d033976a5f80fab6"}, + {file = 
"brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c26ecb14386a44b118ce36e546ce307f4810bc9598a6e6cb4f7fca725ae7e6"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca72968ae4eaf6470498d5c2887073f7efe3b1e7d7ec8be11a06a79cc810e990"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:add0de5b9ad9e9aa293c3aa4e9deb2b61e99ad6c1634e01d01d98c03e6a354cc"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b6068e0f3769992d6b622a1cd2e7835eae3cf8d9da123d7f51ca9c1e9c333e5"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8557a8559509b61e65083f8782329188a250102372576093c88930c875a69838"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a7ae37e5d79c5bdfb5b4b99f2715a6035e6c5bf538c3746abc8e26694f92f33"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391151ec86bb1c683835980f4816272a87eaddc46bb91cbf44f62228b84d8cca"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2f3711be9290f0453de8eed5275d93d286abe26b08ab4a35d7452caa1fef532f"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a807d760763e398bbf2c6394ae9da5815901aa93ee0a37bca5efe78d4ee3171"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa8ca0623b26c94fccc3a1fdd895be1743b838f3917300506d04aa3346fd2a14"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3de0cf28a53a3238b252aca9fed1593e9d36c1d116748013339f0949bfc84112"}, + {file = 
"brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6be5ec0e88a4925c91f3dea2bb0013b3a2accda6f77238f76a34a1ea532a1cb0"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d9eb71bb1085d996244439154387266fd23d6ad37161f6f52f1cd41dd95a3808"}, + {file = "brotlicffi-1.1.0.0.tar.gz", hash = "sha256:b77827a689905143f87915310b93b273ab17888fd43ef350d4832c4a71083c13"}, +] + +[package.dependencies] +cffi = ">=1.0.0" [[package]] name = "certifi" @@ -606,7 +648,7 @@ files = [ {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] -markers = {main = "platform_python_implementation != \"PyPy\"", test = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\" or implementation_name == \"pypy\""} +markers = {test = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\" or implementation_name == \"pypy\""} [package.dependencies] pycparser = "*" @@ -623,6 +665,18 @@ files = [ {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, ] +[[package]] +name = "chardet" +version = "5.2.0" +description = "Universal encoding detector for Python 3" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, + {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, +] + [[package]] name = "charset-normalizer" version = "3.4.2" @@ -863,49 +917,49 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "cryptography" -version = "45.0.2" +version = "45.0.3" description = "cryptography is a 
package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" groups = ["main"] files = [ - {file = "cryptography-45.0.2-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:61a8b1bbddd9332917485b2453d1de49f142e6334ce1d97b7916d5a85d179c84"}, - {file = "cryptography-45.0.2-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cc31c66411e14dd70e2f384a9204a859dc25b05e1f303df0f5326691061b839"}, - {file = "cryptography-45.0.2-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:463096533acd5097f8751115bc600b0b64620c4aafcac10c6d0041e6e68f88fe"}, - {file = "cryptography-45.0.2-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:cdafb86eb673c3211accffbffdb3cdffa3aaafacd14819e0898d23696d18e4d3"}, - {file = "cryptography-45.0.2-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:05c2385b1f5c89a17df19900cfb1345115a77168f5ed44bdf6fd3de1ce5cc65b"}, - {file = "cryptography-45.0.2-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e9e4bdcd70216b08801e267c0b563316b787f957a46e215249921f99288456f9"}, - {file = "cryptography-45.0.2-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b2de529027579e43b6dc1f805f467b102fb7d13c1e54c334f1403ee2b37d0059"}, - {file = "cryptography-45.0.2-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10d68763892a7b19c22508ab57799c4423c7c8cd61d7eee4c5a6a55a46511949"}, - {file = "cryptography-45.0.2-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2a90ce2f0f5b695e4785ac07c19a58244092f3c85d57db6d8eb1a2b26d2aad6"}, - {file = "cryptography-45.0.2-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:59c0c8f043dd376bbd9d4f636223836aed50431af4c5a467ed9bf61520294627"}, - {file = "cryptography-45.0.2-cp311-abi3-win32.whl", hash = "sha256:80303ee6a02ef38c4253160446cbeb5c400c07e01d4ddbd4ff722a89b736d95a"}, - {file = "cryptography-45.0.2-cp311-abi3-win_amd64.whl", hash = 
"sha256:7429936146063bd1b2cfc54f0e04016b90ee9b1c908a7bed0800049cbace70eb"}, - {file = "cryptography-45.0.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:e86c8d54cd19a13e9081898b3c24351683fd39d726ecf8e774aaa9d8d96f5f3a"}, - {file = "cryptography-45.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e328357b6bbf79928363dbf13f4635b7aac0306afb7e5ad24d21d0c5761c3253"}, - {file = "cryptography-45.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49af56491473231159c98c2c26f1a8f3799a60e5cf0e872d00745b858ddac9d2"}, - {file = "cryptography-45.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f169469d04a23282de9d0be349499cb6683b6ff1b68901210faacac9b0c24b7d"}, - {file = "cryptography-45.0.2-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9cfd1399064b13043082c660ddd97a0358e41c8b0dc7b77c1243e013d305c344"}, - {file = "cryptography-45.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:18f8084b7ca3ce1b8d38bdfe33c48116edf9a08b4d056ef4a96dceaa36d8d965"}, - {file = "cryptography-45.0.2-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:2cb03a944a1a412724d15a7c051d50e63a868031f26b6a312f2016965b661942"}, - {file = "cryptography-45.0.2-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a9727a21957d3327cf6b7eb5ffc9e4b663909a25fea158e3fcbc49d4cdd7881b"}, - {file = "cryptography-45.0.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ddb8d01aa900b741d6b7cc585a97aff787175f160ab975e21f880e89d810781a"}, - {file = "cryptography-45.0.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:c0c000c1a09f069632d8a9eb3b610ac029fcc682f1d69b758e625d6ee713f4ed"}, - {file = "cryptography-45.0.2-cp37-abi3-win32.whl", hash = "sha256:08281de408e7eb71ba3cd5098709a356bfdf65eebd7ee7633c3610f0aa80d79b"}, - {file = "cryptography-45.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:48caa55c528617fa6db1a9c3bf2e37ccb31b73e098ac2b71408d1f2db551dde4"}, - {file = 
"cryptography-45.0.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8ec324711596fbf21837d3a5db543937dd84597d364769b46e0102250023f77"}, - {file = "cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:965611880c3fa8e504b7458484c0697e00ae6e937279cd6734fdaa2bc954dc49"}, - {file = "cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d891942592789fa0ab71b502550bbadb12f540d7413d7d7c4cef4b02af0f5bc6"}, - {file = "cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b19f4b28dd2ef2e6d600307fee656c00825a2980c4356a7080bd758d633c3a6f"}, - {file = "cryptography-45.0.2-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:7c73968fbb7698a4c5d6160859db560d3aac160edde89c751edd5a8bc6560c88"}, - {file = "cryptography-45.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:501de1296b2041dccf2115e3c7d4947430585601b251b140970ce255c5cfb985"}, - {file = "cryptography-45.0.2-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1655d3a76e3dedb683c982a6c3a2cbfae2d08f47a48ec5a3d58db52b3d29ea6f"}, - {file = "cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc7693573f16535428183de8fd27f0ca1ca37a51baa0b41dc5ed7b3d68fe80e2"}, - {file = "cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:614bca7c6ed0d8ad1dce683a6289afae1f880675b4090878a0136c3da16bc693"}, - {file = "cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:4142e20c29224cec63e9e32eb1e6014fb285fe39b7be66b3564ca978a3a8afe9"}, - {file = "cryptography-45.0.2-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:9a900036b42f7324df7c7ad9569eb92ba0b613cf699160dd9c2154b24fd02f8e"}, - {file = "cryptography-45.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:057723b79752a142efbc609e90b0dff27b0361ccbee3bd48312d70f5cdf53b78"}, - {file = "cryptography-45.0.2.tar.gz", hash = 
"sha256:d784d57b958ffd07e9e226d17272f9af0c41572557604ca7554214def32c26bf"}, + {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae1e637f527750811588e4582988932c222f8251f7b7ea93739acb624e1487f"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ca932e11218bcc9ef812aa497cdf669484870ecbcf2d99b765d6c27a86000942"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af3f92b1dc25621f5fad065288a44ac790c5798e986a34d393ab27d2b27fcff9"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2f8f8f0b73b885ddd7f3d8c2b2234a7d3ba49002b0223f58cfde1bedd9563c56"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9cc80ce69032ffa528b5e16d217fa4d8d4bb7d6ba8659c1b4d74a1b0f4235fca"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c824c9281cb628015bfc3c59335163d4ca0540d49de4582d6c2637312907e4b1"}, + {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5833bb4355cb377ebd880457663a972cd044e7f49585aee39245c0d592904578"}, + {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bb5bf55dcb69f7067d80354d0a348368da907345a2c448b0babc4215ccd3497"}, + {file = "cryptography-45.0.3-cp311-abi3-win32.whl", hash = "sha256:3ad69eeb92a9de9421e1f6685e85a10fbcfb75c833b42cc9bc2ba9fb00da4710"}, + {file = "cryptography-45.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:97787952246a77d77934d41b62fb1b6f3581d83f71b44796a4158d93b8f5c490"}, + {file = 
"cryptography-45.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:c92519d242703b675ccefd0f0562eb45e74d438e001f8ab52d628e885751fb06"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5edcb90da1843df85292ef3a313513766a78fbbb83f584a5a58fb001a5a9d57"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38deed72285c7ed699864f964a3f4cf11ab3fb38e8d39cfcd96710cd2b5bb716"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5555365a50efe1f486eed6ac7062c33b97ccef409f5970a0b6f205a7cfab59c8"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9e4253ed8f5948a3589b3caee7ad9a5bf218ffd16869c516535325fece163dcc"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cfd84777b4b6684955ce86156cfb5e08d75e80dc2585e10d69e47f014f0a5342"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a2b56de3417fd5f48773ad8e91abaa700b678dc7fe1e0c757e1ae340779acf7b"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:57a6500d459e8035e813bd8b51b671977fb149a8c95ed814989da682314d0782"}, + {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f22af3c78abfbc7cbcdf2c55d23c3e022e1a462ee2481011d518c7fb9c9f3d65"}, + {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:232954730c362638544758a8160c4ee1b832dc011d2c41a306ad8f7cccc5bb0b"}, + {file = "cryptography-45.0.3-cp37-abi3-win32.whl", hash = "sha256:cb6ab89421bc90e0422aca911c69044c2912fc3debb19bb3c1bfe28ee3dff6ab"}, + {file = "cryptography-45.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:d54ae41e6bd70ea23707843021c778f151ca258081586f0cfa31d936ae43d1b2"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:ed43d396f42028c1f47b5fec012e9e12631266e3825e95c00e3cf94d472dac49"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:fed5aaca1750e46db870874c9c273cd5182a9e9deb16f06f7bdffdb5c2bde4b9"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:00094838ecc7c6594171e8c8a9166124c1197b074cfca23645cee573910d76bc"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:92d5f428c1a0439b2040435a1d6bc1b26ebf0af88b093c3628913dd464d13fa1"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:ec64ee375b5aaa354b2b273c921144a660a511f9df8785e6d1c942967106438e"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:71320fbefd05454ef2d457c481ba9a5b0e540f3753354fff6f780927c25d19b0"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:edd6d51869beb7f0d472e902ef231a9b7689508e83880ea16ca3311a00bf5ce7"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:555e5e2d3a53b4fabeca32835878b2818b3f23966a4efb0d566689777c5a12c8"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:25286aacb947286620a31f78f2ed1a32cded7be5d8b729ba3fb2c988457639e4"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:050ce5209d5072472971e6efbfc8ec5a8f9a841de5a4db0ebd9c2e392cb81972"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dc10ec1e9f21f33420cc05214989544727e776286c1c16697178978327b95c9c"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, + {file = "cryptography-45.0.3.tar.gz", hash = "sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, ] [package.dependencies] @@ -918,9 +972,29 @@ nox = 
["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8 pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==45.0.2)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test = ["certifi (>=2024)", "cryptography-vectors (==45.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] +[[package]] +name = "cssselect2" +version = "0.8.0" +description = "CSS selectors for Python ElementTree" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "cssselect2-0.8.0-py3-none-any.whl", hash = "sha256:46fc70ebc41ced7a32cd42d58b1884d72ade23d21e5a4eaaf022401c13f0e76e"}, + {file = "cssselect2-0.8.0.tar.gz", hash = "sha256:7674ffb954a3b46162392aee2a3a0aedb2e14ecf99fcc28644900f4e6e3e9d3a"}, +] + +[package.dependencies] +tinycss2 = "*" +webencodings = "*" + +[package.extras] +doc = ["furo", "sphinx"] +test = ["pytest", "ruff"] + [[package]] name = "decorator" version = "5.2.1" @@ -1025,30 +1099,30 @@ wmi = ["wmi (>=1.5.1)"] [[package]] name = "dramatiq" -version = "1.17.1" +version = "1.18.0" description = "Background Processing for Python 3." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "dramatiq-1.17.1-py3-none-any.whl", hash = "sha256:951cdc334478dff8e5150bb02a6f7a947d215ee24b5aedaf738eff20e17913df"}, - {file = "dramatiq-1.17.1.tar.gz", hash = "sha256:2675d2f57e0d82db3a7d2a60f1f9c536365349db78c7f8d80a63e4c54697647a"}, + {file = "dramatiq-1.18.0-py3-none-any.whl", hash = "sha256:d360f608aa3cd06f5db714bfcd23825dc7098bacfee52aca536b0bb0faae3c69"}, + {file = "dramatiq-1.18.0.tar.gz", hash = "sha256:5ea436b6e50dae64d4de04f1eb519ad239a6b1ba6315ba1dce1c0c4c1ebedfaf"}, ] [package.dependencies] pika = {version = ">=1.0,<2.0", optional = true, markers = "extra == \"rabbitmq\""} prometheus-client = ">=0.2" watchdog = {version = ">=4.0", optional = true, markers = "extra == \"watch\""} -watchdog-gevent = {version = ">=0.2", optional = true, markers = "extra == \"watch\""} +watchdog_gevent = {version = ">=0.2", optional = true, markers = "extra == \"watch\""} [package.extras] -all = ["gevent (>=1.1)", "pika (>=1.0,<2.0)", "pylibmc (>=1.5,<2.0)", "redis (>=2.0,<6.0)", "watchdog (>=4.0)", "watchdog-gevent (>=0.2)"] -dev = ["alabaster", "bumpversion", "flake8", "flake8-bugbear", "flake8-quotes", "gevent (>=1.1)", "hiredis", "isort", "mypy", "pika (>=1.0,<2.0)", "pylibmc (>=1.5,<2.0)", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "redis (>=2.0,<6.0)", "sphinx", "sphinxcontrib-napoleon", "tox", "twine", "watchdog (>=4.0)", "watchdog-gevent (>=0.2)", "wheel"] +all = ["gevent (>=1.1)", "pika (>=1.0,<2.0)", "pylibmc (>=1.5,<2.0)", "redis (>=2.0,<7.0)", "watchdog (>=4.0)", "watchdog_gevent (>=0.2)"] +dev = ["alabaster", "bumpversion", "flake8", "flake8-bugbear", "flake8-quotes", "gevent (>=1.1)", "hiredis", "isort", "mypy", "pika (>=1.0,<2.0)", "pylibmc (>=1.5,<2.0)", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "redis (>=2.0,<7.0)", "sphinx", "sphinxcontrib-napoleon", "tox", "twine", "watchdog (>=4.0)", "watchdog_gevent (>=0.2)", "wheel"] 
gevent = ["gevent (>=1.1)"] memcached = ["pylibmc (>=1.5,<2.0)"] rabbitmq = ["pika (>=1.0,<2.0)"] -redis = ["redis (>=2.0,<6.0)"] -watch = ["watchdog (>=4.0)", "watchdog-gevent (>=0.2)"] +redis = ["redis (>=2.0,<7.0)"] +watch = ["watchdog (>=4.0)", "watchdog_gevent (>=0.2)"] [[package]] name = "duckduckgo-search" @@ -1262,6 +1336,77 @@ files = [ Flask = ">=1.0.4" Werkzeug = ">=1.0.1" +[[package]] +name = "fonttools" +version = "4.58.1" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "fonttools-4.58.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ebd423034ac4f74196c1ae29f8ed3b862f820345acbf35600af8596ebf62573"}, + {file = "fonttools-4.58.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9dc36f4b4044d95e6fb358da4c3e6a5c07c9b6f4c1e8c396e89bee3b65dae902"}, + {file = "fonttools-4.58.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc4b74d7bb84189fe264d56a544ac5c818f8f1e8141856746768691fe185b229"}, + {file = "fonttools-4.58.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa4fa41e9cb43f78881a5896d6e41b6a0ec54e9d68e7eaaff6d7a1769b17017"}, + {file = "fonttools-4.58.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91335202f19c9edc04f2f6a7d9bb269b0a435d7de771e3f33c3ea9f87f19c8d4"}, + {file = "fonttools-4.58.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e6b0ec2171e811a0d9e467225dc06b0fac39a84b4704f263c2d538c3c67b99b2"}, + {file = "fonttools-4.58.1-cp310-cp310-win32.whl", hash = "sha256:a788983d522d02a9b457cc98aa60fc631dabae352fb3b30a56200890cd338ca0"}, + {file = "fonttools-4.58.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8c848a2d5961d277b85ac339480cecea90599059f72a42047ced25431e8b72a"}, + {file = "fonttools-4.58.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9966e14729669bcfbb56f83b747a2397c4d97c6d4798cb2e2adc28f9388fa008"}, + {file = 
"fonttools-4.58.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64cc1647bbe83dea57f5496ec878ad19ccdba7185b0dd34955d3e6f03dc789e6"}, + {file = "fonttools-4.58.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464f790ce681d08d1583df0735776aa9cb1999594bf336ddd0bf962c17b629ac"}, + {file = "fonttools-4.58.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c53c6a720ee70cc25746d511ba88c45c95ec510fd258026ed209b0b9e3ba92f"}, + {file = "fonttools-4.58.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b6823a633bbce29cf3033508ebb54a433c473fb9833eff7f936bfdc5204fd98d"}, + {file = "fonttools-4.58.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5701fe66a1408c1974d2f78c00f964f8aad17cccbc32bc041e1b81421f31f448"}, + {file = "fonttools-4.58.1-cp311-cp311-win32.whl", hash = "sha256:4cad2c74adf9ee31ae43be6b0b376fdb386d4d50c60979790e32c3548efec051"}, + {file = "fonttools-4.58.1-cp311-cp311-win_amd64.whl", hash = "sha256:7ade12485abccb0f6b6a6e2a88c50e587ff0e201e48e0153dd9b2e0ed67a2f38"}, + {file = "fonttools-4.58.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f56085a65769dc0100822c814069327541db9c3c4f21e599c6138f9dbda75e96"}, + {file = "fonttools-4.58.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:19c65a88e522c9f1be0c05d73541de20feada99d23d06e9b5354023cc3e517b0"}, + {file = "fonttools-4.58.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b01bb37006e97703300bfde7a73d1c7038574dd1df9d8d92ca99af151becf2ca"}, + {file = "fonttools-4.58.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d629dea240f0fc826d8bb14566e95c663214eece21b5932c9228d3e8907f55aa"}, + {file = "fonttools-4.58.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef0b33ff35421a04a638e736823c2dee9d200cdd275cfdb43e875ca745150aae"}, + {file = "fonttools-4.58.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:4db9399ee633855c718fe8bea5eecbdc5bf3fdbed2648e50f67f8946b943ed1c"}, + {file = "fonttools-4.58.1-cp312-cp312-win32.whl", hash = "sha256:5cf04c4f73d36b30ea1cff091a7a9e65f8d5b08345b950f82679034e9f7573f4"}, + {file = "fonttools-4.58.1-cp312-cp312-win_amd64.whl", hash = "sha256:4a3841b59c67fa1f739542b05211609c453cec5d11d21f863dd2652d5a81ec9b"}, + {file = "fonttools-4.58.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:68379d1599fc59569956a97eb7b07e0413f76142ac8513fa24c9f2c03970543a"}, + {file = "fonttools-4.58.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8631905657de4f9a7ae1e12186c1ed20ba4d6168c2d593b9e0bd2908061d341b"}, + {file = "fonttools-4.58.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2ecea7289061c2c71468723409a8dd6e70d1ecfce6bc7686e5a74b9ce9154fe"}, + {file = "fonttools-4.58.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b8860f8cd48b345bd1df1d7be650f600f69ee971ffe338c5bd5bcb6bdb3b92c"}, + {file = "fonttools-4.58.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7c9a0acdefcb8d7ccd7c59202056166c400e797047009ecb299b75ab950c2a9c"}, + {file = "fonttools-4.58.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1e1fac0be6be3e4309058e156948cb73196e5fd994268b89b5e3f5a26ee2b582"}, + {file = "fonttools-4.58.1-cp313-cp313-win32.whl", hash = "sha256:aed7f93a9a072f0ce6fb46aad9474824ac6dd9c7c38a72f8295dd14f2215950f"}, + {file = "fonttools-4.58.1-cp313-cp313-win_amd64.whl", hash = "sha256:b27d69c97c20c9bca807f7ae7fc7df459eb62994859ff6a2a489e420634deac3"}, + {file = "fonttools-4.58.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:927762f9fe39ea0a4d9116353251f409389a6b58fab58717d3c3377acfc23452"}, + {file = "fonttools-4.58.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:761ac80efcb7333c71760458c23f728d6fe2dff253b649faf52471fd7aebe584"}, + {file = "fonttools-4.58.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:deef910226f788a4e72aa0fc1c1657fb43fa62a4200b883edffdb1392b03fe86"}, + {file = "fonttools-4.58.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ff2859ca2319454df8c26af6693269b21f2e9c0e46df126be916a4f6d85fc75"}, + {file = "fonttools-4.58.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:418927e888e1bcc976b4e190a562f110dc27b0b5cac18033286f805dc137fc66"}, + {file = "fonttools-4.58.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a907007a8b341e8e129d3994d34d1cc85bc8bf38b3a0be65eb14e4668f634a21"}, + {file = "fonttools-4.58.1-cp39-cp39-win32.whl", hash = "sha256:455cb6adc9f3419273925fadc51a6207046e147ce503797b29895ba6bdf85762"}, + {file = "fonttools-4.58.1-cp39-cp39-win_amd64.whl", hash = "sha256:2e64931258866df187bd597b4e9fff488f059a0bc230fbae434f0f112de3ce46"}, + {file = "fonttools-4.58.1-py3-none-any.whl", hash = "sha256:db88365d0962cd6f5bce54b190a4669aeed9c9941aa7bd60a5af084d8d9173d6"}, + {file = "fonttools-4.58.1.tar.gz", hash = "sha256:cbc8868e0a29c3e22628dfa1432adf7a104d86d1bc661cecc3e9173070b6ab2d"}, +] + +[package.dependencies] +brotli = {version = ">=1.0.1", optional = true, markers = "platform_python_implementation == \"CPython\" and extra == \"woff\""} +brotlicffi = {version = ">=0.8.0", optional = true, markers = "platform_python_implementation != \"CPython\" and extra == \"woff\""} +zopfli = {version = ">=0.1.4", optional = true, markers = "extra == \"woff\""} + +[package.extras] +all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0) ; python_version <= \"3.12\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"] +graphite = ["lz4 
(>=1.7.4.2)"] +interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr ; sys_platform == \"darwin\""] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0) ; python_version <= \"3.12\""] +woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"] + [[package]] name = "freezegun" version = "1.5.2" @@ -1279,116 +1424,116 @@ python-dateutil = ">=2.7" [[package]] name = "frozenlist" -version = "1.6.0" +version = "1.6.2" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "frozenlist-1.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e6e558ea1e47fd6fa8ac9ccdad403e5dd5ecc6ed8dda94343056fa4277d5c65e"}, - {file = "frozenlist-1.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4b3cd7334a4bbc0c472164f3744562cb72d05002cc6fcf58adb104630bbc352"}, - {file = "frozenlist-1.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9799257237d0479736e2b4c01ff26b5c7f7694ac9692a426cb717f3dc02fff9b"}, - {file = "frozenlist-1.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a7bb0fe1f7a70fb5c6f497dc32619db7d2cdd53164af30ade2f34673f8b1fc"}, - {file = "frozenlist-1.6.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:36d2fc099229f1e4237f563b2a3e0ff7ccebc3999f729067ce4e64a97a7f2869"}, - {file = "frozenlist-1.6.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f27a9f9a86dcf00708be82359db8de86b80d029814e6693259befe82bb58a106"}, - {file = 
"frozenlist-1.6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75ecee69073312951244f11b8627e3700ec2bfe07ed24e3a685a5979f0412d24"}, - {file = "frozenlist-1.6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2c7d5aa19714b1b01a0f515d078a629e445e667b9da869a3cd0e6fe7dec78bd"}, - {file = "frozenlist-1.6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69bbd454f0fb23b51cadc9bdba616c9678e4114b6f9fa372d462ff2ed9323ec8"}, - {file = "frozenlist-1.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7daa508e75613809c7a57136dec4871a21bca3080b3a8fc347c50b187df4f00c"}, - {file = "frozenlist-1.6.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:89ffdb799154fd4d7b85c56d5fa9d9ad48946619e0eb95755723fffa11022d75"}, - {file = "frozenlist-1.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:920b6bd77d209931e4c263223381d63f76828bec574440f29eb497cf3394c249"}, - {file = "frozenlist-1.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d3ceb265249fb401702fce3792e6b44c1166b9319737d21495d3611028d95769"}, - {file = "frozenlist-1.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:52021b528f1571f98a7d4258c58aa8d4b1a96d4f01d00d51f1089f2e0323cb02"}, - {file = "frozenlist-1.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0f2ca7810b809ed0f1917293050163c7654cefc57a49f337d5cd9de717b8fad3"}, - {file = "frozenlist-1.6.0-cp310-cp310-win32.whl", hash = "sha256:0e6f8653acb82e15e5443dba415fb62a8732b68fe09936bb6d388c725b57f812"}, - {file = "frozenlist-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:f1a39819a5a3e84304cd286e3dc62a549fe60985415851b3337b6f5cc91907f1"}, - {file = "frozenlist-1.6.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae8337990e7a45683548ffb2fee1af2f1ed08169284cd829cdd9a7fa7470530d"}, - {file = "frozenlist-1.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:8c952f69dd524558694818a461855f35d36cc7f5c0adddce37e962c85d06eac0"}, - {file = "frozenlist-1.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f5fef13136c4e2dee91bfb9a44e236fff78fc2cd9f838eddfc470c3d7d90afe"}, - {file = "frozenlist-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:716bbba09611b4663ecbb7cd022f640759af8259e12a6ca939c0a6acd49eedba"}, - {file = "frozenlist-1.6.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7b8c4dc422c1a3ffc550b465090e53b0bf4839047f3e436a34172ac67c45d595"}, - {file = "frozenlist-1.6.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b11534872256e1666116f6587a1592ef395a98b54476addb5e8d352925cb5d4a"}, - {file = "frozenlist-1.6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c6eceb88aaf7221f75be6ab498dc622a151f5f88d536661af3ffc486245a626"}, - {file = "frozenlist-1.6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62c828a5b195570eb4b37369fcbbd58e96c905768d53a44d13044355647838ff"}, - {file = "frozenlist-1.6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1c6bd2c6399920c9622362ce95a7d74e7f9af9bfec05fff91b8ce4b9647845a"}, - {file = "frozenlist-1.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49ba23817781e22fcbd45fd9ff2b9b8cdb7b16a42a4851ab8025cae7b22e96d0"}, - {file = "frozenlist-1.6.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:431ef6937ae0f853143e2ca67d6da76c083e8b1fe3df0e96f3802fd37626e606"}, - {file = "frozenlist-1.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9d124b38b3c299ca68433597ee26b7819209cb8a3a9ea761dfe9db3a04bba584"}, - {file = "frozenlist-1.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:118e97556306402e2b010da1ef21ea70cb6d6122e580da64c056b96f524fbd6a"}, - {file = 
"frozenlist-1.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fb3b309f1d4086b5533cf7bbcf3f956f0ae6469664522f1bde4feed26fba60f1"}, - {file = "frozenlist-1.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54dece0d21dce4fdb188a1ffc555926adf1d1c516e493c2914d7c370e454bc9e"}, - {file = "frozenlist-1.6.0-cp311-cp311-win32.whl", hash = "sha256:654e4ba1d0b2154ca2f096bed27461cf6160bc7f504a7f9a9ef447c293caf860"}, - {file = "frozenlist-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:3e911391bffdb806001002c1f860787542f45916c3baf764264a52765d5a5603"}, - {file = "frozenlist-1.6.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:c5b9e42ace7d95bf41e19b87cec8f262c41d3510d8ad7514ab3862ea2197bfb1"}, - {file = "frozenlist-1.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ca9973735ce9f770d24d5484dcb42f68f135351c2fc81a7a9369e48cf2998a29"}, - {file = "frozenlist-1.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6ac40ec76041c67b928ca8aaffba15c2b2ee3f5ae8d0cb0617b5e63ec119ca25"}, - {file = "frozenlist-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b7a8a3180dfb280eb044fdec562f9b461614c0ef21669aea6f1d3dac6ee576"}, - {file = "frozenlist-1.6.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c444d824e22da6c9291886d80c7d00c444981a72686e2b59d38b285617cb52c8"}, - {file = "frozenlist-1.6.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb52c8166499a8150bfd38478248572c924c003cbb45fe3bcd348e5ac7c000f9"}, - {file = "frozenlist-1.6.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b35298b2db9c2468106278537ee529719228950a5fdda686582f68f247d1dc6e"}, - {file = "frozenlist-1.6.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d108e2d070034f9d57210f22fefd22ea0d04609fc97c5f7f5a686b3471028590"}, - {file = 
"frozenlist-1.6.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e1be9111cb6756868ac242b3c2bd1f09d9aea09846e4f5c23715e7afb647103"}, - {file = "frozenlist-1.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:94bb451c664415f02f07eef4ece976a2c65dcbab9c2f1705b7031a3a75349d8c"}, - {file = "frozenlist-1.6.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d1a686d0b0949182b8faddea596f3fc11f44768d1f74d4cad70213b2e139d821"}, - {file = "frozenlist-1.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ea8e59105d802c5a38bdbe7362822c522230b3faba2aa35c0fa1765239b7dd70"}, - {file = "frozenlist-1.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:abc4e880a9b920bc5020bf6a431a6bb40589d9bca3975c980495f63632e8382f"}, - {file = "frozenlist-1.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9a79713adfe28830f27a3c62f6b5406c37376c892b05ae070906f07ae4487046"}, - {file = "frozenlist-1.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9a0318c2068e217a8f5e3b85e35899f5a19e97141a45bb925bb357cfe1daf770"}, - {file = "frozenlist-1.6.0-cp312-cp312-win32.whl", hash = "sha256:853ac025092a24bb3bf09ae87f9127de9fe6e0c345614ac92536577cf956dfcc"}, - {file = "frozenlist-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:2bdfe2d7e6c9281c6e55523acd6c2bf77963cb422fdc7d142fb0cb6621b66878"}, - {file = "frozenlist-1.6.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1d7fb014fe0fbfee3efd6a94fc635aeaa68e5e1720fe9e57357f2e2c6e1a647e"}, - {file = "frozenlist-1.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01bcaa305a0fdad12745502bfd16a1c75b14558dabae226852f9159364573117"}, - {file = "frozenlist-1.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b314faa3051a6d45da196a2c495e922f987dc848e967d8cfeaee8a0328b1cd4"}, - {file = "frozenlist-1.6.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da62fecac21a3ee10463d153549d8db87549a5e77eefb8c91ac84bb42bb1e4e3"}, - 
{file = "frozenlist-1.6.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d1eb89bf3454e2132e046f9599fbcf0a4483ed43b40f545551a39316d0201cd1"}, - {file = "frozenlist-1.6.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18689b40cb3936acd971f663ccb8e2589c45db5e2c5f07e0ec6207664029a9c"}, - {file = "frozenlist-1.6.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e67ddb0749ed066b1a03fba812e2dcae791dd50e5da03be50b6a14d0c1a9ee45"}, - {file = "frozenlist-1.6.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc5e64626e6682638d6e44398c9baf1d6ce6bc236d40b4b57255c9d3f9761f1f"}, - {file = "frozenlist-1.6.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:437cfd39564744ae32ad5929e55b18ebd88817f9180e4cc05e7d53b75f79ce85"}, - {file = "frozenlist-1.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:62dd7df78e74d924952e2feb7357d826af8d2f307557a779d14ddf94d7311be8"}, - {file = "frozenlist-1.6.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a66781d7e4cddcbbcfd64de3d41a61d6bdde370fc2e38623f30b2bd539e84a9f"}, - {file = "frozenlist-1.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:482fe06e9a3fffbcd41950f9d890034b4a54395c60b5e61fae875d37a699813f"}, - {file = "frozenlist-1.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e4f9373c500dfc02feea39f7a56e4f543e670212102cc2eeb51d3a99c7ffbde6"}, - {file = "frozenlist-1.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e69bb81de06827147b7bfbaeb284d85219fa92d9f097e32cc73675f279d70188"}, - {file = "frozenlist-1.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7613d9977d2ab4a9141dde4a149f4357e4065949674c5649f920fec86ecb393e"}, - {file = "frozenlist-1.6.0-cp313-cp313-win32.whl", hash = "sha256:4def87ef6d90429f777c9d9de3961679abf938cb6b7b63d4a7eb8a268babfce4"}, - {file = 
"frozenlist-1.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:37a8a52c3dfff01515e9bbbee0e6063181362f9de3db2ccf9bc96189b557cbfd"}, - {file = "frozenlist-1.6.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:46138f5a0773d064ff663d273b309b696293d7a7c00a0994c5c13a5078134b64"}, - {file = "frozenlist-1.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f88bc0a2b9c2a835cb888b32246c27cdab5740059fb3688852bf91e915399b91"}, - {file = "frozenlist-1.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:777704c1d7655b802c7850255639672e90e81ad6fa42b99ce5ed3fbf45e338dd"}, - {file = "frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ef8d41764c7de0dcdaf64f733a27352248493a85a80661f3c678acd27e31f2"}, - {file = "frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:da5cb36623f2b846fb25009d9d9215322318ff1c63403075f812b3b2876c8506"}, - {file = "frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cbb56587a16cf0fb8acd19e90ff9924979ac1431baea8681712716a8337577b0"}, - {file = "frozenlist-1.6.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6154c3ba59cda3f954c6333025369e42c3acd0c6e8b6ce31eb5c5b8116c07e0"}, - {file = "frozenlist-1.6.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e8246877afa3f1ae5c979fe85f567d220f86a50dc6c493b9b7d8191181ae01e"}, - {file = "frozenlist-1.6.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0f6cce16306d2e117cf9db71ab3a9e8878a28176aeaf0dbe35248d97b28d0c"}, - {file = "frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:1b8e8cd8032ba266f91136d7105706ad57770f3522eac4a111d77ac126a25a9b"}, - {file = "frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = 
"sha256:e2ada1d8515d3ea5378c018a5f6d14b4994d4036591a52ceaf1a1549dec8e1ad"}, - {file = "frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:cdb2c7f071e4026c19a3e32b93a09e59b12000751fc9b0b7758da899e657d215"}, - {file = "frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:03572933a1969a6d6ab509d509e5af82ef80d4a5d4e1e9f2e1cdd22c77a3f4d2"}, - {file = "frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:77effc978947548b676c54bbd6a08992759ea6f410d4987d69feea9cd0919911"}, - {file = "frozenlist-1.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a2bda8be77660ad4089caf2223fdbd6db1858462c4b85b67fbfa22102021e497"}, - {file = "frozenlist-1.6.0-cp313-cp313t-win32.whl", hash = "sha256:a4d96dc5bcdbd834ec6b0f91027817214216b5b30316494d2b1aebffb87c534f"}, - {file = "frozenlist-1.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:e18036cb4caa17ea151fd5f3d70be9d354c99eb8cf817a3ccde8a7873b074348"}, - {file = "frozenlist-1.6.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:536a1236065c29980c15c7229fbb830dedf809708c10e159b8136534233545f0"}, - {file = "frozenlist-1.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ed5e3a4462ff25ca84fb09e0fada8ea267df98a450340ead4c91b44857267d70"}, - {file = "frozenlist-1.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e19c0fc9f4f030fcae43b4cdec9e8ab83ffe30ec10c79a4a43a04d1af6c5e1ad"}, - {file = "frozenlist-1.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c608f833897501dac548585312d73a7dca028bf3b8688f0d712b7acfaf7fb3"}, - {file = "frozenlist-1.6.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0dbae96c225d584f834b8d3cc688825911960f003a85cb0fd20b6e5512468c42"}, - {file = "frozenlist-1.6.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:625170a91dd7261a1d1c2a0c1a353c9e55d21cd67d0852185a5fef86587e6f5f"}, - {file = 
"frozenlist-1.6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1db8b2fc7ee8a940b547a14c10e56560ad3ea6499dc6875c354e2335812f739d"}, - {file = "frozenlist-1.6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4da6fc43048b648275a220e3a61c33b7fff65d11bdd6dcb9d9c145ff708b804c"}, - {file = "frozenlist-1.6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef8e7e8f2f3820c5f175d70fdd199b79e417acf6c72c5d0aa8f63c9f721646f"}, - {file = "frozenlist-1.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aa733d123cc78245e9bb15f29b44ed9e5780dc6867cfc4e544717b91f980af3b"}, - {file = "frozenlist-1.6.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ba7f8d97152b61f22d7f59491a781ba9b177dd9f318486c5fbc52cde2db12189"}, - {file = "frozenlist-1.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:56a0b8dd6d0d3d971c91f1df75e824986667ccce91e20dca2023683814344791"}, - {file = "frozenlist-1.6.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:5c9e89bf19ca148efcc9e3c44fd4c09d5af85c8a7dd3dbd0da1cb83425ef4983"}, - {file = "frozenlist-1.6.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1330f0a4376587face7637dfd245380a57fe21ae8f9d360c1c2ef8746c4195fa"}, - {file = "frozenlist-1.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2187248203b59625566cac53572ec8c2647a140ee2738b4e36772930377a533c"}, - {file = "frozenlist-1.6.0-cp39-cp39-win32.whl", hash = "sha256:2b8cf4cfea847d6c12af06091561a89740f1f67f331c3fa8623391905e878530"}, - {file = "frozenlist-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:1255d5d64328c5a0d066ecb0f02034d086537925f1f04b50b1ae60d37afbf572"}, - {file = "frozenlist-1.6.0-py3-none-any.whl", hash = "sha256:535eec9987adb04701266b92745d6cdcef2e77669299359c3009c3404dd5d191"}, - {file = "frozenlist-1.6.0.tar.gz", hash = "sha256:b99655c32c1c8e06d111e7f41c06c29a5318cb1835df23a45518e02a47c63b68"}, + {file = 
"frozenlist-1.6.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:92836b9903e52f787f4f4bfc6cf3b03cf19de4cbc09f5969e58806f876d8647f"}, + {file = "frozenlist-1.6.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3af419982432a13a997451e611ff7681a4fbf81dca04f70b08fc51106335ff0"}, + {file = "frozenlist-1.6.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1570ba58f0852a6e6158d4ad92de13b9aba3474677c3dee827ba18dcf439b1d8"}, + {file = "frozenlist-1.6.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0de575df0135949c4049ae42db714c43d1693c590732abc78c47a04228fc1efb"}, + {file = "frozenlist-1.6.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2b6eaba27ec2b3c0af7845619a425eeae8d510d5cc83fb3ef80569129238153b"}, + {file = "frozenlist-1.6.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af1ee5188d2f63b4f09b67cf0c60b8cdacbd1e8d24669eac238e247d8b157581"}, + {file = "frozenlist-1.6.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9179c5186eb996c0dd7e4c828858ade4d7a8d1d12dd67320675a6ae7401f2647"}, + {file = "frozenlist-1.6.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38814ebc3c6bb01dc3bb4d6cffd0e64c19f4f2d03e649978aeae8e12b81bdf43"}, + {file = "frozenlist-1.6.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dbcab0531318fc9ca58517865fae63a2fe786d5e2d8f3a56058c29831e49f13"}, + {file = "frozenlist-1.6.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7472e477dc5d6a000945f45b6e38cbb1093fdec189dc1e98e57f8ab53f8aa246"}, + {file = "frozenlist-1.6.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:17c230586d47332774332af86cc1e69ee095731ec70c27e5698dfebb9db167a0"}, + {file = "frozenlist-1.6.2-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:946a41e095592cf1c88a1fcdd154c13d0ef6317b371b817dc2b19b3d93ca0811"}, + {file = "frozenlist-1.6.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d90c9b36c669eb481de605d3c2da02ea98cba6a3f5e93b3fe5881303026b2f14"}, + {file = "frozenlist-1.6.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8651dd2d762d6eefebe8450ec0696cf3706b0eb5e46463138931f70c667ba612"}, + {file = "frozenlist-1.6.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:48400e6a09e217346949c034105b0df516a1b3c5aa546913b70b71b646caa9f5"}, + {file = "frozenlist-1.6.2-cp310-cp310-win32.whl", hash = "sha256:56354f09082262217f837d91106f1cc204dd29ac895f9bbab33244e2fa948bd7"}, + {file = "frozenlist-1.6.2-cp310-cp310-win_amd64.whl", hash = "sha256:3016ff03a332cdd2800f0eed81ca40a2699b2f62f23626e8cf81a2993867978a"}, + {file = "frozenlist-1.6.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eb66c5d48b89701b93d58c31a48eb64e15d6968315a9ccc7dfbb2d6dc2c62ab7"}, + {file = "frozenlist-1.6.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8fb9aee4f7b495044b868d7e74fb110d8996e8fddc0bfe86409c7fc7bd5692f0"}, + {file = "frozenlist-1.6.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48dde536fc4d8198fad4e211f977b1a5f070e6292801decf2d6bc77b805b0430"}, + {file = "frozenlist-1.6.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91dd2fb760f4a2c04b3330e0191787c3437283f9241f0b379017d4b13cea8f5e"}, + {file = "frozenlist-1.6.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f01f34f8a5c7b4d74a1c65227678822e69801dcf68edd4c11417a7c83828ff6f"}, + {file = "frozenlist-1.6.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f43f872cc4cfc46d9805d0e71302e9c39c755d5ad7572198cd2ceb3a291176cc"}, + {file = "frozenlist-1.6.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f96cc8ab3a73d42bcdb6d9d41c3dceffa8da8273ac54b71304b891e32de8b13"}, + {file = 
"frozenlist-1.6.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c0b257123320832cce9bea9935c860e4fa625b0e58b10db49fdfef70087df81"}, + {file = "frozenlist-1.6.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23dc4def97ccc0232f491836050ae664d3d2352bb43ad4cd34cd3399ad8d1fc8"}, + {file = "frozenlist-1.6.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fcf3663463c040315f025bd6a5f88b3748082cfe111e90fd422f71668c65de52"}, + {file = "frozenlist-1.6.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:16b9e7b59ea6eef876a8a5fac084c95fd4bac687c790c4d48c0d53c6bcde54d1"}, + {file = "frozenlist-1.6.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:308b40d32a98a8d0d09bc28e4cbc13a0b803a0351041d4548564f28f6b148b05"}, + {file = "frozenlist-1.6.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:baf585d8968eaad6c1aae99456c40978a9fa822ccbdb36fd4746b581ef338192"}, + {file = "frozenlist-1.6.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:4dfdbdb671a6af6ea1a363b210373c8233df3925d9a7fb99beaa3824f6b99656"}, + {file = "frozenlist-1.6.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:94916e3acaeb8374d5aea9c37db777c9f0a2b9be46561f5de30064cbbbfae54a"}, + {file = "frozenlist-1.6.2-cp311-cp311-win32.whl", hash = "sha256:0453e3d2d12616949cb2581068942a0808c7255f2abab0676d2da7db30f9ea11"}, + {file = "frozenlist-1.6.2-cp311-cp311-win_amd64.whl", hash = "sha256:fb512753c4bbf0af03f6b9c7cc5ecc9bbac2e198a94f61aaabd26c3cf3229c8c"}, + {file = "frozenlist-1.6.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:48544d07404d7fcfccb6cc091922ae10de4d9e512c537c710c063ae8f5662b85"}, + {file = "frozenlist-1.6.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ee0cf89e7638de515c0bb2e8be30e8e2e48f3be9b6c2f7127bca4a1f35dff45"}, + {file = "frozenlist-1.6.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:e084d838693d73c0fe87d212b91af80c18068c95c3d877e294f165056cedfa58"}, + {file = "frozenlist-1.6.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84d918b01781c6ebb5b776c18a87dd3016ff979eb78626aaca928bae69a640c3"}, + {file = "frozenlist-1.6.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e2892d9ab060a847f20fab83fdb886404d0f213f648bdeaebbe76a6134f0973d"}, + {file = "frozenlist-1.6.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbd2225d7218e7d386f4953d11484b0e38e5d134e85c91f0a6b0f30fb6ae25c4"}, + {file = "frozenlist-1.6.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b679187cba0a99f1162c7ec1b525e34bdc5ca246857544d16c1ed234562df80"}, + {file = "frozenlist-1.6.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bceb7bd48849d4b76eac070a6d508aa3a529963f5d9b0a6840fd41fb381d5a09"}, + {file = "frozenlist-1.6.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b1b79ae86fdacc4bf842a4e0456540947abba64a84e61b5ae24c87adb089db"}, + {file = "frozenlist-1.6.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6c5c3c575148aa7308a38709906842039d7056bf225da6284b7a11cf9275ac5d"}, + {file = "frozenlist-1.6.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:16263bd677a31fe1a5dc2b803b564e349c96f804a81706a62b8698dd14dbba50"}, + {file = "frozenlist-1.6.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2e51b2054886ff7db71caf68285c2cd936eb7a145a509965165a2aae715c92a7"}, + {file = "frozenlist-1.6.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ae1785b76f641cce4efd7e6f49ca4ae456aa230383af5ab0d4d3922a7e37e763"}, + {file = "frozenlist-1.6.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:30155cc481f73f92f47ab1e858a7998f7b1207f9b5cf3b3cba90ec65a7f224f5"}, + {file = 
"frozenlist-1.6.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1a1d82f2eb3d2875a8d139ae3f5026f7797f9de5dce44f53811ab0a883e85e7"}, + {file = "frozenlist-1.6.2-cp312-cp312-win32.whl", hash = "sha256:84105cb0f3479dfa20b85f459fb2db3b0ee52e2f84e86d447ea8b0de1fb7acdd"}, + {file = "frozenlist-1.6.2-cp312-cp312-win_amd64.whl", hash = "sha256:eecc861bd30bc5ee3b04a1e6ebf74ed0451f596d91606843f3edbd2f273e2fe3"}, + {file = "frozenlist-1.6.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2ad8851ae1f6695d735f8646bf1e68675871789756f7f7e8dc8224a74eabb9d0"}, + {file = "frozenlist-1.6.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cd2d5abc0ccd99a2a5b437987f3b1e9c265c1044d2855a09ac68f09bbb8082ca"}, + {file = "frozenlist-1.6.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15c33f665faa9b8f8e525b987eeaae6641816e0f6873e8a9c4d224338cebbb55"}, + {file = "frozenlist-1.6.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e6c0681783723bb472b6b8304e61ecfcb4c2b11cf7f243d923813c21ae5d2a"}, + {file = "frozenlist-1.6.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:61bae4d345a26550d0ed9f2c9910ea060f89dbfc642b7b96e9510a95c3a33b3c"}, + {file = "frozenlist-1.6.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90e5a84016d0d2fb828f770ede085b5d89155fcb9629b8a3237c960c41c120c3"}, + {file = "frozenlist-1.6.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55dc289a064c04819d669e6e8a85a1c0416e6c601782093bdc749ae14a2f39da"}, + {file = "frozenlist-1.6.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b79bcf97ca03c95b044532a4fef6e5ae106a2dd863875b75fde64c553e3f4820"}, + {file = "frozenlist-1.6.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e5e7564d232a782baa3089b25a0d979e2e4d6572d3c7231fcceacc5c22bf0f7"}, + {file = 
"frozenlist-1.6.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6fcd8d56880dccdd376afb18f483ab55a0e24036adc9a83c914d4b7bb5729d4e"}, + {file = "frozenlist-1.6.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4fbce985c7fe7bafb4d9bf647c835dbe415b465a897b0c79d1bdf0f3fae5fe50"}, + {file = "frozenlist-1.6.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3bd12d727cd616387d50fe283abebb2db93300c98f8ff1084b68460acd551926"}, + {file = "frozenlist-1.6.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:38544cae535ed697960891131731b33bb865b7d197ad62dc380d2dbb1bceff48"}, + {file = "frozenlist-1.6.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:47396898f98fae5c9b9bb409c3d2cf6106e409730f35a0926aad09dd7acf1ef5"}, + {file = "frozenlist-1.6.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d10d835f8ce8571fd555db42d3aef325af903535dad7e6faa7b9c8abe191bffc"}, + {file = "frozenlist-1.6.2-cp313-cp313-win32.whl", hash = "sha256:a400fe775a41b6d7a3fef00d88f10cbae4f0074c9804e282013d7797671ba58d"}, + {file = "frozenlist-1.6.2-cp313-cp313-win_amd64.whl", hash = "sha256:cc8b25b321863ed46992558a29bb09b766c41e25f31461666d501be0f893bada"}, + {file = "frozenlist-1.6.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:56de277a0e0ad26a1dcdc99802b4f5becd7fd890807b68e3ecff8ced01d58132"}, + {file = "frozenlist-1.6.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9cb386dd69ae91be586aa15cb6f39a19b5f79ffc1511371eca8ff162721c4867"}, + {file = "frozenlist-1.6.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53835d8a6929c2f16e02616f8b727bd140ce8bf0aeddeafdb290a67c136ca8ad"}, + {file = "frozenlist-1.6.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc49f2277e8173abf028d744f8b7d69fe8cc26bffc2de97d47a3b529599fbf50"}, + {file = "frozenlist-1.6.2-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:65eb9e8a973161bdac5fa06ea6bd261057947adc4f47a7a6ef3d6db30c78c5b4"}, + {file = 
"frozenlist-1.6.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:301eb2f898d863031f8c5a56c88a6c5d976ba11a4a08a1438b96ee3acb5aea80"}, + {file = "frozenlist-1.6.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:207f717fd5e65fddb77d33361ab8fa939f6d89195f11307e073066886b33f2b8"}, + {file = "frozenlist-1.6.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f83992722642ee0db0333b1dbf205b1a38f97d51a7382eb304ba414d8c3d1e05"}, + {file = "frozenlist-1.6.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12af99e6023851b36578e5bcc60618b5b30f4650340e29e565cd1936326dbea7"}, + {file = "frozenlist-1.6.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6f01620444a674eaad900a3263574418e99c49e2a5d6e5330753857363b5d59f"}, + {file = "frozenlist-1.6.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:82b94c8948341512306ca8ccc702771600b442c6abe5f8ee017e00e452a209e8"}, + {file = "frozenlist-1.6.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:324a4cf4c220ddb3db1f46ade01e48432c63fa8c26812c710006e7f6cfba4a08"}, + {file = "frozenlist-1.6.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:695284e51458dabb89af7f7dc95c470aa51fd259207aba5378b187909297feef"}, + {file = "frozenlist-1.6.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:9ccbeb1c8dda4f42d0678076aa5cbde941a232be71c67b9d8ca89fbaf395807c"}, + {file = "frozenlist-1.6.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cbbdf62fcc1864912c592a1ec748fee94f294c6b23215d5e8e9569becb7723ee"}, + {file = "frozenlist-1.6.2-cp313-cp313t-win32.whl", hash = "sha256:76857098ee17258df1a61f934f2bae052b8542c9ea6b187684a737b2e3383a65"}, + {file = "frozenlist-1.6.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c06a88daba7e891add42f9278cdf7506a49bc04df9b1648be54da1bf1c79b4c6"}, + {file = "frozenlist-1.6.2-cp39-cp39-macosx_10_9_universal2.whl", 
hash = "sha256:99119fa5ae292ac1d3e73336ecbe3301dbb2a7f5b4e6a4594d3a6b2e240c31c1"}, + {file = "frozenlist-1.6.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:af923dbcfd382554e960328133c2a8151706673d1280f55552b1bb914d276267"}, + {file = "frozenlist-1.6.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69e85175df4cc35f2cef8cb60a8bad6c5fc50e91524cd7018d73dd2fcbc70f5d"}, + {file = "frozenlist-1.6.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97dcdffe18c0e35ce57b3d7c1352893a3608e7578b814abb3b2a3cc15907e682"}, + {file = "frozenlist-1.6.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:cc228faf4533327e5f1d153217ab598648a2cd5f6b1036d82e63034f079a5861"}, + {file = "frozenlist-1.6.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ee53aba5d0768e2c5c6185ec56a94bab782ef002429f293497ec5c5a3b94bdf"}, + {file = "frozenlist-1.6.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d3214738024afd53434614ee52aa74353a562414cd48b1771fa82fd982cb1edb"}, + {file = "frozenlist-1.6.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5628e6a6f74ef1693adbe25c0bce312eb9aee82e58abe370d287794aff632d0f"}, + {file = "frozenlist-1.6.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad7678d3e32cb3884879f10c679804c08f768df55078436fb56668f3e13e2a5e"}, + {file = "frozenlist-1.6.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b776ab5217e2bf99c84b2cbccf4d30407789c0653f72d1653b5f8af60403d28f"}, + {file = "frozenlist-1.6.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:b1e162a99405cb62d338f747b8625d6bd7b6794383e193335668295fb89b75fb"}, + {file = "frozenlist-1.6.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2de1ddeb9dd8a07383f6939996217f0f1b2ce07f6a01d74c9adb1db89999d006"}, + {file = "frozenlist-1.6.2-cp39-cp39-musllinux_1_2_ppc64le.whl", 
hash = "sha256:2dcabe4e7aac889d41316c1698df0eb2565ed233b66fab6bc4a5c5b7769cad4c"}, + {file = "frozenlist-1.6.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:06e28cd2ac31797e12ec8c65aa462a89116323f045e8b1930127aba9486aab24"}, + {file = "frozenlist-1.6.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:86f908b70043c3517f862247bdc621bd91420d40c3e90ede1701a75f025fcd5f"}, + {file = "frozenlist-1.6.2-cp39-cp39-win32.whl", hash = "sha256:2647a3d11f10014a5f9f2ca38c7fadd0dd28f5b1b5e9ce9c9d194aa5d0351c7e"}, + {file = "frozenlist-1.6.2-cp39-cp39-win_amd64.whl", hash = "sha256:e2cbef30ba27a1d9f3e3c6aa84a60f53d907d955969cd0103b004056e28bca08"}, + {file = "frozenlist-1.6.2-py3-none-any.whl", hash = "sha256:947abfcc8c42a329bbda6df97a4b9c9cdb4e12c85153b3b57b9d2f02aa5877dc"}, + {file = "frozenlist-1.6.2.tar.gz", hash = "sha256:effc641518696471cf4962e8e32050133bc1f7b2851ae8fd0cb8797dd70dc202"}, ] [[package]] @@ -1571,12 +1716,6 @@ files = [ {file = "geventhttpclient-2.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:447fc2d49a41449684154c12c03ab80176a413e9810d974363a061b71bdbf5a0"}, {file = "geventhttpclient-2.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4598c2aa14c866a10a07a2944e2c212f53d0c337ce211336ad68ae8243646216"}, {file = "geventhttpclient-2.3.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:69d2bd7ab7f94a6c73325f4b88fd07b0d5f4865672ed7a519f2d896949353761"}, - {file = "geventhttpclient-2.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:45a3f7e3531dd2650f5bb840ed11ce77d0eeb45d0f4c9cd6985eb805e17490e6"}, - {file = "geventhttpclient-2.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:73b427e0ea8c2750ee05980196893287bfc9f2a155a282c0f248b472ea7ae3e7"}, - {file = "geventhttpclient-2.3.3-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c2959ef84271e4fa646c3dbaad9e6f2912bf54dcdfefa5999c2ef7c927d92127"}, - {file = "geventhttpclient-2.3.3-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a800fcb8e53a8f4a7c02b4b403d2325a16cad63a877e57bd603aa50bf0e475b"}, - {file = "geventhttpclient-2.3.3-pp311-pypy311_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:528321e9aab686435ba09cc6ff90f12e577ace79762f74831ec2265eeab624a8"}, - {file = "geventhttpclient-2.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:034be44ff3318359e3c678cb5c4ed13efd69aeb558f2981a32bd3e3fb5355700"}, {file = "geventhttpclient-2.3.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a3182f1457599c2901c48a1def37a5bc4762f696077e186e2050fcc60b2fbdf"}, {file = "geventhttpclient-2.3.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:86b489238dc2cbfa53cdd5621e888786a53031d327e0a8509529c7568292b0ce"}, {file = "geventhttpclient-2.3.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4c8aca6ab5da4211870c1d8410c699a9d543e86304aac47e1558ec94d0da97a"}, @@ -1747,21 +1886,21 @@ hyperframe = ">=6.1,<7" [[package]] name = "hf-xet" -version = "1.1.2" +version = "1.1.3" description = "Fast transfer of large files with the Hugging Face Hub." 
optional = false python-versions = ">=3.8" groups = ["main"] markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\"" files = [ - {file = "hf_xet-1.1.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:dfd1873fd648488c70735cb60f7728512bca0e459e61fcd107069143cd798469"}, - {file = "hf_xet-1.1.2-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:29b584983b2d977c44157d9241dcf0fd50acde0b7bff8897fe4386912330090d"}, - {file = "hf_xet-1.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b29ac84298147fe9164cc55ad994ba47399f90b5d045b0b803b99cf5f06d8ec"}, - {file = "hf_xet-1.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d921ba32615676e436a0d15e162331abc9ed43d440916b1d836dc27ce1546173"}, - {file = "hf_xet-1.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d9b03c34e13c44893ab6e8fea18ee8d2a6878c15328dd3aabedbdd83ee9f2ed3"}, - {file = "hf_xet-1.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01b18608955b3d826307d37da8bd38b28a46cd2d9908b3a3655d1363274f941a"}, - {file = "hf_xet-1.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:3562902c81299b09f3582ddfb324400c6a901a2f3bc854f83556495755f4954c"}, - {file = "hf_xet-1.1.2.tar.gz", hash = "sha256:3712d6d4819d3976a1c18e36db9f503e296283f9363af818f50703506ed63da3"}, + {file = "hf_xet-1.1.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c3b508b5f583a75641aebf732853deb058953370ce8184f5dabc49f803b0819b"}, + {file = "hf_xet-1.1.3-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:b788a61977fbe6b5186e66239e2a329a3f0b7e7ff50dad38984c0c74f44aeca1"}, + {file = "hf_xet-1.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd2da210856444a34aad8ada2fc12f70dabed7cc20f37e90754d1d9b43bc0534"}, + {file = "hf_xet-1.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8203f52827e3df65981984936654a5b390566336956f65765a8aa58c362bb841"}, + {file = 
"hf_xet-1.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:30c575a5306f8e6fda37edb866762140a435037365eba7a17ce7bd0bc0216a8b"}, + {file = "hf_xet-1.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7c1a6aa6abed1f696f8099aa9796ca04c9ee778a58728a115607de9cc4638ff1"}, + {file = "hf_xet-1.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:b578ae5ac9c056296bb0df9d018e597c8dc6390c5266f35b5c44696003cde9f3"}, + {file = "hf_xet-1.1.3.tar.gz", hash = "sha256:a5f09b1dd24e6ff6bcedb4b0ddab2d81824098bb002cf8b4ffa780545fa348c3"}, ] [package.extras] @@ -1827,32 +1966,16 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "httpx-aiohttp" -version = "0.1.4" -description = "Aiohttp transport for HTTPX" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "httpx_aiohttp-0.1.4-py3-none-any.whl", hash = "sha256:740a8725af7b7a03d12f21ccd48a83457baa037304646589b87595746c05c87e"}, - {file = "httpx_aiohttp-0.1.4.tar.gz", hash = "sha256:61030eed28deeac26286d2e872b7c167f5450b7b0eec5a617ae7d3f7da9c8684"}, -] - -[package.dependencies] -aiohttp = ">=3,<4" -httpx = ">=0.28.1,<1" - [[package]] name = "huggingface-hub" -version = "0.32.0" +version = "0.32.4" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" groups = ["main"] files = [ - {file = "huggingface_hub-0.32.0-py3-none-any.whl", hash = "sha256:e56e94109649ce6ebdb59b4e393ee3543ec0eca2eab4f41b269e1d885c88d08c"}, - {file = "huggingface_hub-0.32.0.tar.gz", hash = "sha256:dd66c9365ea43049ec9b939bdcdb21a0051e1bd70026fc50304e4fb1bb6a15ba"}, + {file = "huggingface_hub-0.32.4-py3-none-any.whl", hash = "sha256:37abf8826b38d971f60d3625229221c36e53fe58060286db9baf619cfbf39767"}, + {file = "huggingface_hub-0.32.4.tar.gz", hash = "sha256:f61d45cd338736f59fb0e97550b74c24ee771bcc92c05ae0766b9116abe720be"}, ] [package.dependencies] @@ -1997,14 +2120,14 @@ 
ipython = {version = ">=7.31.1", markers = "python_version >= \"3.11\""} [[package]] name = "ipython" -version = "9.2.0" +version = "9.3.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.11" groups = ["main"] files = [ - {file = "ipython-9.2.0-py3-none-any.whl", hash = "sha256:fef5e33c4a1ae0759e0bba5917c9db4eb8c53fee917b6a526bd973e1ca5159f6"}, - {file = "ipython-9.2.0.tar.gz", hash = "sha256:62a9373dbc12f28f9feaf4700d052195bf89806279fc8ca11f3f54017d04751b"}, + {file = "ipython-9.3.0-py3-none-any.whl", hash = "sha256:1a0b6dd9221a1f5dddf725b57ac0cb6fddc7b5f470576231ae9162b9b3455a04"}, + {file = "ipython-9.3.0.tar.gz", hash = "sha256:79eb896f9f23f50ad16c3bc205f686f6e030ad246cc309c6279a242b14afe9d8"}, ] [package.dependencies] @@ -2193,14 +2316,14 @@ files = [ [[package]] name = "jsonschema" -version = "4.23.0" +version = "4.24.0" description = "An implementation of JSON Schema validation for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, - {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, + {file = "jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d"}, + {file = "jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196"}, ] [package.dependencies] @@ -2230,21 +2353,20 @@ referencing = ">=0.31.0" [[package]] name = "litellm" -version = "1.71.0" +version = "1.72.0" description = "Library to easily interface with LLM API providers" optional = false python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" groups = ["main"] files = [ - {file = "litellm-1.71.0-py3-none-any.whl", hash = 
"sha256:caae99df64ddc27057d20d63aa2ef4931547ca4cc95fc9d3896081226422ccb2"}, - {file = "litellm-1.71.0.tar.gz", hash = "sha256:1908ebb61e6f2909a092648c5243adae8f60a9ce24d71530be91e2e727eb43b0"}, + {file = "litellm-1.72.0-py3-none-any.whl", hash = "sha256:88360a7ae9aa9c96278ae1bb0a459226f909e711c5d350781296d0640386a824"}, + {file = "litellm-1.72.0.tar.gz", hash = "sha256:135022b9b8798f712ffa84e71ac419aa4310f1d0a70f79dd2007f7ef3a381e43"}, ] [package.dependencies] aiohttp = "*" click = "*" httpx = ">=0.23.0" -httpx-aiohttp = {version = ">=0.1.4", markers = "python_version >= \"3.9\""} importlib-metadata = ">=6.8.0" jinja2 = ">=3.1.2,<4.0.0" jsonschema = ">=4.22.0,<5.0.0" @@ -2256,29 +2378,29 @@ tokenizers = "*" [package.extras] extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "redisvl (>=0.4.1,<0.5.0) ; python_version >= \"3.9\" and python_version < \"3.14\"", "resend (>=0.8.0,<0.9.0)"] -proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "boto3 (==1.34.34)", "cryptography (>=43.0.1,<44.0.0)", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.5)", "litellm-proxy-extras (==0.2.0)", "mcp (==1.5.0) ; python_version >= \"3.10\"", "orjson (>=3.9.7,<4.0.0)", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0) ; sys_platform != \"win32\"", "websockets (>=13.1.0,<14.0.0)"] +proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "boto3 (==1.34.34)", "cryptography (>=43.0.1,<44.0.0)", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.7)", "litellm-proxy-extras (==0.2.2)", "mcp (==1.5.0) ; python_version >= \"3.10\"", "orjson (>=3.9.7,<4.0.0)", "pynacl 
(>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0) ; sys_platform != \"win32\"", "websockets (>=13.1.0,<14.0.0)"] utils = ["numpydoc"] [[package]] name = "locust" -version = "2.37.5" +version = "2.37.7" description = "Developer-friendly load testing framework" optional = false python-versions = ">=3.10" groups = ["test"] files = [ - {file = "locust-2.37.5-py3-none-any.whl", hash = "sha256:9922a2718b42f1c57a05c822e47b66555b3c61292694ec5edaf7a166fac6d112"}, - {file = "locust-2.37.5.tar.gz", hash = "sha256:c90824c4cb6a01cdede220684c7c8381253fcca47fc689dbca4f6c46d740c99f"}, + {file = "locust-2.37.7-py3-none-any.whl", hash = "sha256:925d78f4834d6434ec9a9c4f3984228f762d2c2d95b2804e9b0e9fc856f2d3b5"}, + {file = "locust-2.37.7.tar.gz", hash = "sha256:9421ff51ce023a5ddff74f2544a13310f7929ca02452be2663130ffab26585a1"}, ] [package.dependencies] -configargparse = ">=1.5.5" +configargparse = ">=1.7.1" flask = ">=2.0.0" flask-cors = ">=3.0.10" flask-login = ">=0.6.3" gevent = ">=24.10.1,<25.0.0" geventhttpclient = ">=2.3.1" -locust-cloud = ">=1.21.8" +locust-cloud = ">=1.23.0" msgpack = ">=1.0.0" psutil = ">=5.9.1" pywin32 = {version = "*", markers = "sys_platform == \"win32\""} @@ -2289,32 +2411,32 @@ werkzeug = ">=2.0.0" [[package]] name = "locust-cloud" -version = "1.21.8" +version = "1.23.0" description = "Locust Cloud" optional = false python-versions = ">=3.10" groups = ["test"] files = [ - {file = "locust_cloud-1.21.8-py3-none-any.whl", hash = "sha256:4f06b5d8a26ba91840a768008f4870965b13cc71481de9797409556de2edc007"}, - {file = "locust_cloud-1.21.8.tar.gz", hash = "sha256:e8bde0da013c8731a45cc834cdf9fec2fc21738a5f2807d93c2c5eeb3008a80e"}, + {file = "locust_cloud-1.23.0-py3-none-any.whl", hash = "sha256:936cc4feb0b0dd89e113f6318a1205f318ed49a9df4e0feca8d40f2d9b353d30"}, + {file = "locust_cloud-1.23.0.tar.gz", hash = 
"sha256:4038a09eda858b483ced20f5cb82caf3f866244c2c7864e0da5c32722b97f532"}, ] [package.dependencies] -configargparse = ">=1.5.5" +configargparse = ">=1.7.1" gevent = ">=24.10.1,<25.0.0" platformdirs = ">=4.3.6,<5.0.0" python-socketio = {version = "5.13.0", extras = ["client"]} [[package]] name = "logfire" -version = "3.16.0" +version = "3.17.0" description = "The best Python observability tool! 🪵🔥" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "logfire-3.16.0-py3-none-any.whl", hash = "sha256:ac49c1a63c492e6ed205a3d49ee9ded318ca6e5af49c942465cade51c759605b"}, - {file = "logfire-3.16.0.tar.gz", hash = "sha256:f09061cffd09b4787e2b785e2603da0868c1ac9c7cb124fdc04de82298feef15"}, + {file = "logfire-3.17.0-py3-none-any.whl", hash = "sha256:8ebccdb01e3af12ecf271246c2a6761ea35c109ef94f4d7520db6b7c3532cd05"}, + {file = "logfire-3.17.0.tar.gz", hash = "sha256:5c7230b373d9994b61466ec8a2462957826c4ca1d8351af7f43dde3e54f6f072"}, ] [package.dependencies] @@ -2332,7 +2454,7 @@ asgi = ["opentelemetry-instrumentation-asgi (>=0.42b0)"] asyncpg = ["opentelemetry-instrumentation-asyncpg (>=0.42b0)"] aws-lambda = ["opentelemetry-instrumentation-aws-lambda (>=0.42b0)"] celery = ["opentelemetry-instrumentation-celery (>=0.42b0)"] -django = ["opentelemetry-instrumentation-django (>=0.42b0)"] +django = ["opentelemetry-instrumentation-asgi (>=0.42b0)", "opentelemetry-instrumentation-django (>=0.42b0)"] fastapi = ["opentelemetry-instrumentation-fastapi (>=0.42b0)"] flask = ["opentelemetry-instrumentation-flask (>=0.42b0)"] httpx = ["opentelemetry-instrumentation-httpx (>=0.42b0)"] @@ -2518,14 +2640,14 @@ source = ["Cython (>=3.0.11,<3.1.0)"] [[package]] name = "mammoth" -version = "1.9.0" +version = "1.9.1" description = "Convert Word documents from docx to simple and clean HTML and Markdown" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "mammoth-1.9.0-py2.py3-none-any.whl", hash = 
"sha256:0eea277316586f0ca65d86834aec4de5a0572c83ec54b4991f9bb520a891150f"}, - {file = "mammoth-1.9.0.tar.gz", hash = "sha256:74f5dae10ca240fd9b7a0e1a6deaebe0aad23bc590633ef6f5e868aa9b7042a6"}, + {file = "mammoth-1.9.1-py2.py3-none-any.whl", hash = "sha256:f0569bd640cee6c77a07e7c75c5dc10d745dc4dc95d530cfcbb0a5d9536d636c"}, + {file = "mammoth-1.9.1.tar.gz", hash = "sha256:7924254ab8f03efe55fadc0fd5f7828db831190eb2679d63cb4372873e71c572"}, ] [package.dependencies] @@ -2572,6 +2694,24 @@ profiling = ["gprof2dot"] rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] +[[package]] +name = "markdown2" +version = "2.5.3" +description = "A fast and complete Python implementation of Markdown" +optional = false +python-versions = "<4,>=3.9" +groups = ["main"] +files = [ + {file = "markdown2-2.5.3-py3-none-any.whl", hash = "sha256:a8ebb7e84b8519c37bf7382b3db600f1798a22c245bfd754a1f87ca8d7ea63b3"}, + {file = "markdown2-2.5.3.tar.gz", hash = "sha256:4d502953a4633408b0ab3ec503c5d6984d1b14307e32b325ec7d16ea57524895"}, +] + +[package.extras] +all = ["latex2mathml ; python_version >= \"3.8.1\"", "pygments (>=2.7.3)", "wavedrom"] +code-syntax-highlighting = ["pygments (>=2.7.3)"] +latex = ["latex2mathml ; python_version >= \"3.8.1\""] +wavedrom = ["wavedrom"] + [[package]] name = "markdownify" version = "1.1.0" @@ -2965,14 +3105,14 @@ files = [ [[package]] name = "openai" -version = "1.82.0" +version = "1.84.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "openai-1.82.0-py3-none-any.whl", hash = "sha256:8c40647fea1816516cb3de5189775b30b5f4812777e40b8768f361f232b61b30"}, - {file = "openai-1.82.0.tar.gz", hash = "sha256:b0a009b9a58662d598d07e91e4219ab4b1e3d8ba2db3f173896a92b9b874d1a7"}, + {file = "openai-1.84.0-py3-none-any.whl", hash = 
"sha256:7ec4436c3c933d68dc0f5a0cef0cb3dbc0864a54d62bddaf2ed5f3d521844711"}, + {file = "openai-1.84.0.tar.gz", hash = "sha256:4caa43bdab262cc75680ce1a2322cfc01626204074f7e8d9939ab372acf61698"}, ] [package.dependencies] @@ -3510,14 +3650,14 @@ dev = ["certifi", "mypy (>=1.14.1)", "pytest (>=8.1.1)", "pytest-asyncio (>=0.25 [[package]] name = "prometheus-client" -version = "0.22.0" +version = "0.22.1" description = "Python client for the Prometheus monitoring system." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "prometheus_client-0.22.0-py3-none-any.whl", hash = "sha256:c8951bbe64e62b96cd8e8f5d917279d1b9b91ab766793f33d4dce6c228558713"}, - {file = "prometheus_client-0.22.0.tar.gz", hash = "sha256:18da1d2241ac2d10c8d2110f13eedcd5c7c0c8af18c926e8731f04fc10cd575c"}, + {file = "prometheus_client-0.22.1-py3-none-any.whl", hash = "sha256:cca895342e308174341b2cbf99a56bef291fbc0ef7b9e5412a0f26d653ba7094"}, + {file = "prometheus_client-0.22.1.tar.gz", hash = "sha256:190f1331e783cf21eb60bca559354e0a4d4378facecf78f5428c39b675d20d28"}, ] [package.extras] @@ -3648,23 +3788,23 @@ files = [ [[package]] name = "protobuf" -version = "5.29.4" +version = "5.29.5" description = "" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"}, - {file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"}, - {file = "protobuf-5.29.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:307ecba1d852ec237e9ba668e087326a67564ef83e45a0189a772ede9e854dd0"}, - {file = "protobuf-5.29.4-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:aec4962f9ea93c431d5714ed1be1c93f13e1a8618e70035ba2b0564d9e633f2e"}, - {file = "protobuf-5.29.4-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:d7d3f7d1d5a66ed4942d4fefb12ac4b14a29028b209d4bfb25c68ae172059922"}, - 
{file = "protobuf-5.29.4-cp38-cp38-win32.whl", hash = "sha256:1832f0515b62d12d8e6ffc078d7e9eb06969aa6dc13c13e1036e39d73bebc2de"}, - {file = "protobuf-5.29.4-cp38-cp38-win_amd64.whl", hash = "sha256:476cb7b14914c780605a8cf62e38c2a85f8caff2e28a6a0bad827ec7d6c85d68"}, - {file = "protobuf-5.29.4-cp39-cp39-win32.whl", hash = "sha256:fd32223020cb25a2cc100366f1dedc904e2d71d9322403224cdde5fdced0dabe"}, - {file = "protobuf-5.29.4-cp39-cp39-win_amd64.whl", hash = "sha256:678974e1e3a9b975b8bc2447fca458db5f93a2fb6b0c8db46b6675b5b5346812"}, - {file = "protobuf-5.29.4-py3-none-any.whl", hash = "sha256:3fde11b505e1597f71b875ef2fc52062b6a9740e5f7c8997ce878b6009145862"}, - {file = "protobuf-5.29.4.tar.gz", hash = "sha256:4f1dfcd7997b31ef8f53ec82781ff434a28bf71d9102ddde14d076adcfc78c99"}, + {file = "protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079"}, + {file = "protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc"}, + {file = "protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671"}, + {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015"}, + {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61"}, + {file = "protobuf-5.29.5-cp38-cp38-win32.whl", hash = "sha256:ef91363ad4faba7b25d844ef1ada59ff1604184c0bcd8b39b8a6bef15e1af238"}, + {file = "protobuf-5.29.5-cp38-cp38-win_amd64.whl", hash = "sha256:7318608d56b6402d2ea7704ff1e1e4597bee46d760e7e4dd42a3d45e24b87f2e"}, + {file = "protobuf-5.29.5-cp39-cp39-win32.whl", hash = "sha256:6f642dc9a61782fa72b90878af134c5afe1917c89a568cd3476d758d3c3a0736"}, + {file = "protobuf-5.29.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:470f3af547ef17847a28e1f47200a1cbf0ba3ff57b7de50d22776607cd2ea353"}, + {file = "protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5"}, + {file = "protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84"}, ] [[package]] @@ -3754,7 +3894,7 @@ files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] -markers = {main = "platform_python_implementation != \"PyPy\"", test = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\" or implementation_name == \"pypy\""} +markers = {test = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\" or implementation_name == \"pypy\""} [[package]] name = "pydantic" @@ -3903,6 +4043,22 @@ files = [ {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"}, ] +[[package]] +name = "pydyf" +version = "0.11.0" +description = "A low-level PDF generator." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydyf-0.11.0-py3-none-any.whl", hash = "sha256:0aaf9e2ebbe786ec7a78ec3fbffa4cdcecde53fd6f563221d53c6bc1328848a3"}, + {file = "pydyf-0.11.0.tar.gz", hash = "sha256:394dddf619cca9d0c55715e3c55ea121a9bf9cbc780cdc1201a2427917b86b64"}, +] + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pillow", "pytest", "ruff"] + [[package]] name = "pyflakes" version = "3.3.2" @@ -3921,7 +4077,7 @@ version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "test"] files = [ {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, @@ -3948,26 +4104,43 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "pyphen" +version = "0.17.2" +description = "Pure Python module to hyphenate text" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pyphen-0.17.2-py3-none-any.whl", hash = "sha256:3a07fb017cb2341e1d9ff31b8634efb1ae4dc4b130468c7c39dd3d32e7c3affd"}, + {file = "pyphen-0.17.2.tar.gz", hash = "sha256:f60647a9c9b30ec6c59910097af82bc5dd2d36576b918e44148d8b07ef3b4aa3"}, +] + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pytest", "ruff"] + [[package]] name = "pytest" -version = "8.3.5" +version = "8.4.0" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main", "test"] files = [ - {file = "pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820"}, - {file = "pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845"}, + {file = "pytest-8.4.0-py3-none-any.whl", hash = "sha256:f40f825768ad76c0977cbacdf1fd37c6f7a468e460ea6a0636078f8972d4517e"}, + {file = "pytest-8.4.0.tar.gz", hash = "sha256:14d920b48472ea0dbf68e45b96cd1ffda4705f33307dcc86c676c1b5104838a6"}, ] [package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -iniconfig = "*" -packaging = "*" +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} 
+iniconfig = ">=1" +packaging = ">=20" pluggy = ">=1.5,<2" +pygments = ">=2.7.2" [package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" @@ -4024,14 +4197,14 @@ pytest = ">=4.6" [[package]] name = "pytest-mock" -version = "3.14.0" +version = "3.14.1" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, - {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, + {file = "pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0"}, + {file = "pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e"}, ] [package.dependencies] @@ -4422,18 +4595,18 @@ websockets = ">=11,<15" [[package]] name = "redis" -version = "6.1.0" +version = "6.2.0" description = "Python client for Redis database and key-value store" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["test"] files = [ - {file = "redis-6.1.0-py3-none-any.whl", hash = "sha256:3b72622f3d3a89df2a6041e82acd896b0e67d9f54e9bcd906d091d23ba5219f6"}, - {file = "redis-6.1.0.tar.gz", hash = "sha256:c928e267ad69d3069af28a9823a07726edf72c7e37764f43dc0123f37928c075"}, + {file = "redis-6.2.0-py3-none-any.whl", hash = "sha256:c8ddf316ee0aab65f04a11229e94a64b2618451dab7a67cb2f77eb799d872d5e"}, + {file = "redis-6.2.0.tar.gz", hash = "sha256:e821f129b75dde6cb99dd35e5c76e8c49512a5a0d8dfdc560b2fbd44b85ca977"}, ] [package.extras] -hiredis = ["hiredis (>=3.0.0)"] +hiredis = 
["hiredis (>=3.2.0)"] jwt = ["pyjwt (>=2.9.0)"] ocsp = ["cryptography (>=36.0.1)", "pyopenssl (>=20.0.1)", "requests (>=2.31.0)"] @@ -4558,6 +4731,29 @@ files = [ {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, ] +[[package]] +name = "reportlab" +version = "4.4.1" +description = "The Reportlab Toolkit" +optional = false +python-versions = "<4,>=3.7" +groups = ["main"] +files = [ + {file = "reportlab-4.4.1-py3-none-any.whl", hash = "sha256:9217a1c8c1917217f819718b24972a96ad0c485a1c494749562d097b58d974b7"}, + {file = "reportlab-4.4.1.tar.gz", hash = "sha256:5f9b9fc0b7a48e8912c25ccf69d26b82980ab0da718e4f583fa720e8f8f5073f"}, +] + +[package.dependencies] +chardet = "*" +pillow = ">=9.0.0" + +[package.extras] +accel = ["rl_accel (>=0.9.0,<1.1)"] +bidi = ["rlbidi"] +pycairo = ["freetype-py (>=2.3.0,<2.4)", "rlPyCairo (>=0.2.0,<1)"] +renderpm = ["rl_renderPM (>=4.0.3,<4.1)"] +shaping = ["uharfbuzz"] + [[package]] name = "requests" version = "2.32.3" @@ -4728,30 +4924,30 @@ files = [ [[package]] name = "ruff" -version = "0.11.11" +version = "0.11.12" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" groups = ["dev"] files = [ - {file = "ruff-0.11.11-py3-none-linux_armv6l.whl", hash = "sha256:9924e5ae54125ed8958a4f7de320dab7380f6e9fa3195e3dc3b137c6842a0092"}, - {file = "ruff-0.11.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c8a93276393d91e952f790148eb226658dd275cddfde96c6ca304873f11d2ae4"}, - {file = "ruff-0.11.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d6e333dbe2e6ae84cdedefa943dfd6434753ad321764fd937eef9d6b62022bcd"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7885d9a5e4c77b24e8c88aba8c80be9255fa22ab326019dac2356cff42089fc6"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b5ab797fcc09121ed82e9b12b6f27e34859e4227080a42d090881be888755d4"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e231ff3132c1119ece836487a02785f099a43992b95c2f62847d29bace3c75ac"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a97c9babe1d4081037a90289986925726b802d180cca784ac8da2bbbc335f709"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8c4ddcbe8a19f59f57fd814b8b117d4fcea9bee7c0492e6cf5fdc22cfa563c8"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6224076c344a7694c6fbbb70d4f2a7b730f6d47d2a9dc1e7f9d9bb583faf390b"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:882821fcdf7ae8db7a951df1903d9cb032bbe838852e5fc3c2b6c3ab54e39875"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:dcec2d50756463d9df075a26a85a6affbc1b0148873da3997286caf1ce03cae1"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:99c28505ecbaeb6594701a74e395b187ee083ee26478c1a795d35084d53ebd81"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:9263f9e5aa4ff1dec765e99810f1cc53f0c868c5329b69f13845f699fe74f639"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:64ac6f885e3ecb2fdbb71de2701d4e34526651f1e8503af8fb30d4915a3fe345"}, - {file = "ruff-0.11.11-py3-none-win32.whl", hash = "sha256:1adcb9a18802268aaa891ffb67b1c94cd70578f126637118e8099b8e4adcf112"}, - {file = "ruff-0.11.11-py3-none-win_amd64.whl", hash = "sha256:748b4bb245f11e91a04a4ff0f96e386711df0a30412b9fe0c74d5bdc0e4a531f"}, - {file = "ruff-0.11.11-py3-none-win_arm64.whl", hash = "sha256:6c51f136c0364ab1b774767aa8b86331bd8e9d414e2d107db7a2189f35ea1f7b"}, - {file = "ruff-0.11.11.tar.gz", hash = "sha256:7774173cc7c1980e6bf67569ebb7085989a78a103922fb83ef3dfe230cd0687d"}, + {file = "ruff-0.11.12-py3-none-linux_armv6l.whl", hash = "sha256:c7680aa2f0d4c4f43353d1e72123955c7a2159b8646cd43402de6d4a3a25d7cc"}, + {file = "ruff-0.11.12-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2cad64843da9f134565c20bcc430642de897b8ea02e2e79e6e02a76b8dcad7c3"}, + {file = "ruff-0.11.12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:9b6886b524a1c659cee1758140138455d3c029783d1b9e643f3624a5ee0cb0aa"}, + {file = "ruff-0.11.12-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc3a3690aad6e86c1958d3ec3c38c4594b6ecec75c1f531e84160bd827b2012"}, + {file = "ruff-0.11.12-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f97fdbc2549f456c65b3b0048560d44ddd540db1f27c778a938371424b49fe4a"}, + {file = "ruff-0.11.12-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74adf84960236961090e2d1348c1a67d940fd12e811a33fb3d107df61eef8fc7"}, + {file = "ruff-0.11.12-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b56697e5b8bcf1d61293ccfe63873aba08fdbcbbba839fc046ec5926bdb25a3a"}, + {file = "ruff-0.11.12-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d47afa45e7b0eaf5e5969c6b39cbd108be83910b5c74626247e366fd7a36a13"}, + {file = 
"ruff-0.11.12-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bf9603fe1bf949de8b09a2da896f05c01ed7a187f4a386cdba6760e7f61be"}, + {file = "ruff-0.11.12-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08033320e979df3b20dba567c62f69c45e01df708b0f9c83912d7abd3e0801cd"}, + {file = "ruff-0.11.12-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:929b7706584f5bfd61d67d5070f399057d07c70585fa8c4491d78ada452d3bef"}, + {file = "ruff-0.11.12-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7de4a73205dc5756b8e09ee3ed67c38312dce1aa28972b93150f5751199981b5"}, + {file = "ruff-0.11.12-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2635c2a90ac1b8ca9e93b70af59dfd1dd2026a40e2d6eebaa3efb0465dd9cf02"}, + {file = "ruff-0.11.12-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d05d6a78a89166f03f03a198ecc9d18779076ad0eec476819467acb401028c0c"}, + {file = "ruff-0.11.12-py3-none-win32.whl", hash = "sha256:f5a07f49767c4be4772d161bfc049c1f242db0cfe1bd976e0f0886732a4765d6"}, + {file = "ruff-0.11.12-py3-none-win_amd64.whl", hash = "sha256:5a4d9f8030d8c3a45df201d7fb3ed38d0219bccd7955268e863ee4a115fa0832"}, + {file = "ruff-0.11.12-py3-none-win_arm64.whl", hash = "sha256:65194e37853158d368e333ba282217941029a28ea90913c67e558c611d04daa5"}, + {file = "ruff-0.11.12.tar.gz", hash = "sha256:43cf7f69c7d7c7d7513b9d59c5d8cafd704e05944f978614aa9faff6ac202603"}, ] [[package]] @@ -4793,14 +4989,14 @@ test = ["pytest"] [[package]] name = "setuptools" -version = "80.8.0" +version = "80.9.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" groups = ["main", "test"] files = [ - {file = "setuptools-80.8.0-py3-none-any.whl", hash = "sha256:95a60484590d24103af13b686121328cc2736bee85de8936383111e421b9edc0"}, - {file = "setuptools-80.8.0.tar.gz", hash = "sha256:49f7af965996f26d43c8ae34539c8d99c5042fbff34302ea151eaa9c207cd257"}, + {file = "setuptools-80.9.0-py3-none-any.whl", hash = 
"sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"}, + {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"}, ] [package.extras] @@ -4975,7 +5171,7 @@ description = "Standard library aifc redistribution. \"dead battery\"." optional = false python-versions = "*" groups = ["main"] -markers = "python_version >= \"3.13\"" +markers = "python_version == \"3.13\"" files = [ {file = "standard_aifc-3.13.0-py3-none-any.whl", hash = "sha256:f7ae09cc57de1224a0dd8e3eb8f73830be7c3d0bc485de4c1f82b4a7f645ac66"}, {file = "standard_aifc-3.13.0.tar.gz", hash = "sha256:64e249c7cb4b3daf2fdba4e95721f811bde8bdfc43ad9f936589b7bb2fae2e43"}, @@ -4992,7 +5188,7 @@ description = "Standard library chunk redistribution. \"dead battery\"." optional = false python-versions = "*" groups = ["main"] -markers = "python_version >= \"3.13\"" +markers = "python_version == \"3.13\"" files = [ {file = "standard_chunk-3.13.0-py3-none-any.whl", hash = "sha256:17880a26c285189c644bd5bd8f8ed2bdb795d216e3293e6dbe55bbd848e2982c"}, {file = "standard_chunk-3.13.0.tar.gz", hash = "sha256:4ac345d37d7e686d2755e01836b8d98eda0d1a3ee90375e597ae43aaf064d654"}, @@ -5164,6 +5360,44 @@ requests = ">=2.26.0" [package.extras] blobfile = ["blobfile (>=2)"] +[[package]] +name = "tinycss2" +version = "1.4.0" +description = "A tiny CSS parser" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"}, + {file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pytest", "ruff"] + +[[package]] +name = "tinyhtml5" +version = "2.0.0" +description = "HTML parser based on the WHATWG HTML specification" +optional = false 
+python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "tinyhtml5-2.0.0-py3-none-any.whl", hash = "sha256:13683277c5b176d070f82d099d977194b7a1e26815b016114f581a74bbfbf47e"}, + {file = "tinyhtml5-2.0.0.tar.gz", hash = "sha256:086f998833da24c300c414d9fe81d9b368fd04cb9d2596a008421cbc705fcfcc"}, +] + +[package.dependencies] +webencodings = ">=0.5.1" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["pytest", "ruff"] + [[package]] name = "tokenizers" version = "0.21.1" @@ -5261,14 +5495,14 @@ files = [ [[package]] name = "typing-extensions" -version = "4.13.2" -description = "Backported and Experimental Type Hints for Python 3.8+" +version = "4.14.0" +description = "Backported and Experimental Type Hints for Python 3.9+" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, - {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, + {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, + {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, ] [[package]] @@ -5430,6 +5664,44 @@ files = [ {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] +[[package]] +name = "weasyprint" +version = "65.1" +description = "The Awesome Document Factory" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "weasyprint-65.1-py3-none-any.whl", hash = "sha256:9baa54282dc86929f6b877034d06b0416e2a7cacb1af3f73d80960592fd0af89"}, + {file = "weasyprint-65.1.tar.gz", hash = "sha256:120281bdbd42ffaa7d7e5cedbe3182a2cef36ea5ad97fe9f357e43be6a1e58ea"}, +] + 
+[package.dependencies] +cffi = ">=0.6" +cssselect2 = ">=0.8.0" +fonttools = {version = ">=4.0.0", extras = ["woff"]} +Pillow = ">=9.1.0" +pydyf = ">=0.11.0" +Pyphen = ">=0.9.1" +tinycss2 = ">=1.4.0" +tinyhtml5 = ">=2.0.0b1" + +[package.extras] +doc = ["furo", "sphinx"] +test = ["pytest", "ruff"] + +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + [[package]] name = "websocket-client" version = "1.8.0" @@ -5827,14 +6099,14 @@ requests = "*" [[package]] name = "zipp" -version = "3.21.0" +version = "3.22.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, - {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, + {file = "zipp-3.22.0-py3-none-any.whl", hash = "sha256:fe208f65f2aca48b81f9e6fd8cf7b8b32c26375266b009b413d45306b6148343"}, + {file = "zipp-3.22.0.tar.gz", hash = "sha256:dd2f28c3ce4bc67507bfd3781d21b7bb2be31103b51a4553ad7d90b84e57ace5"}, ] [package.extras] @@ -5842,7 +6114,7 @@ check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \" cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest 
(>=6,!=8.1.*)", "pytest-ignore-flaky"] +test = ["big-O", "importlib_resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] type = ["pytest-mypy"] [[package]] @@ -5919,7 +6191,91 @@ docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"] test = ["coverage[toml]", "zope.event", "zope.testing"] testing = ["coverage[toml]", "zope.event", "zope.testing"] +[[package]] +name = "zopfli" +version = "0.2.3.post1" +description = "Zopfli module for python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "zopfli-0.2.3.post1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0137dd64a493ba6a4be37405cfd6febe650a98cc1e9dca8f6b8c63b1db11b41"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:aa588b21044f8a74e423d8c8a4c7fc9988501878aacced793467010039c50734"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9f4a7ec2770e6af05f5a02733fd3900f30a9cd58e5d6d3727e14c5bcd6e7d587"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f7d69c1a7168ad0e9cb864e8663acb232986a0c9c9cb9801f56bf6214f53a54d"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c2d2bc8129707e34c51f9352c4636ca313b52350bbb7e04637c46c1818a2a70"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:39e576f93576c5c223b41d9c780bbb91fd6db4babf3223d2a4fe7bf568e2b5a8"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cbe6df25807227519debd1a57ab236f5f6bad441500e85b13903e51f93a43214"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7cce242b5df12b2b172489daf19c32e5577dd2fac659eb4b17f6a6efb446fd5c"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-win32.whl", hash = 
"sha256:f815fcc2b2a457977724bad97fb4854022980f51ce7b136925e336b530545ae1"}, + {file = "zopfli-0.2.3.post1-cp310-cp310-win_amd64.whl", hash = "sha256:0cc20b02a9531559945324c38302fd4ba763311632d0ec8a1a0aa9c10ea363e6"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:518f1f4ed35dd69ce06b552f84e6d081f07c552b4c661c5312d950a0b764a58a"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:615a8ac9dda265e9cc38b2a76c3142e4a9f30fea4a79c85f670850783bc6feb4"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a82fc2dbebe6eb908b9c665e71496f8525c1bc4d2e3a7a7722ef2b128b6227c8"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37d011e92f7b9622742c905fdbed9920a1d0361df84142807ea2a528419dea7f"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e63d558847166543c2c9789e6f985400a520b7eacc4b99181668b2c3aeadd352"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:60db20f06c3d4c5934b16cfa62a2cc5c3f0686bffe0071ed7804d3c31ab1a04e"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:716cdbfc57bfd3d3e31a58e6246e8190e6849b7dbb7c4ce39ef8bbf0edb8f6d5"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3a89277ed5f8c0fb2d0b46d669aa0633123aa7381f1f6118c12f15e0fb48f8ca"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-win32.whl", hash = "sha256:75a26a2307b10745a83b660c404416e984ee6fca515ec7f0765f69af3ce08072"}, + {file = "zopfli-0.2.3.post1-cp311-cp311-win_amd64.whl", hash = "sha256:81c341d9bb87a6dbbb0d45d6e272aca80c7c97b4b210f9b6e233bf8b87242f29"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3f0197b6aa6eb3086ae9e66d6dd86c4d502b6c68b0ec490496348ae8c05ecaef"}, + {file = 
"zopfli-0.2.3.post1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5fcfc0dc2761e4fcc15ad5d273b4d58c2e8e059d3214a7390d4d3c8e2aee644e"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cac2b37ab21c2b36a10b685b1893ebd6b0f83ae26004838ac817680881576567"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d5ab297d660b75c159190ce6d73035502310e40fd35170aed7d1a1aea7ddd65"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba214f4f45bec195ee8559651154d3ac2932470b9d91c5715fc29c013349f8c"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c1e0ed5d84ffa2d677cc9582fc01e61dab2e7ef8b8996e055f0a76167b1b94df"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bfa1eb759e07d8b7aa7a310a2bc535e127ee70addf90dc8d4b946b593c3e51a8"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cd2c002f160502608dcc822ed2441a0f4509c52e86fcfd1a09e937278ed1ca14"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-win32.whl", hash = "sha256:7be5cc6732eb7b4df17305d8a7b293223f934a31783a874a01164703bc1be6cd"}, + {file = "zopfli-0.2.3.post1-cp312-cp312-win_amd64.whl", hash = "sha256:4e50ffac74842c1c1018b9b73875a0d0a877c066ab06bf7cccbaa84af97e754f"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecb7572df5372abce8073df078207d9d1749f20b8b136089916a4a0868d56051"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1cf720896d2ce998bc8e051d4b4ce0d8bec007aab6243102e8e1d22a0b2fb3f"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aad740b4d4fcbaaae4887823925166ffd062db3b248b3f432198fc287381d1a"}, + {file = 
"zopfli-0.2.3.post1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6617fb10f9e4393b331941861d73afb119cd847e88e4974bdbe8068ceef3f73f"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a53b18797cdef27e019db595d66c4b077325afe2fd62145953275f53d84ce40c"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b78008a69300d929ca2efeffec951b64a312e9a811e265ea4a907ab546d79fa6"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa5f90d6298bda02a95bc8dc8c3c19004d5a4e44bda00b67ca7431d857b4b54"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2768c877f76c8a0e7519b1c86c93757f3c01492ddde55751e9988afb7eff64e1"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-win32.whl", hash = "sha256:71390dbd3fbf6ebea9a5d85ffed8c26ee1453ee09248e9b88486e30e0397b775"}, + {file = "zopfli-0.2.3.post1-cp313-cp313-win_amd64.whl", hash = "sha256:a86eb88e06bd87e1fff31dac878965c26b0c26db59ddcf78bb0379a954b120de"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3827170de28faf144992d3d4dcf8f3998fe3c8a6a6f4a08f1d42c2ec6119d2bb"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b0ec13f352ea5ae0fc91f98a48540512eed0767d0ec4f7f3cb92d92797983d18"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f272186e03ad55e7af09ab78055535c201b1a0bcc2944edb1768298d9c483a4"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:29ea74e72ffa6e291b8c6f2504ce6c146b4fe990c724c1450eb8e4c27fd31431"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eb45a34f23da4f8bc712b6376ca5396914b0b7c09adbb001dad964eb7f3132f8"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:6482db9876c68faac2d20a96b566ffbf65ddaadd97b222e4e73641f4f8722fc4"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:95a260cafd56b8fffa679918937401c80bb38e1681c448b988022e4c3610965d"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:676919fba7311125244eb0c4393679ac5fe856e5864a15d122bd815205369fa0"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-win32.whl", hash = "sha256:b9026a21b6d41eb0e2e63f5bc1242c3fcc43ecb770963cda99a4307863dac12e"}, + {file = "zopfli-0.2.3.post1-cp38-cp38-win_amd64.whl", hash = "sha256:3c163911f8bad94b3e1db0a572e7c28ba681a0c91d0002ea1e4fa9264c21ef17"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b05296e8bc88c92e2b21e0a9bae4740c1551ee613c1d93a51fd28a7a0b2b6fbb"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f12000a6accdd4bf0a3fa6eaa1b1c7a7bc80af0a2edf3f89d770d3dcce1d0e22"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a241a68581d34d67b40c425cce3d1fd211c092f99d9250947824ccba9f491949"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3657e416ffb8f31d9d3424af12122bb251befae109f2e271d87d825c92fc5b7b"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:4915a41375bdee4db749ecd07d985a0486eb688a6619f713b7bf6fbfd145e960"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bbe429fc50686bb2a2608a30843e36fbaa123462a5284f136c7d9e0145220bfd"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2345e713260a350bea0b01a816a469ea356bc2d63d009a0d777691ecbbcf7493"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:fc39f5c27f962ec8660d8d20c24762431131b5d8c672b44b0a54cf2b5bcde9b9"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-win32.whl", hash = 
"sha256:9a6aec38a989bad7ddd1ef53f1265699e49e294d08231b5313d61293f3cd6237"}, + {file = "zopfli-0.2.3.post1-cp39-cp39-win_amd64.whl", hash = "sha256:b3df42f52502438ee973042cc551877d24619fa1cd38ef7b7e9ac74200daca8b"}, + {file = "zopfli-0.2.3.post1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4c1226a7e2c7105ac31503a9bb97454743f55d88164d6d46bc138051b77f609b"}, + {file = "zopfli-0.2.3.post1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48dba9251060289101343110ab47c0756f66f809bb4d1ddbb6d5c7e7752115c5"}, + {file = "zopfli-0.2.3.post1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89899641d4de97dbad8e0cde690040d078b6aea04066dacaab98e0b5a23573f2"}, + {file = "zopfli-0.2.3.post1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3654bfc927bc478b1c3f3ff5056ed7b20a1a37fa108ca503256d0a699c03bbb1"}, + {file = "zopfli-0.2.3.post1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c4278d1873ce6e803e5d4f8d702fd3026bd67fca744aa98881324d1157ddf748"}, + {file = "zopfli-0.2.3.post1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:1d8cc06605519e82b16df090e17cb3990d1158861b2872c3117f1168777b81e4"}, + {file = "zopfli-0.2.3.post1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1f990634fd5c5c8ced8edddd8bd45fab565123b4194d6841e01811292650acae"}, + {file = "zopfli-0.2.3.post1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91a2327a4d7e77471fa4fbb26991c6de4a738c6fc6a33e09bb25f56a870a4b7b"}, + {file = "zopfli-0.2.3.post1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fbe5bcf10d01aab3513550f284c09fef32f342b36f56bfae2120a9c4d12c130"}, + {file = "zopfli-0.2.3.post1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:34a99592f3d9eb6f737616b5bd74b48a589fdb3cb59a01a50d636ea81d6af272"}, + {file = "zopfli-0.2.3.post1.tar.gz", hash = 
"sha256:96484dc0f48be1c5d7ae9f38ed1ce41e3675fd506b27c11a6607f14b49101e99"}, +] + +[package.extras] +test = ["pytest"] + [metadata] lock-version = "2.1" python-versions = ">=3.12,<3.14" -content-hash = "f20bb21c7868b75d071485498c990b81aef3656cb5d779cbf19b2d0792dd8ff7" +content-hash = "2066ab33b53afb1cd919a3abed8985bb25be1b9db1069518ac3708d21b071ffa" diff --git a/pyproject.toml b/pyproject.toml index 560ecbd..74fe1b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "loguru (>=0.7.3,<0.8.0)", "boto3 (>=1.34.69,<2.0.0)", "aiohttp (>=3.11.14,<4.0.0)", - "smolagents (>=1.12.0,<2.0.0)", + "smolagents (==1.16.1)", "mammoth (>=1.9.0,<2.0.0)", "pdfminer-six (>=20240706,<20240707)", "python-pptx (>=1.0.2,<2.0.0)", @@ -45,6 +45,9 @@ dependencies = [ "python-multipart (>=0.0.20,<0.0.21)", "toml (>=0.10.2,<0.11.0)", "wikipedia-api (>=0.8.1,<0.9.0)", + "markdown2 (>=2.5.3,<3.0.0)", + "weasyprint (>=65.1,<66.0)", + "reportlab (>=4.4.1,<5.0.0)", ] [tool.ruff] @@ -118,7 +121,7 @@ max-statements = 75 [build-system] -requires = ["poetry-core>=2.0.0,<3.0.0"] +requires = ["poetry-core>=2.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry.group.dev.dependencies] diff --git a/tests/test_process_email.py b/tests/test_process_email.py index 83c0444..1a585cf 100644 --- a/tests/test_process_email.py +++ b/tests/test_process_email.py @@ -1,4 +1,5 @@ import os +import shutil from datetime import datetime from pathlib import Path from typing import Any, Optional @@ -279,6 +280,20 @@ async def mock_async_send_reply(*args, **kwargs): "ICS attachment was not prepared for sending by the schedule handle." 
) assert any(att["mimetype"] == "text/calendar" for att in sent_attachments) + + # Check for PDF attachment if it's the PDF handle + if handle_instructions.handle == "pdf": + sent_attachments = kwargs.get("attachments", []) + assert len(sent_attachments) > 0, "PDF handle should have attachments" + pdf_attachment = None + for att in sent_attachments: + if att["filename"].endswith(".pdf"): + pdf_attachment = att + break + assert pdf_attachment is not None, "PDF attachment was not prepared for sending by the PDF handle" + assert pdf_attachment["mimetype"] == "application/pdf", "PDF attachment should have correct mimetype" + assert len(pdf_attachment["content"]) > 0, "PDF attachment should have content" + return {"MessageId": "mocked_message_id_happy_path", "status": "sent"} mock_sender_instance.send_reply = MagicMock(side_effect=mock_async_send_reply) @@ -303,6 +318,15 @@ async def mock_async_send_reply(*args, **kwargs): returned_result.calendar_data.ics_content is not None and len(returned_result.calendar_data.ics_content) > 0 ), "ICS content is missing or empty for schedule handle" + + # Specific assertions for PDF handle + if handle_instructions.handle == "pdf": + assert returned_result.pdf_export is not None, "PDF export result should be present for PDF handle" + assert returned_result.pdf_export.filename.endswith(".pdf"), "PDF export should have .pdf filename" + assert returned_result.pdf_export.file_size > 0, "PDF export should have non-zero file size" + assert returned_result.pdf_export.mimetype == "application/pdf", "PDF export should have correct mimetype" + assert returned_result.pdf_export.title is not None, "PDF export should have a title" + assert returned_result.pdf_export.pages_estimated >= 1, "PDF export should have at least 1 page estimated" else: # If not expecting a sent reply (e.g. 
if we were to use SKIP_EMAIL_DELIVERY for some handles) mock_sender_instance.send_reply.assert_not_called() @@ -317,3 +341,317 @@ async def mock_async_send_reply(*args, **kwargs): # Ensure deep_research_mandatory flag was respected (qualitative check via EmailAgent logs if verbose, hard to assert directly without deeper mocks) # We trust EmailAgent tests for this part. # Here, we are testing the task's integration with the agent for each handle. + + +# --- PDF Export Specific Tests --- + + +def test_pdf_export_tool_direct(): + """Test the PDFExportTool directly to ensure it generates valid PDFs.""" + from mxtoai.tools.pdf_export_tool import PDFExportTool + + tool = PDFExportTool() + + # Test basic content export + result = tool.forward( + content="# Test Document\n\nThis is a test document with some content.\n\n- Item 1\n- Item 2\n- Item 3", + title="Test PDF Document" + ) + + assert result["success"] is True, f"PDF export failed: {result.get('error', 'Unknown error')}" + assert result["filename"] == "Test_PDF_Document.pdf" + assert result["file_size"] > 0 + assert result["mimetype"] == "application/pdf" + assert result["title"] == "Test PDF Document" + assert result["pages_estimated"] >= 1 + + # Verify the file actually exists and has content + pdf_path = result["file_path"] + assert os.path.exists(pdf_path), "PDF file should exist" + + # Read and verify PDF content + with open(pdf_path, "rb") as f: + pdf_content = f.read() + assert len(pdf_content) > 100, "PDF should have substantial content" + assert pdf_content[:4] == b"%PDF", "File should start with PDF magic bytes" + + # Clean up + os.unlink(pdf_path) + + +def test_pdf_export_tool_with_research_findings(): + """Test PDF export with research findings and attachments summary.""" + from mxtoai.tools.pdf_export_tool import PDFExportTool + + tool = PDFExportTool() + + result = tool.forward( + content="# Email Newsletter Summary\n\nThis is the main content of the email.", + title="Weekly Newsletter Export", + 
research_findings="## Research Results\n\n1. Finding one\n2. Finding two\n3. Finding three", + attachments_summary="## Attachments Processed\n\n- attachment1.pdf\n- attachment2.docx", + include_attachments=True + ) + + assert result["success"] is True + assert result["filename"] == "Weekly_Newsletter_Export.pdf" + assert result["file_size"] > 0 + + # Verify the file exists + pdf_path = result["file_path"] + assert os.path.exists(pdf_path), "PDF file should exist" + + # Clean up + os.unlink(pdf_path) + + +def test_pdf_export_content_cleaning(): + """Test that PDF export properly cleans email headers and formats content.""" + from mxtoai.tools.pdf_export_tool import PDFExportTool + + tool = PDFExportTool() + + # Content with email headers that should be removed + email_content = """From: sender@example.com +To: recipient@example.com +Subject: Test Email +Date: Mon, 01 Jan 2024 12:00:00 +0000 + +# Important Newsletter + +This is the actual content that should be preserved. + +## Section 1 +- Important point 1 +- Important point 2 + +## Section 2 +More important content here. +""" + + result = tool.forward(content=email_content) + + assert result["success"] is True + assert result["file_size"] > 0 + + # The cleaned content should not contain email headers + # We can't easily verify the internal content without parsing the PDF, + # but we can verify the tool runs successfully + + # Clean up + os.unlink(result["file_path"]) + + +def test_pdf_handle_full_integration(): + """Test the full PDF handle integration with a more comprehensive email.""" + import shutil + import tempfile + from unittest.mock import MagicMock, patch + + from mxtoai.tasks import process_email_task + + # Create comprehensive test email content + email_data = { + "to": "pdf@mxtoai.com", + "from_email": "test@example.com", + "subject": "Weekly AI Newsletter - Export to PDF", + "textContent": """# Weekly AI Newsletter + +## Top Stories This Week + +1. 
**Breaking**: New AI model achieves breakthrough in natural language understanding + - Improved accuracy by 15% over previous models + - Reduced computational requirements + - Available for public research + +2. **Industry News**: Major tech companies announce AI partnerships + - Focus on ethical AI development + - Shared research initiatives + - Open source commitments + +3. **Research Highlights**: Recent papers in machine learning + - Novel architectures for transformer models + - Advances in computer vision + - Multimodal learning approaches + +## Tools and Resources + +- New dataset for training language models +- Updated frameworks and libraries +- Community challenges and competitions + +## Upcoming Events + +- AI Conference 2024 (March 15-17) +- Workshop on Ethical AI (April 2) +- Research symposium (April 20) + +This newsletter provides a comprehensive overview of recent developments in AI research and industry trends. +""", + "messageId": "", + "date": "2024-01-01T12:00:00Z", + "recipients": ["pdf@mxtoai.com"], + "cc": None, + "bcc": None, + "references": None, + "attachments": [] + } + + # Create temporary directory for attachments + temp_dir = tempfile.mkdtemp() + + try: + with ( + patch("mxtoai.tasks.EmailSender") as MockEmailSender, + ): + mock_sender_instance = MockEmailSender.return_value + + # Track the PDF attachment that gets sent + captured_pdf_attachment = None + + async def mock_async_send_reply(*args, **kwargs): + nonlocal captured_pdf_attachment + sent_attachments = kwargs.get("attachments", []) + + # Find the PDF attachment + for att in sent_attachments: + if att["filename"].endswith(".pdf"): + captured_pdf_attachment = att + break + + return {"MessageId": "test-pdf-message-id", "status": "sent"} + + mock_sender_instance.send_reply = MagicMock(side_effect=mock_async_send_reply) + + # Run the task + result = process_email_task.fn( + email_data=email_data, + email_attachments_dir=temp_dir, + attachment_info=[] + ) + + # Verify successful 
processing + assert isinstance(result, DetailedEmailProcessingResult) + # Note: email status will be 'skipped' because test@example.com is in SKIP_EMAIL_DELIVERY + assert result.metadata.email_sent.status == "skipped" + assert len(result.metadata.errors) == 0, f"Processing errors: {result.metadata.errors}" + + # Verify PDF export result + assert result.pdf_export is not None, "PDF export result should be present" + assert result.pdf_export.filename.endswith(".pdf") + assert result.pdf_export.file_size > 1000, "PDF should be reasonably sized for the content" + assert result.pdf_export.mimetype == "application/pdf" + assert "Weekly AI Newsletter" in result.pdf_export.title or "AI Newsletter" in result.pdf_export.title + assert result.pdf_export.pages_estimated >= 1 + + # Email should not be attempted to be sent due to SKIP_EMAIL_DELIVERY + mock_sender_instance.send_reply.assert_not_called() + + # The PDF was generated but not attached due to skipped email delivery + # In this test case, we're focusing on verifying that PDF export works + # and that the result contains the expected PDF data + + # Verify email response content + assert result.email_content is not None + assert result.email_content.text is not None + assert len(result.email_content.text) > 0 + assert result.email_content.html is not None + assert len(result.email_content.html) > 0 + + finally: + # Clean up temporary directory + shutil.rmtree(temp_dir, ignore_errors=True) + + +def test_pdf_export_error_handling(): + """Test PDF export tool error handling for invalid inputs.""" + from mxtoai.tools.pdf_export_tool import MAX_FILENAME_LENGTH, PDFExportTool + + tool = PDFExportTool() + + # Test with empty content + result = tool.forward(content="") + assert result["success"] is True # Should still work with empty content + assert result["title"] == "Document" # Should use default title + + # Clean up if file was created + if "file_path" in result and os.path.exists(result["file_path"]): + 
os.unlink(result["file_path"]) + + # Test with very long title + long_title = "A" * 200 # Very long title + result = tool.forward(content="Test content", title=long_title) + assert result["success"] is True + # Filename should be truncated - use constant instead of magic number + max_filename_length = MAX_FILENAME_LENGTH + len(".pdf") # Reflects truncation logic + assert len(result["filename"]) <= max_filename_length + + # Clean up + if "file_path" in result and os.path.exists(result["file_path"]): + os.unlink(result["file_path"]) + + +def test_pdf_export_cleanup(): + """Test that PDF export properly cleans up temporary directories.""" + import tempfile + + from mxtoai.tools.pdf_export_tool import PDFExportTool + + # Track temporary directories created + original_mkdtemp = tempfile.mkdtemp + created_temp_dirs = [] + + def tracking_mkdtemp(*args, **kwargs): + temp_dir = original_mkdtemp(*args, **kwargs) + created_temp_dirs.append(temp_dir) + return temp_dir + + # Patch mkdtemp to track directory creation + tempfile.mkdtemp = tracking_mkdtemp + + try: + tool = PDFExportTool() + + # Verify temp directory was created + assert len(created_temp_dirs) == 1, "Expected exactly one temp directory to be created" + temp_dir_path = created_temp_dirs[0] + assert os.path.exists(temp_dir_path), "Temp directory should exist after tool initialization" + + # Generate a PDF + result = tool.forward( + content="# Test PDF Cleanup\n\nThis tests that temporary directories are properly cleaned up.", + title="Cleanup Test PDF" + ) + + assert result["success"] is True, f"PDF generation failed: {result.get('error', 'Unknown error')}" + pdf_file_path = result["file_path"] + assert os.path.exists(pdf_file_path), "PDF file should exist" + + # Verify the PDF is in the temp directory + assert pdf_file_path.startswith(temp_dir_path), "PDF should be in the temp directory" + + # Call cleanup explicitly + tool.cleanup() + + # Verify temp directory is cleaned up + assert not 
os.path.exists(temp_dir_path), "Temp directory should be cleaned up after explicit cleanup" + assert not os.path.exists(pdf_file_path), "PDF file should be cleaned up with the temp directory" + + finally: + # Restore original mkdtemp + tempfile.mkdtemp = original_mkdtemp + + # Clean up any remaining directories + for temp_dir in created_temp_dirs: + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir, ignore_errors=True) + + +if __name__ == "__main__": + # Run only PDF tests if executed directly + pytest.main([__file__ + "::test_pdf_export_tool_direct", "-v"]) + pytest.main([__file__ + "::test_pdf_export_tool_with_research_findings", "-v"]) + pytest.main([__file__ + "::test_pdf_export_content_cleaning", "-v"]) + pytest.main([__file__ + "::test_pdf_handle_full_integration", "-v"]) + pytest.main([__file__ + "::test_pdf_export_error_handling", "-v"]) + pytest.main([__file__ + "::test_pdf_export_cleanup", "-v"]) diff --git a/tests/test_report_formatter.py b/tests/test_report_formatter.py new file mode 100644 index 0000000..fb85477 --- /dev/null +++ b/tests/test_report_formatter.py @@ -0,0 +1,311 @@ +import re + +import pytest + +from mxtoai.scripts.report_formatter import ReportFormatter + + +class TestReportFormatterMarkdownFixes: + """Test all the markdown formatting fixes implemented in ReportFormatter.""" + + @pytest.fixture + def formatter(self): + """Create a ReportFormatter instance for testing.""" + return ReportFormatter() + + def test_header_separation_from_lists(self, formatter): + """Test that headers are properly separated from preceding lists.""" + markdown_content = """- Item 1 +- Item 2 +### Header Should Be Separated +Content after header""" + + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + + # Should have a blank line before the header + assert "\n\n### Header Should Be Separated" in fixed_markdown + + # Test HTML output - header should not be inside a list + html_output = formatter._to_html(fixed_markdown) + assert " tags + assert 
not re.search(r"
    .*", html_output, re.DOTALL) + + def test_bolded_links_in_lists(self, formatter): + """Test that bolded links in list items render correctly.""" + markdown_content = """### Calendar Links +- **[Google Calendar](https://calendar.google.com)** +- **[Outlook Calendar](https://outlook.live.com)**""" + + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + + # Should convert **[text](url)** to [**text**](url) + assert "- [**Google Calendar**](https://calendar.google.com)" in fixed_markdown + assert "- [**Outlook Calendar**](https://outlook.live.com)" in fixed_markdown + + # Test HTML output + html_output = formatter._to_html(fixed_markdown) + # Should have proper bold links in either order (both are valid) + google_link_ok = ( + 'Google Calendar' in html_output + or 'Google Calendar' in html_output + ) + outlook_link_ok = ( + 'Outlook Calendar' in html_output + or 'Outlook Calendar' in html_output + ) + + assert google_link_ok, "Google Calendar link should be bold" + assert outlook_link_ok, "Outlook Calendar link should be bold" + assert "** structure + assert "
      " in html_output + assert re.search(r"
        .*
          .*
        .*
      ", html_output, re.DOTALL) + + def test_complex_formatting_scenario(self, formatter): + """Test a complex scenario combining multiple formatting issues.""" + markdown_content = """### Event Details +- **Title:** Important Meeting +- **Date:** Tomorrow +- **Participants:** + - John Doe + - Jane Smith +### Calendar Links +- **[Google Calendar](https://calendar.google.com)** +- **[Outlook Calendar](https://outlook.live.com)** +1. Acknowledgment of Meeting Details +This confirms all the details above. +a. First action item +b. Second action item""" + + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + html_output = formatter._to_html(fixed_markdown) + + # Headers should be properly separated + assert "
      Google Calendar' in html_output + or 'Google Calendar' in html_output + ) + assert google_link_ok, "Google Calendar link should be bold" + + # Numbered items should stay as list items (no conversion to headers) + assert "1. Acknowledgment of Meeting Details" in fixed_markdown + # Letter lists should be converted + assert "1. First action item" in fixed_markdown + assert "2. Second action item" in fixed_markdown + + def test_preserve_normal_bold_text(self, formatter): + """Test that normal bold text (not in links) is preserved.""" + markdown_content = """- **Important:** This is bold text +- **Note:** Another bold item""" + + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + html_output = formatter._to_html(fixed_markdown) + + # Should preserve normal bold formatting + assert "**Important:**" in fixed_markdown + assert "**Note:**" in fixed_markdown + assert "Important:" in html_output + assert "Note:" in html_output + + def test_edge_case_empty_links(self, formatter): + """Test edge cases with empty or malformed links.""" + markdown_content = """- **[]()** +- Normal item +- **[Text only**""" + + # Should not crash and should handle gracefully + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + html_output = formatter._to_html(fixed_markdown) + + assert "Normal item" in html_output # Should still process other items + + def test_multiple_headers_in_sequence(self, formatter): + """Test multiple headers appearing without content between them.""" + markdown_content = """- Last list item +### First Header +### Second Header +### Third Header +Content here.""" + + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + + # Should add blank line before first header only (others already separated) + lines = fixed_markdown.split("\n") + first_header_index = next(i for i, line in enumerate(lines) if line.strip() == "### First Header") + assert lines[first_header_index - 1].strip() == "" # Blank line before first header + + def 
test_indented_lists(self, formatter): + """Test that indented lists maintain their structure.""" + markdown_content = """ - Indented item 1 + - Indented item 2 + - Double indented +- Normal item""" + + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + html_output = formatter._to_html(fixed_markdown) + + # Should maintain list structure + assert "
    • Indented item 1
    • " in html_output + assert "
    • Normal item
    • " in html_output + + def test_format_report_integration(self, formatter): + """Test the full format_report method with our fixes.""" + content = """### Test Report +- **Summary:** This is a test +- **[Link](http://example.com)** +1. Executive Summary and Key Points +Content here. +a. Action item +b. Another action""" + + # Test HTML format + html_result = formatter.format_report(content, format_type="html") + + assert "Summary:" in html_result + # Bold links should render correctly in either order + link_ok = ( + 'Link' in html_result + or 'Link' in html_result + ) + assert link_ok, "Link should be bold" + assert "Executive Summary and Key Points" in html_result + + # Test markdown format (should include fixes) + markdown_result = formatter.format_report(content, format_type="markdown") + + # Numbered items should stay as list items (no conversion to headers) + assert "1. Executive Summary and Key Points" in markdown_result + assert "1. Action item" in markdown_result + assert "2. Another action" in markdown_result + + def test_signature_preservation(self, formatter): + """Test that the signature block is properly handled.""" + content = "Test content" + + result_with_signature = formatter.format_report(content, include_signature=True, format_type="html") + result_without_signature = formatter.format_report(content, include_signature=False, format_type="html") + + assert "MXtoAI Assistant" in result_with_signature + assert "MXtoAI Assistant" not in result_without_signature + + def test_real_world_calendar_example(self, formatter): + """Test with the exact example from the user's original issue.""" + markdown_content = """### Event Details +- **Title:** 30-minute call: Enterprise use cases of MXtoAI with Founders +- **Date & Time:** June 12, 2025, 10:27 AM PDT (UTC-7) / 10:57 PM IST (UTC+5:30) +- **Duration:** 30 minutes +- **Location:** Virtual call +- **Description:** Discussion on enterprise use cases of MXtoAI with the +founders of the company. 
+- **Participants:** + - Anisha (Organizer): 28gautam97@gmail.com + - MXtoAI Founders: founders@mxtoai.com +### Calendar Links +- **[Google Calendar](https://www.google.com/calendar/render?action=TEMPLATE&text=30-minute+call)** +- **[Outlook Calendar](https://outlook.live.com/calendar/0/deeplink/compose)** +### ICS File Notice +- An .ics file is generated and can be attached to an email for calendar +scheduling. +### Notes +- The meeting is approved by Anisha and details have been confirmed.""" + + fixed_markdown = formatter._fix_ai_markdown(markdown_content) + html_output = formatter._to_html(fixed_markdown) + + # Should have proper structure with headers separated from lists + assert "" in html_output + assert "
    • " in html_output + # Bold links should render correctly in either order + google_link_ok = ( + 'Google Calendar' + in html_output + or 'Google Calendar' + in html_output + ) + outlook_link_ok = ( + 'Outlook Calendar' + in html_output + or 'Outlook Calendar' + in html_output + ) + + assert google_link_ok, "Google Calendar link should be bold" + assert outlook_link_ok, "Outlook Calendar link should be bold" + # Should not have malformed bold tags + assert "**