From d07902ea6238a771ffd95d36174156362e71bf9b Mon Sep 17 00:00:00 2001 From: Andrew Ginns Date: Thu, 22 May 2025 17:18:31 +0000 Subject: [PATCH] feat: Add levels of mermaid fixes to perform. Convert npx MCP to python for consistent invocation and custom MCP decorated functions. Add logging --- .../eval_multi_mcp/evals_pydantic_mcp.py | 196 +++++--------- .../multi_mcp/mermaid_diagrams.py | 68 ++++- mcp_servers/mermaid_validator.py | 256 ++++++++++++++++++ pyproject.toml | 1 + uv.lock | 24 ++ 5 files changed, 417 insertions(+), 128 deletions(-) create mode 100644 mcp_servers/mermaid_validator.py diff --git a/agents_mcp_usage/multi_mcp/eval_multi_mcp/evals_pydantic_mcp.py b/agents_mcp_usage/multi_mcp/eval_multi_mcp/evals_pydantic_mcp.py index 1c568ec..9b43814 100644 --- a/agents_mcp_usage/multi_mcp/eval_multi_mcp/evals_pydantic_mcp.py +++ b/agents_mcp_usage/multi_mcp/eval_multi_mcp/evals_pydantic_mcp.py @@ -1,6 +1,5 @@ import asyncio import os -import subprocess from typing import Any import logfire @@ -14,8 +13,11 @@ from agents_mcp_usage.multi_mcp.mermaid_diagrams import ( invalid_mermaid_diagram_easy, + invalid_mermaid_diagram_medium, + invalid_mermaid_diagram_hard, valid_mermaid_diagram, ) +from mcp_servers.mermaid_validator import validate_mermaid_diagram load_dotenv() @@ -27,8 +29,8 @@ logfire.instrument_pydantic_ai() # Default model to use -DEFAULT_MODEL = "gemini-2.5-pro-preview-03-25" -# DEFAULT_MODEL = "openai:o4-mini" +DEFAULT_MODEL = "gemini-2.5-pro-preview-05-06" + # Configure MCP servers local_server = MCPServerStdio( command="uv", @@ -39,53 +41,23 @@ ], ) mermaid_server = MCPServerStdio( - command="npx", + command="uv", args=[ - "-y", - "@rtuin/mcp-mermaid-validator@latest", + "run", + "mcp_servers/mermaid_validator.py", ], ) # Create Agent with MCP servers -def create_agent(model: str = DEFAULT_MODEL): +def create_agent(model: str = DEFAULT_MODEL, model_settings: dict[str, Any] = {}): return Agent( model, mcp_servers=[local_server, mermaid_server], + model_settings=model_settings, ) -agent = create_agent() -Agent.instrument_all() - - -async def main( - query: str = "Hi!", request_limit: int = 5, model: str = DEFAULT_MODEL -) -> Any: - """ - Main function to run the agent - - Args: - query (str): The query to run the agent with - request_limit (int): The number of requests to make to the MCP servers - model (str): The model to use for the agent - - Returns: - The result from the agent's execution - """ - # Create a fresh agent with the specified model - current_agent = create_agent(model) - - # Set a request limit for LLM calls - usage_limits = UsageLimits(request_limit=request_limit) - - # Invoke the agent with the usage limits - async with current_agent.run_mcp_servers(): - result = await current_agent.run(query, usage_limits=usage_limits) - - return result - - # Define input and output schema for evaluations class MermaidInput(BaseModel): invalid_diagram: str @@ -110,86 +82,35 @@ class MermaidDiagramValid(Evaluator[MermaidInput, MermaidOutput]): async def evaluate( self, ctx: EvaluatorContext[MermaidInput, MermaidOutput] ) -> float: - diagram = ctx.output.fixed_diagram - - # Extract mermaid code from markdown code block if present - mermaid_code = diagram - if "```mermaid" in diagram and "```" in diagram: - start_idx = diagram.find("```mermaid") + len("```mermaid") - end_idx = diagram.rfind("```") - mermaid_code = diagram[start_idx:end_idx].strip() - - # Validate using mmdc - is_valid, _ = self.validate_mermaid_string_via_mmdc(mermaid_code) - return 1.0 if is_valid else 0.0 
- - def validate_mermaid_string_via_mmdc( - self, mermaid_code: str, mmdc_path: str = "mmdc" - ) -> tuple[bool, str]: - """ - Validates a Mermaid string by attempting to compile it using the - Mermaid CLI (mmdc). Requires mmdc to be installed and in PATH, - or mmdc_path to be explicitly provided. - - Args: - mermaid_code: The string containing the Mermaid diagram syntax. - mmdc_path: The command or path to the mmdc executable. - - Returns: - A tuple (is_valid: bool, message: str). - 'message' will contain stderr output if not valid, or a success message. - """ - # Define temporary file names - temp_mmd_file = "temp_mermaid_for_validation.mmd" - # mmdc requires an output file, even if we don't use its content for validation. - temp_output_file = "temp_mermaid_output.svg" - - # Write the mermaid code to a temporary file - with open(temp_mmd_file, "w", encoding="utf-8") as f: - f.write(mermaid_code) - - try: - # Construct the command to run mmdc - command = [mmdc_path, "-i", temp_mmd_file, "-o", temp_output_file] - - # Execute the mmdc command - process = subprocess.run( - command, - capture_output=True, # Capture stdout and stderr - text=True, # Decode output as text - check=False, # Do not raise an exception for non-zero exit codes - encoding="utf-8", + # Strip whitespace, remove backticks and ```mermaid markers + input_str = ctx.output.fixed_diagram.strip() + + # Remove ```mermaid and ``` markers + if input_str.startswith("```mermaid"): + input_str = input_str[len("```mermaid") :].strip() + if input_str.endswith("```"): + input_str = input_str[:-3].strip() + + # Remove any remaining backticks + input_str = input_str.replace("`", "") + + logfire.info( + "Evaluating mermaid diagram validity", + diagram_length=len(input_str), + diagram_preview=input_str[:100], + ) + + # Use the MCP server's validation function + result = await validate_mermaid_diagram(input_str) + + if result.is_valid: + logfire.info("Mermaid diagram validation succeeded") + else: + logfire.warning( + "Mermaid diagram validation failed", error_message=result.error_message ) - if process.returncode == 0: - return True, "Syntax appears valid (compiled successfully by mmdc)." - else: - # mmdc usually prints errors to stderr. - error_message = process.stderr.strip() - # Sometimes, syntax errors might also appear in stdout for certain mmdc versions or error types - if not error_message and process.stdout.strip(): - error_message = process.stdout.strip() - return ( - False, - f"Invalid syntax or mmdc error (exit code {process.returncode}):\n{error_message}", - ) - except FileNotFoundError: - return False, ( - f"Validation failed: '{mmdc_path}' command not found. " - "Please ensure Mermaid CLI (mmdc) is installed and in your system's PATH, " - "or provide the full path to the executable." - ) - except Exception as e: - return ( - False, - f"Validation failed due to an unexpected error during mmdc execution: {e}", - ) - finally: - # Clean up the temporary files - if os.path.exists(temp_mmd_file): - os.remove(temp_mmd_file) - if os.path.exists(temp_output_file): - os.remove(temp_output_file) + return 1.0 if result.is_valid else 0.0 async def fix_mermaid_diagram( @@ -206,9 +127,15 @@ async def fix_mermaid_diagram( """ query = f"Add the current time and fix the mermaid diagram syntax using the validator: {inputs.invalid_diagram}. Return only the fixed mermaid diagram between backticks." 
- result = await main(query, model=model) + # Create a fresh agent for each invocation to avoid concurrent usage issues + current_agent = create_agent(model) + usage_limits = UsageLimits(request_limit=5) - # Extract the mermaid diagram from the output + # Use the agent's context manager directly in this function + async with current_agent.run_mcp_servers(): + result = await current_agent.run(query, usage_limits=usage_limits) + + # Extract the mermaid diagram from the result output output = result.output # Logic to extract the diagram from between backticks @@ -232,12 +159,25 @@ def create_evaluation_dataset(judge_model: str = DEFAULT_MODEL): The evaluation dataset """ return Dataset[MermaidInput, MermaidOutput, Any]( + # Construct 3 tests, each asks the LLM to fix an invalid mermaid diagram of increasing difficulty cases=[ Case( - name="fix_invalid_diagram_1", + name="fix_invalid_diagram_easy", inputs=MermaidInput(invalid_diagram=invalid_mermaid_diagram_easy), expected_output=MermaidOutput(fixed_diagram=valid_mermaid_diagram), - metadata={"test_type": "mermaid_easy_fix", "iteration": 1}, + metadata={"test_type": "mermaid_easy_fix"}, + ), + Case( + name="fix_invalid_diagram_medium", + inputs=MermaidInput(invalid_diagram=invalid_mermaid_diagram_medium), + expected_output=MermaidOutput(fixed_diagram=valid_mermaid_diagram), + metadata={"test_type": "mermaid_medium_fix"}, + ), + Case( + name="fix_invalid_diagram_hard", + inputs=MermaidInput(invalid_diagram=invalid_mermaid_diagram_hard), + expected_output=MermaidOutput(fixed_diagram=valid_mermaid_diagram), + metadata={"test_type": "mermaid_hard_fix"}, ), ], evaluators=[ @@ -249,9 +189,9 @@ def create_evaluation_dataset(judge_model: str = DEFAULT_MODEL): model=judge_model, ), LLMJudge( - rubric="The fixed diagram should maintain the same overall structure and intent as the expected output diagram while fixing any syntax errors." + rubric="The output diagram should maintain the same overall structure and intent as the expected output diagram while fixing any syntax errors." + "Check if nodes, connections, and labels are preserved." 
- + "The current time should be placeholder should be replace with a datetime", + + "The current time should be placeholder should be replace with a valid datetime", include_input=False, model=judge_model, ), @@ -276,20 +216,24 @@ async def fix_with_model(inputs: MermaidInput) -> MermaidOutput: return await fix_mermaid_diagram(inputs, model=model) report = await dataset.evaluate( - fix_with_model, name=f"{model}-multi-mcp-mermaid-diagram-fix-evals" + fix_with_model, + name=f"{model}-multi-mcp-mermaid-diagram-fix-evals", + max_concurrency=1, # Run one evaluation at a time ) - report.print(include_input=True, include_output=True) + report.print(include_input=False, include_output=False) return report if __name__ == "__main__": # You can use different models for the agent and the judge - agent_model = os.getenv("AGENT_MODEL", DEFAULT_MODEL) + # agent_model = os.getenv("AGENT_MODEL", DEFAULT_MODEL) + agent_model = "gemini-2.5-flash-preview-04-17" + # agent_model = "openai:o4-mini" + # agent_model = "gemini-2.5-flash-preview-04-17" judge_model = os.getenv("JUDGE_MODEL", DEFAULT_MODEL) async def run_all(): - # Run evaluations await run_evaluations(model=agent_model, judge_model=judge_model) asyncio.run(run_all()) diff --git a/agents_mcp_usage/multi_mcp/mermaid_diagrams.py b/agents_mcp_usage/multi_mcp/mermaid_diagrams.py index a777a7f..baffa49 100644 --- a/agents_mcp_usage/multi_mcp/mermaid_diagrams.py +++ b/agents_mcp_usage/multi_mcp/mermaid_diagrams.py @@ -63,7 +63,7 @@ ``` """ -invalid_mermaid_diagram_easy = """ +invalid_mermaid_diagram_medium = """ ```mermaid graph LR User((User)) --> |"Run script
(e.g., pydantic_mcp.py)"| Agent @@ -127,7 +127,71 @@ ``` """ -valid_mermaid_diagram = """ +invalid_mermaid_diagram_easy = """ +```mermaid +graph LR + User((User)) --> |"Run script
(e.g., pydantic_mcp.py)"| Agent + + %% Agent Frameworks + subgraph "Agent Frameworks" + direction TB + Agent[Agent] + ADK["Google ADK
(adk_mcp.py)"] + LG["LangGraph
(langgraph_mcp.py)"] + OAI["OpenAI Agents
(oai-agent_mcp.py)"] + PYD["Pydantic-AI
(pydantic_mcp.py)"] + + Agent --> ADK + Agent --> LG + Agent --> OAI + Agent --> PYD + end + + %% MCP Server + subgraph "MCP Server" + direction TB + MCP["Model Context Protocol Server
(run_server.py)"] + Tools["Tools
- add(a, b)
- get_current_time() e.g. {current_time}"] + Resources["Resources
- greeting://{{name}}"] + MCPs --- Tools + MCPs --- Resources + end + + subgraph "LLM Providers" + OAI_LLM["OpenAI Models"] + GEM["Google Gemini Models"] + OTHER["Other LLM Providers..."] + end + + Logfire[("Logfire
Tracing")] + + ADK --> MCP + LG --> MCP + OAI --> MCP + PYD --> MCP + + MCP --> OAI_LLM + MCP --> GEM + MCP --> OTHER + + ADK --> Logfire + LG --> Logfire + OAI --> Logfire + PYD --> Logfire + + LLM_Response[("Response")] --> User + OAI_LLM --> LLM_Response + GEM --> LLM_Response + OTHER --> LLM_Response + + style MCP fill:#f9f,stroke:#333,stroke-width:2px + style User fill:#bbf,stroke:#338,stroke-width:2px + style Logfire fill:#bfb,stroke:#383,stroke-width:2px + style LLM_Response fill:#fbb,stroke:#833,stroke-width:2px +``` +""" + +valid_mermaid_diagram = """` ```mermaid graph LR User((User)) --> |"Run script
(e.g., pydantic_mcp.py)"| Agent diff --git a/mcp_servers/mermaid_validator.py b/mcp_servers/mermaid_validator.py new file mode 100644 index 0000000..fcf5c13 --- /dev/null +++ b/mcp_servers/mermaid_validator.py @@ -0,0 +1,256 @@ +import json +import os +import subprocess +import tempfile +import argparse +import asyncio +import sys +import time +from typing import Optional + +from loguru import logger +from mcp.server.fastmcp import FastMCP +from pydantic import BaseModel, Field + +# Configure loguru +logger.remove() # Remove default handler +logger.add( + sys.stderr, + format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {name}:{function}:{line} - {message}", + level="DEBUG", +) +logger.add( + "mermaid_validator.log", + rotation="10 MB", + retention="1 week", + level="DEBUG", +) + +# Add a file logger with more details for debugging +logger.add( + "mermaid_raw_input.log", + format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {message}", + level="DEBUG", + filter=lambda record: "raw_input" in record["extra"], + rotation="10 MB", +) + +# Patch mcp.run to capture raw input +original_run = FastMCP.run + + +def patched_run(self, transport: str = "stdio", *args, **kwargs): + logger.info(f"Starting MCP server with transport: {transport}") + + # Patch stdio handling if needed + if transport == "stdio": + # Store the original stdin.read + original_stdin_read = sys.stdin.read + original_stdin_readline = sys.stdin.readline + + def patched_read(n=-1): + data = original_stdin_read(n) + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + logger.bind(raw_input=True).debug( + f"[STDIN READ][{timestamp}] Length: {len(data)}, Data: {data}" + ) + return data + + def patched_readline(size=-1): + data = original_stdin_readline(size) + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + logger.bind(raw_input=True).debug( + f"[STDIN READLINE][{timestamp}] Line: {data}" + ) + return data + + # Patch stdin.read + sys.stdin.read = patched_read + sys.stdin.readline = patched_readline + + # Call the original run method + return original_run(self, transport, *args, **kwargs) + + +# Apply the patch +FastMCP.run = patched_run + +mcp = FastMCP("mermaid-validator") + + +class MermaidValidationResult(BaseModel): + """Result of mermaid diagram validation.""" + + is_valid: bool = Field(description="Whether the mermaid diagram is valid") + error_message: Optional[str] = Field( + None, description="Error message if the diagram is invalid" + ) + + +@mcp.tool() +async def validate_mermaid_diagram(diagram_text: str) -> MermaidValidationResult: + """Validate a mermaid diagram. + + Uses mermaid-cli to validate an input string as a mermaid diagram. + Expects input to be mermaid syntax only, no wrapping code blocks or ```mermaid tags. 
+ + Args: + diagram_text: The mermaid diagram text to validate + + Returns: + A MermaidValidationResult object containing validation results + """ + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + logger.info(f"MCP tool called: validate_mermaid_diagram at {timestamp}") + logger.debug(f"Input diagram text (first 100 chars): {diagram_text[:100]}...") + # Log the full diagram text to the raw input log + logger.bind(raw_input=True).debug( + f"[TOOL CALL][{timestamp}] Full diagram text: {diagram_text}" + ) + + temp_file_path = None + puppeteer_config_path = None + + try: + with tempfile.NamedTemporaryFile( + suffix=".mmd", mode="w", delete=False + ) as temp_file: + temp_file.write(diagram_text) + temp_file_path = temp_file.name + logger.debug(f"Created temporary file: {temp_file_path}") + + puppeteer_config = {"args": ["--no-sandbox", "--disable-setuid-sandbox"]} + with tempfile.NamedTemporaryFile( + suffix=".json", mode="w", delete=False + ) as config_file: + json.dump(puppeteer_config, config_file) + puppeteer_config_path = config_file.name + logger.debug(f"Created puppeteer config file: {puppeteer_config_path}") + + # Run mermaid-cli to validate input string as mermaid diagram + logger.debug("Running mermaid-cli validation...") + result = subprocess.run( + [ + "npx", + "-y", + "@mermaid-js/mermaid-cli", + "-i", + temp_file_path, + "--puppeteerConfigFile", + puppeteer_config_path, + ], + capture_output=True, + text=True, + ) + + if result.returncode == 0: + logger.info("Mermaid diagram validation successful") + return MermaidValidationResult(is_valid=True, error_message=None) + else: + logger.warning(f"Mermaid diagram validation failed: {result.stderr}") + logger.bind(raw_input=True).debug( + f"[VALIDATION ERROR][{timestamp}] {result.stderr}" + ) + return MermaidValidationResult( + is_valid=False, + error_message=f"Mermaid diagram is invalid: {result.stderr}", + ) + except Exception as e: + logger.error(f"Error validating mermaid diagram: {str(e)}") + logger.bind(raw_input=True).debug(f"[EXCEPTION][{timestamp}] {str(e)}") + return MermaidValidationResult( + is_valid=False, + error_message=f"Error validating mermaid diagram: {str(e)}", + ) + finally: + for file_path in [temp_file_path, puppeteer_config_path]: + if file_path and os.path.exists(file_path): + try: + os.unlink(file_path) + logger.debug(f"Deleted temporary file: {file_path}") + except Exception as e: + logger.error(f"Error deleting temporary file {file_path}: {e}") + + +@mcp.resource("example://mermaid-diagram") +def get_example_mermaid_diagram(): + """Provides an example mermaid diagram for the client. + + Returns: + Dict containing an example mermaid diagram + """ + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + logger.info(f"Resource called: example://mermaid-diagram at {timestamp}") + logger.bind(raw_input=True).debug( + f"[RESOURCE CALL][{timestamp}] example://mermaid-diagram" + ) + return """ + ```mermaid + graph TD + A[Start] --> B{Is it valid?} + B -->|Yes| C[Output valid result] + B -->|No| D[Output error message] + C --> E[End] + D --> E + ``` + """ + + +@mcp.prompt("validate-string-as-mermaid") +def validate_string_as_mermaid(diagram_text: str) -> MermaidValidationResult: + """Validate a string as a mermaid diagram. + + Uses mermaid-cli to validate an input string as a mermaid diagram. + Expects input to be mermaid syntax only, no wrapping code blocks or ```mermaid tags. 
+ """ + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + logger.info(f"Prompt called: validate-string-as-mermaid at {timestamp}") + logger.debug(f"Original input (first 100 chars): {diagram_text[:100]}...") + logger.bind(raw_input=True).debug( + f"[PROMPT CALL][{timestamp}] Original full input: {diagram_text}" + ) + + # Strip whitespace, remove backticks and ```mermaid markers + input_str = diagram_text.strip() + + # Remove ```mermaid and ``` markers + if input_str.startswith("```mermaid"): + input_str = input_str[len("```mermaid") :].strip() + logger.debug("Removed ```mermaid prefix") + if input_str.endswith("```"): + input_str = input_str[:-3].strip() + logger.debug("Removed ``` suffix") + + # Remove any remaining backticks + input_str = input_str.replace("`", "") + + logger.debug( + f"Cleaned input for validation (first 100 chars): {input_str[:100]}..." + ) + logger.bind(raw_input=True).debug( + f"[PROMPT CALL][{timestamp}] Cleaned full input: {input_str}" + ) + + # Validate the cleaned diagram + return validate_mermaid_diagram(input_str) + + +if __name__ == "__main__": + logger.info("Starting mermaid-validator MCP server") + parser = argparse.ArgumentParser(description="Mermaid diagram validator") + parser.add_argument( + "--debug", action="store_true", help="Run validation on a placeholder diagram" + ) + args = parser.parse_args() + + if args.debug: + # Example diagram for debugging + logger.info("Running in debug mode with example diagram") + debug_diagram = get_example_mermaid_diagram() + # Run the validation function + result = asyncio.run(validate_string_as_mermaid(debug_diagram)) + logger.info(f"Debug validation result: {result}") + print(f"Debug validation result: {result}") + else: + transport = os.getenv("MCP_TRANSPORT", "stdio") + mcp.run(transport=transport) diff --git a/pyproject.toml b/pyproject.toml index a724c65..eea26ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,7 @@ dependencies = [ "langchain-mcp-adapters>=0.0.9", "langgraph>=0.3.31", "logfire>=3.14.0", + "loguru>=0.7.3", "mcp==1.6.0", "openai-agents>=0.0.12", "pydantic-ai-slim[mcp]>=0.1.3", diff --git a/uv.lock b/uv.lock index d4a6892..fb6ebc2 100644 --- a/uv.lock +++ b/uv.lock @@ -1064,6 +1064,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5b/3e/6dbe2d1051d7463eb3a8e492e42f464c1eef501b355a5bb25876ca68da28/logfire_api-3.14.0-py3-none-any.whl", hash = "sha256:e01f9049bca809cc102eb7550c4263fe560fa26abd68688e6dc2b8666e506a57", size = 79475, upload-time = "2025-04-11T16:08:13.134Z" }, ] +[[package]] +name = "loguru" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" }, +] + [[package]] name = "markdown-it-py" version = "3.0.0" @@ -1531,6 +1544,7 @@ dependencies = [ { name = "langchain-mcp-adapters" }, { name = "langgraph" }, { 
name = "logfire" }, + { name = "loguru" }, { name = "mcp" }, { name = "openai-agents" }, { name = "pydantic-ai-slim", extra = ["mcp"] }, @@ -1549,6 +1563,7 @@ requires-dist = [ { name = "langchain-mcp-adapters", specifier = ">=0.0.9" }, { name = "langgraph", specifier = ">=0.3.31" }, { name = "logfire", specifier = ">=3.14.0" }, + { name = "loguru", specifier = ">=0.7.3" }, { name = "mcp", specifier = "==1.6.0" }, { name = "openai-agents", specifier = ">=0.0.12" }, { name = "pydantic-ai-slim", extras = ["mcp"], specifier = ">=0.1.3" }, @@ -2019,6 +2034,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, ] +[[package]] +name = "win32-setctime" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b663af0e9bb3d4de6578d08f46b1b101c2442fd9aecaa2/win32_setctime-1.2.0.tar.gz", hash = "sha256:ae1fdf948f5640aae05c511ade119313fb6a30d7eabe25fef9764dca5873c4c0", size = 4867, upload-time = "2024-12-07T15:28:28.314Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" }, +] + [[package]] name = "wrapt" version = "1.17.2"