From fbdfd8d7f6583a9523a77238df0c7b4e03f48068 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Fri, 20 Feb 2026 17:52:27 -0700 Subject: [PATCH 01/49] Add execution environments abstraction and toolset Introduces ExecutionEnvironment ABC and three implementations (LocalEnvironment, DockerEnvironment, MemoryEnvironment) along with ExecutionEnvironmentToolset for exposing coding-agent-style tools (ls, shell, read_file, write_file, replace_str, glob, grep). This is the foundation for building coding agents and other agents that need shell and filesystem access, split out from the broader code-mode work for independent review and merge. --- .gitignore | 1 + docs/api/environments.md | 31 + docs/environments.md | 327 ++ docs/install.md | 1 + mkdocs.yml | 3 + .../pydantic_ai/environments/__init__.py | 27 + .../pydantic_ai/environments/_base.py | 559 ++++ .../pydantic_ai/environments/docker.py | 574 ++++ .../pydantic_ai/environments/local.py | 345 ++ .../pydantic_ai/environments/memory.py | 267 ++ .../toolsets/execution_environment.py | 426 +++ pydantic_ai_slim/pyproject.toml | 2 + tests/test_environments.py | 2941 +++++++++++++++++ 13 files changed, 5504 insertions(+) create mode 100644 docs/api/environments.md create mode 100644 docs/environments.md create mode 100644 pydantic_ai_slim/pydantic_ai/environments/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/environments/_base.py create mode 100644 pydantic_ai_slim/pydantic_ai/environments/docker.py create mode 100644 pydantic_ai_slim/pydantic_ai/environments/local.py create mode 100644 pydantic_ai_slim/pydantic_ai/environments/memory.py create mode 100644 pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py create mode 100644 tests/test_environments.py diff --git a/.gitignore b/.gitignore index dbdfd83ee0..71791d4aa3 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ __pycache__ /scratch/ /.coverage env*/ +!**/environments/ /TODO.md /postgres-data/ 
.DS_Store diff --git a/docs/api/environments.md b/docs/api/environments.md new file mode 100644 index 0000000000..8752e5d6f5 --- /dev/null +++ b/docs/api/environments.md @@ -0,0 +1,31 @@ +# `pydantic_ai.environments` + +::: pydantic_ai.environments + options: + members: + - ExecutionEnvironment + - ExecutionEnvironmentToolset + - ExecutionProcess + - ExecutionResult + - FileInfo + +## `pydantic_ai.environments.local` + +::: pydantic_ai.environments.local + options: + members: + - LocalEnvironment + +## `pydantic_ai.environments.docker` + +::: pydantic_ai.environments.docker + options: + members: + - DockerEnvironment + +## `pydantic_ai.environments.memory` + +::: pydantic_ai.environments.memory + options: + members: + - MemoryEnvironment diff --git a/docs/environments.md b/docs/environments.md new file mode 100644 index 0000000000..1288d50f2e --- /dev/null +++ b/docs/environments.md @@ -0,0 +1,327 @@ +# Execution Environments & Sandboxes + +Pydantic AI provides [`ExecutionEnvironment`][pydantic_ai.environments.ExecutionEnvironment] — an abstraction for environments where agents can execute commands, read/write files, and search the filesystem — along with [`ExecutionEnvironmentToolset`][pydantic_ai.environments.ExecutionEnvironmentToolset], a ready-made [toolset](toolsets.md) that exposes these capabilities as tools. + +This is the foundation for building coding agents, data analysis bots, and other agents that need to interact with a shell and filesystem. 
+ +## Quick Start + +```python {title="environments_quickstart.py" test="skip"} +from pydantic_ai import Agent +from pydantic_ai.environments import ExecutionEnvironmentToolset +from pydantic_ai.environments.local import LocalEnvironment + +env = LocalEnvironment(root_dir='/tmp/workspace') +toolset = ExecutionEnvironmentToolset(env) + +agent = Agent('openai:gpt-5.2', toolsets=[toolset]) + +async def main(): + async with env: + result = await agent.run('Create a Python script that prints the first 10 Fibonacci numbers, then run it.') + print(result.output) +``` + +## Environments + +An [`ExecutionEnvironment`][pydantic_ai.environments.ExecutionEnvironment] defines where and how commands run. Three implementations are included: + +| Environment | Isolation | Use case | +|---|---|---| +| [`LocalEnvironment`][pydantic_ai.environments.local.LocalEnvironment] | None — runs on host | Development, testing, trusted agents | +| [`DockerEnvironment`][pydantic_ai.environments.docker.DockerEnvironment] | Container-level | Production, untrusted code | +| [`MemoryEnvironment`][pydantic_ai.environments.memory.MemoryEnvironment] | In-memory (no filesystem) | Unit testing | + +All environments are async context managers. Enter the environment before running the agent, and exit it to clean up: + +```python {title="environments_lifecycle.py" test="skip"} +from pydantic_ai.environments.docker import DockerEnvironment + +env = DockerEnvironment(image='python:3.12-slim') + +async def main(): + async with env: + result = await env.shell('python -c "print(42)"') + print(result.output) +``` + +### LocalEnvironment + +[`LocalEnvironment`][pydantic_ai.environments.local.LocalEnvironment] runs commands as local subprocesses within a specified root directory. It provides no isolation — use it for development, testing, and trusted agents. 
+ +```python {title="environments_local.py"} +from pydantic_ai.environments.local import LocalEnvironment + +env = LocalEnvironment( + root_dir='/tmp/workspace', + env_vars={'PYTHONPATH': '/tmp/workspace/lib'}, + inherit_env=True, # inherit host environment variables (default) +) +``` + +File operations (read, write, edit, ls, glob, grep) are confined to the root directory — path traversal attempts raise `PermissionError`. + +!!! info "Environment variable inheritance" + By default, `LocalEnvironment` inherits the host's environment variables. Set `inherit_env=False` for a clean environment where only explicitly provided `env_vars` (and per-call `env` overrides) are available. This is useful for reproducibility and testing. + +### DockerEnvironment + +[`DockerEnvironment`][pydantic_ai.environments.docker.DockerEnvironment] runs commands inside a Docker container with configurable resource limits, security options, and network access. + +Requires the `docker` package: `pip install pydantic-ai-slim[docker-sandbox]` + +```python {title="environments_docker.py" test="skip"} +from pydantic_ai.environments.docker import DockerEnvironment + +env = DockerEnvironment( + image='my-sandbox:latest', + env_vars={'MPLBACKEND': 'Agg'}, + memory_limit='512m', + cpu_limit=1.0, + network_disabled=True, +) +``` + +#### Building a custom Docker image + +`DockerEnvironment` runs whatever image you give it — it doesn't install packages at startup. Pre-build a custom image with any libraries your agent needs, so containers start fast and reproducibly. 
+ +**Example Dockerfile** — a Python data-science sandbox: + +```dockerfile {title="Dockerfile" test="skip" lint="skip"} +FROM python:3.12-slim + +# Install OS-level tools the agent might use (optional) +RUN apt-get update \ + && apt-get install -y --no-install-recommends git curl jq \ + && rm -rf /var/lib/apt/lists/* + +# Install Python packages +RUN pip install --no-cache-dir numpy pandas matplotlib requests + +WORKDIR /workspace +``` + +Build and tag the image: + +```bash +docker build -t my-sandbox:latest . +``` + +Then pass the tag to `DockerEnvironment`: + +```python {title="environments_docker_custom.py" test="skip"} +from pydantic_ai.environments.docker import DockerEnvironment + +env = DockerEnvironment(image='my-sandbox:latest') +``` + +!!! tip "Tips for custom images" + + - **Start from a slim base** (`python:3.12-slim`, `node:22-slim`, etc.) to keep image size and attack surface small. + - **Pin package versions** (e.g. `numpy==2.2.3`) for reproducible builds. + - **Use `--no-cache-dir`** with pip to avoid bloating the image with cached wheels. + - **Build once, run many times.** The image is pulled from the local Docker cache on each `DockerEnvironment` startup — no rebuild needed. + - **Use a registry** for team or CI workflows: push your image to Docker Hub, GitHub Container Registry, or a private registry, then reference it by its full name (e.g. `ghcr.io/myorg/my-sandbox:latest`). 
+ - **For Node.js** or other runtimes, adjust the base image and install command accordingly: + + ```dockerfile {test="skip" lint="skip"} + FROM node:22-slim + RUN npm install -g typescript ts-node express + WORKDIR /workspace + ``` + +For running untrusted code, you can harden the container with Linux security options: + +```python {title="environments_docker_hardened.py" test="skip"} +from pydantic_ai.environments.docker import DockerEnvironment + +env = DockerEnvironment( + image='python:3.12-slim', + network_disabled=True, + read_only=True, + cap_drop=['ALL'], + security_opt=['no-new-privileges'], + user='nobody', + pids_limit=256, + tmpfs={'/tmp': 'noexec,nosuid,size=64m', '/workspace': 'size=128m'}, + init=True, + memory_limit='512m', + cpu_limit=1.0, +) +``` + +This drops all Linux capabilities, prevents privilege escalation, runs as an unprivileged user, limits the number of processes, and makes the root filesystem read-only (with writable tmpfs mounts for scratch space and the working directory). + +## ExecutionEnvironmentToolset + +[`ExecutionEnvironmentToolset`][pydantic_ai.environments.ExecutionEnvironmentToolset] wraps an environment and exposes coding-agent-style tools that models are well-trained on (matching tools that popular coding agents expose): + +| Tool | Description | +|---|---| +| `ls` | List directory contents | +| `shell` | Execute shell commands | +| `read_file` | Read files with line numbers (renders images for multimodal models) | +| `write_file` | Create or overwrite files | +| `replace_str` | Edit files by exact string replacement | +| `glob` | Find files by pattern | +| `grep` | Search file contents with regex | + +Tools are dynamically registered based on the environment's capabilities. 
You can selectively include or exclude capabilities:

```python {title="environments_selective_tools.py"}
from pydantic_ai.environments import ExecutionEnvironmentToolset
from pydantic_ai.environments.memory import MemoryEnvironment

# Only file tools — no shell or search
toolset = ExecutionEnvironmentToolset(
    MemoryEnvironment(),
    include=frozenset({'read_file', 'write_file', 'replace_str'}),
)
```

### Using with an Agent

The toolset manages the environment lifecycle when used as a context manager:

```python {title="environments_agent.py" test="skip"}
from pydantic_ai import Agent
from pydantic_ai.environments import ExecutionEnvironmentToolset
from pydantic_ai.environments.docker import DockerEnvironment

env = DockerEnvironment(image='python:3.12-slim')
toolset = ExecutionEnvironmentToolset(env)

agent = Agent('openai:gpt-5.2', toolsets=[toolset])

async def main():
    async with toolset:  # starts the Docker container
        result = await agent.run('Fetch https://httpbin.org/get and print the response')
        print(result.output)
    # container cleaned up automatically
```

### Environment Overrides

You can swap the backing environment at runtime using [`use_environment()`][pydantic_ai.environments.ExecutionEnvironmentToolset.use_environment]:

```python {title="environments_override.py" test="skip"}
from pydantic_ai import Agent
from pydantic_ai.environments import ExecutionEnvironmentToolset
from pydantic_ai.environments.docker import DockerEnvironment
from pydantic_ai.environments.local import LocalEnvironment

toolset = ExecutionEnvironmentToolset(LocalEnvironment('/tmp/dev'))

agent = Agent('openai:gpt-5.2', toolsets=[toolset])

async def main():
    # Default: local environment
    async with LocalEnvironment('/tmp/dev') as local_env:
        with toolset.use_environment(local_env):
            await agent.run('echo "running locally"')

    # Override: Docker environment for untrusted input
    async with DockerEnvironment() as docker_env:
with toolset.use_environment(docker_env): + await agent.run('echo "running in Docker"') +``` + +## Per-Call Environment Variables + +All environments support per-call environment variables via the `env` parameter on [`shell()`][pydantic_ai.environments.ExecutionEnvironment.shell] and [`create_process()`][pydantic_ai.environments.ExecutionEnvironment.create_process]. These are merged on top of any baseline `env_vars`: + +```python {title="environments_env_vars.py" test="skip"} +from pydantic_ai.environments.local import LocalEnvironment + +env = LocalEnvironment(env_vars={'BASE_URL': 'https://api.example.com'}) + +async def main(): + async with env: + # Uses BASE_URL from baseline + API_KEY from per-call + result = await env.shell( + 'curl -H "Authorization: Bearer $API_KEY" $BASE_URL/data', + env={'API_KEY': 'sk-test-123'}, + ) + print(result.output) +``` + +## Interactive Processes + +For long-running or interactive workloads, use [`create_process()`][pydantic_ai.environments.ExecutionEnvironment.create_process] to get an [`ExecutionProcess`][pydantic_ai.environments.ExecutionProcess] with bidirectional streaming I/O: + +```python {title="environments_process.py" test="skip"} +from pydantic_ai.environments.local import LocalEnvironment + +env = LocalEnvironment() + +async def main(): + async with env: + async with await env.create_process('python3 -u worker.py') as proc: + await proc.send(b'{"task": "analyze"}\n') + response = await proc.recv(timeout=10.0) + print(response.decode()) +``` + +## Execution Model + +Each call to `shell()` or `create_process()` starts a fresh process. Shell state (like `cd`, shell variables) does not persist between calls. This is the same model used by other coding agents like Claude Code and Codex. 
+ +To run commands in a specific directory, chain them: + +```python {title="environments_chaining.py" test="skip" lint="skip"} +result = await env.shell('cd /some/path && python script.py') +``` + +Filesystem changes (created files, installed packages) persist for the lifetime of the environment. + +## Building a Custom Environment + +You can implement [`ExecutionEnvironment`][pydantic_ai.environments.ExecutionEnvironment] to integrate with any execution backend. The only abstract member is `capabilities`; override the methods that match your declared capabilities. Override [`create_process()`][pydantic_ai.environments.ExecutionEnvironment.create_process] if you need interactive process support. + +```python {title="environments_custom.py" test="skip" lint="skip"} +from typing import Literal + +from pydantic_ai.environments import ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo +from pydantic_ai.environments._base import Capability + +class MyCloudEnvironment(ExecutionEnvironment): + @property + def capabilities(self) -> frozenset[Capability]: + return frozenset({'shell', 'read_file', 'write_file', 'replace_str', 'ls', 'glob', 'grep'}) + + async def shell( + self, command: str, *, timeout: float | None = 120, env: dict[str, str] | None = None + ) -> ExecutionResult: + # Run a command in your cloud environment + ... + + async def read_file( + self, path: str, *, offset: int = 0, limit: int = 2000 + ) -> str | bytes: + ... + + async def write_file(self, path: str, content: str | bytes) -> None: + ... + + async def replace_str( + self, path: str, old: str, new: str, *, replace_all: bool = False + ) -> int: + ... + + async def ls(self, path: str = '.') -> list[FileInfo]: + ... + + async def glob(self, pattern: str, *, path: str = '.') -> list[str]: + ... 
+ + async def grep( + self, + pattern: str, + *, + path: str | None = None, + glob_pattern: str | None = None, + output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', + ) -> str: + ... +``` diff --git a/docs/install.md b/docs/install.md index 2eced2cf2c..1d4e624850 100644 --- a/docs/install.md +++ b/docs/install.md @@ -67,6 +67,7 @@ pip/uv-add "pydantic-ai-slim[openai]" * `ag-ui` - installs [AG-UI Event Stream Protocol](ui/ag-ui.md) dependencies `ag-ui-protocol` [PyPI ↗](https://pypi.org/project/ag-ui-protocol){:target="_blank"} and `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"} * `dbos` - installs [DBOS Durable Execution](durable_execution/dbos.md) dependency `dbos` [PyPI ↗](https://pypi.org/project/dbos){:target="_blank"} * `prefect` - installs [Prefect Durable Execution](durable_execution/prefect.md) dependency `prefect` [PyPI ↗](https://pypi.org/project/prefect){:target="_blank"} +* `docker-sandbox` - installs [Docker Sandbox](environments.md#dockerenvironment) dependency `docker` [PyPI ↗](https://pypi.org/project/docker){:target="_blank"} You can also install dependencies for multiple models and use cases, for example: diff --git a/mkdocs.yml b/mkdocs.yml index a100a42cd9..1aad2360e9 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -46,6 +46,8 @@ nav: - builtin-tools.md - common-tools.md - third-party-tools.md + - Execution Environments: + - environments.md - Advanced Features: - input.md - thinking.md @@ -171,6 +173,7 @@ nav: - api/result.md - api/retries.md - api/run.md + - api/environments.md - api/settings.md - api/tools.md - api/toolsets.md diff --git a/pydantic_ai_slim/pydantic_ai/environments/__init__.py b/pydantic_ai_slim/pydantic_ai/environments/__init__.py new file mode 100644 index 0000000000..da2b4a950b --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/environments/__init__.py @@ -0,0 +1,27 @@ +"""Execution environment abstractions for agents. 
+ +This package provides: + +- `ExecutionEnvironment` — abstract base class for execution environments +- `ExecutionProcess` — interactive process handle with bidirectional I/O +- `ExecutionEnvironmentToolset` — toolset exposing coding-agent-style tools backed by an environment +- `ExecutionResult`, `FileInfo` — result types + +Implementations: + +- `environments.docker.DockerEnvironment` — Docker container-based sandbox (isolated) +- `environments.local.LocalEnvironment` — local subprocess environment (no isolation, for dev/testing) +- `environments.memory.MemoryEnvironment` — in-memory environment for testing +""" + +from pydantic_ai.toolsets.execution_environment import ExecutionEnvironmentToolset + +from ._base import ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo + +__all__ = ( + 'ExecutionResult', + 'ExecutionEnvironment', + 'ExecutionEnvironmentToolset', + 'ExecutionProcess', + 'FileInfo', +) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py new file mode 100644 index 0000000000..548d39f2bd --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -0,0 +1,559 @@ +"""Base abstractions for execution environments. + +This module defines the core types, the `ExecutionEnvironment` ABC, and the +`ExecutionProcess` ABC for interactive execution with bidirectional streaming I/O. +""" + +from __future__ import annotations + +import fnmatch +import re +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Any, Literal + +from typing_extensions import Self + +# --- Capability type alias --- + +Capability = Literal[ + 'ls', + 'shell', + 'read_file', + 'write_file', + 'replace_str', + 'apply_patch', + 'glob', + 'grep', +] +"""Fine-grained capability identifier listing actual method names. + +Used in `capabilities` to declare which methods an environment implements. +Toolsets are responsible for mapping these to LLM-facing tool names. 
+""" + + +# --- Data types --- + + +@dataclass +class ExecutionResult: + """Result of a completed command execution.""" + + output: str + """The combined stdout/stderr output of the command.""" + + exit_code: int + """The exit code of the command.""" + + truncated: bool = False + """Whether the output was truncated due to length limits.""" + + +@dataclass +class FileInfo: + """Metadata about a file or directory.""" + + name: str + """The file or directory name.""" + + path: str + """The full path.""" + + is_dir: bool + """Whether this entry is a directory.""" + + size: int | None = None + """The file size in bytes, or None for directories.""" + + +class ExecutionProcess(ABC): + r"""Handle to a running process with bidirectional streaming I/O. + + Used for interactive execution where a script outputs data, + waits for input, processes it, and outputs more data. + """ + + @abstractmethod + async def send(self, data: bytes) -> None: + """Write data to the process's stdin. + + Args: + data: The bytes to write to stdin. + """ + + @abstractmethod + async def recv(self, timeout: float | None = None) -> bytes: + """Read available output from stdout. + + Blocks until data is available, the process exits, or the timeout expires. + + Args: + timeout: Maximum seconds to wait for data. None means wait indefinitely. + + Raises: + TimeoutError: If the timeout expires with no data available. + """ + + @abstractmethod + async def recv_stderr(self, timeout: float | None = None) -> bytes: + """Read available output from stderr. + + Args: + timeout: Maximum seconds to wait for data. None means wait indefinitely. + + Raises: + TimeoutError: If the timeout expires with no data available. + """ + + @property + @abstractmethod + def returncode(self) -> int | None: + """Return code if the process has exited, None if still running.""" + + @abstractmethod + async def wait(self, timeout: float | None = None) -> int: + """Wait for the process to exit. 
+ + Args: + timeout: Maximum seconds to wait. None means wait indefinitely. + + Returns: + The process exit code. + + Raises: + TimeoutError: If the timeout expires before the process exits. + """ + + @abstractmethod + async def kill(self) -> None: + """Kill the process.""" + + async def __aenter__(self) -> Self: + return self + + async def __aexit__(self, *args: Any) -> None: + if self.returncode is None: + await self.kill() + + +# --- Constants --- + +IMAGE_EXTENSIONS = frozenset( + { + '.png', + '.jpg', + '.jpeg', + '.gif', + '.webp', + '.bmp', + '.svg', + } +) + +IMAGE_MEDIA_TYPES: dict[str, str] = { + '.png': 'image/png', + '.jpg': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.gif': 'image/gif', + '.webp': 'image/webp', + '.bmp': 'image/bmp', + '.svg': 'image/svg+xml', +} + +MAX_OUTPUT_CHARS = 100_000 + + +# --- ExecutionEnvironment --- + + +class ExecutionEnvironment(ABC): + """Abstract base class for execution environments. + + An execution environment provides a place where agents can execute + commands, read/write files, and search the filesystem. + + Implementations range from in-memory (for testing) to local subprocess, + Docker containers, and cloud-hosted VMs. + + The only abstract member is `capabilities`; all tool methods raise + `NotImplementedError` by default. Concrete subclasses override the + methods that match their declared capabilities. + """ + + # --- Capability introspection --- + + @property + @abstractmethod + def capabilities(self) -> frozenset[Capability]: + """Capabilities this environment supports (high-level). + + Used by toolsets to decide which tools to register. Only methods + corresponding to declared capabilities need to be implemented. + """ + ... + + def instructions(self, capability: Capability) -> str | None: + """Per-capability instructions for the LLM. 
+ + Override to provide environment-specific hints that toolsets include + in the tool description shown to the model, e.g.:: + + def instructions(self, capability): + if capability == 'shell': + return 'Bash in Docker container, numpy/pandas installed' + if capability == 'grep': + return 'Uses POSIX basic regex, not Python re syntax' + return None + + Args: + capability: The capability name (e.g. `'shell'`). + + Returns: + Instruction text for the LLM, or None for no extra instructions. + """ + return None + + # --- Tool methods --- + # All raise NotImplementedError by default. Concrete subclasses override + # the methods that match their declared capabilities. + + async def ls(self, path: str = '.') -> list[FileInfo]: + """List directory contents. + + Args: + path: The directory path within the environment. + + Returns: + A list of `FileInfo` entries. + """ + raise NotImplementedError(f'{type(self).__name__} does not support ls.') + + async def shell( + self, + command: str, + *, + timeout: float | None = 120, + env: dict[str, str] | None = None, + ) -> ExecutionResult: + """Execute a shell command and return the result. + + Args: + command: The shell command to execute. + timeout: Maximum seconds to wait for completion. + Pass `None` to disable the timeout. + env: Additional environment variables for this command. + Merged with (and overrides) any baseline environment variables. + + Returns: + An `ExecutionResult` with the command output and exit code. + """ + raise NotImplementedError(f'{type(self).__name__} does not support shell.') + + async def read_file( + self, + path: str, + *, + offset: int = 0, + limit: int = 2000, + ) -> str | bytes: + """Read a file from the environment. + + For text files, returns a string with `cat -n` style line numbers. + For binary files (images), returns raw bytes. + + Args: + path: The file path within the environment. + offset: The line number to start reading from (0-indexed). + Ignored for binary files. 
+ limit: Maximum number of lines to read. + Ignored for binary files. + + Returns: + Text content with line numbers (`str`), or raw bytes for binary files. + """ + raise NotImplementedError(f'{type(self).__name__} does not support read_file.') + + async def write_file(self, path: str, content: str | bytes) -> None: + """Create or overwrite a file in the environment. + + Args: + path: The file path within the environment. + content: The file content (text or binary). + """ + raise NotImplementedError(f'{type(self).__name__} does not support write_file.') + + async def replace_str( + self, + path: str, + old: str, + new: str, + *, + replace_all: bool = False, + ) -> int: + """Edit a file by exact string replacement. + + Args: + path: The file path within the environment. + old: The exact text to find. + new: The replacement text. + replace_all: If True, replace all occurrences. If False, the + old string must appear exactly once or an error is raised. + + Returns: + The number of replacements made. + + Raises: + FileNotFoundError: If the file does not exist. + ValueError: If `old` is not found, or appears multiple times + when `replace_all` is False. + """ + raise NotImplementedError(f'{type(self).__name__} does not support replace_str.') + + async def apply_patch(self, path: str, patch: str) -> str: + """Apply a unified diff patch to a file. + + Args: + path: The file path within the environment. + patch: The unified diff patch content. + + Returns: + The resulting file content after applying the patch. + """ + raise NotImplementedError(f'{type(self).__name__} does not support apply_patch.') + + async def glob(self, pattern: str, *, path: str = '.') -> list[str]: + """Find files matching a glob pattern. + + Args: + pattern: The glob pattern (e.g. `'**/*.py'`). + path: The directory to search in. + + Returns: + A list of matching file paths. 
+ """ + raise NotImplementedError(f'{type(self).__name__} does not support glob.') + + async def grep( + self, + pattern: str, + *, + path: str | None = None, + glob_pattern: str | None = None, + output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', + ) -> str: + """Search file contents with a regex pattern. + + Args: + pattern: The regex pattern to search for. + path: The file or directory to search in. + glob_pattern: Optional glob to filter which files are searched. + output_mode: Controls output format: + - `'content'` (default): matching lines as `file:line_number:text` + - `'files_with_matches'`: only file paths containing matches + - `'count'`: `file:count` pairs + + Returns: + Matching lines formatted as text. + """ + raise NotImplementedError(f'{type(self).__name__} does not support grep.') + + # --- Internal helpers (not tools) --- + + async def create_process( + self, + command: str, + *, + env: dict[str, str] | None = None, + ) -> ExecutionProcess: + r"""Create an interactive process with streaming stdin/stdout. + + Args: + command: The shell command to run. + env: Additional environment variables for this process. + + Returns: + An `ExecutionProcess` handle for bidirectional I/O. + """ + raise NotImplementedError(f'{type(self).__name__} does not support interactive processes.') + + # --- Lifecycle --- + + async def __aenter__(self) -> Self: + """Start the environment (e.g., create a Docker container).""" + return self + + async def __aexit__(self, *args: Any) -> None: + """Stop the environment and clean up resources.""" + + +# --- Helper functions --- + + +def shell_escape(s: str) -> str: + """Escape a string for safe use in shell commands.""" + return "'" + s.replace("'", "'\\''") + "'" + + +def format_lines(text: str, offset: int, limit: int) -> str: + """Format text with line numbers and continuation hints. + + Shared helper used by `LocalEnvironment` and `MemoryEnvironment` + to produce consistent `cat -n` style output. 
+ """ + lines = text.splitlines(keepends=True) + total_lines = len(lines) + + if offset >= total_lines and total_lines > 0: + raise ValueError(f'Offset {offset} exceeds file length ({total_lines} lines).') + + selected = lines[offset : offset + limit] + + numbered = [f'{i:>6}\t{line}' for i, line in enumerate(selected, start=offset + 1)] + result = ''.join(numbered) + if not result.endswith('\n'): + result += '\n' + + remaining = total_lines - (offset + len(selected)) + if remaining > 0: + next_offset = offset + len(selected) + result += f'... ({remaining} more lines. Use offset={next_offset} to continue reading.)\n' + + return result + + +def collect_grep_matches( + rel_path: str, + text: str, + compiled: re.Pattern[str], + output_mode: Literal['content', 'files_with_matches', 'count'], + results: list[str], +) -> None: + """Collect grep matches from a single file into `results`. + + Shared helper used by `LocalEnvironment` and `MemoryEnvironment`. + """ + if output_mode == 'files_with_matches': + if any(compiled.search(line) for line in text.splitlines()): + results.append(rel_path) + elif output_mode == 'count': + match_count = sum(1 for line in text.splitlines() if compiled.search(line)) + if match_count > 0: + results.append(f'{rel_path}:{match_count}') + else: + for line_num, line in enumerate(text.splitlines(), start=1): + if compiled.search(line): + results.append(f'{rel_path}:{line_num}:{line}') + + +def glob_match(path: str, pattern: str) -> bool: + """Match a path against a glob pattern with `**` support. + + `fnmatch` does not support `**` for recursive matching. + This helper converts glob patterns to regex so that `**` + matches zero or more path segments (including `/`). + """ + if '**' not in pattern: + return fnmatch.fnmatch(path, pattern) + + regex = '' + i = 0 + while i < len(pattern): + if pattern[i : i + 3] == '**/': + regex += '(.*/)?' 
+ i += 3 + elif pattern[i : i + 2] == '**': + regex += '.*' + i += 2 + elif pattern[i] == '*': + regex += '[^/]*' + i += 1 + elif pattern[i] == '?': + regex += '[^/]' + i += 1 + else: + regex += re.escape(pattern[i]) + i += 1 + return bool(re.fullmatch(regex, path)) + + +# --- Shell command builders for Docker environments --- + + +def build_read_file_cmd(path: str, *, offset: int = 0, limit: int = 2000) -> str: + """Build a shell command that reads a file with line numbers. + + Uses `awk` for reliable line numbering that handles tabs correctly. + Includes a continuation hint when more lines remain, consistent + with the `format_lines` helper used by Local/Memory environments. + """ + escaped = shell_escape(path) + start = offset + 1 + end = offset + limit + return ( + f'awk \'NR>={start} && NR<={end} {{printf "%6d\\t%s\\n", NR, $0}}' + f' END {{if(NR>{end}) printf "... (%d more lines. Use offset={end} to continue reading.)\\n", NR-{end}}}\'' + f' {escaped}' + ) + + +def build_grep_cmd( + pattern: str, + *, + path: str | None = None, + glob_pattern: str | None = None, + output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', +) -> str: + """Build a shell `grep` command from structured arguments.""" + parts = ['grep', '-rI'] # -I skips binary files + if output_mode == 'files_with_matches': + parts.append('-l') + elif output_mode == 'count': + parts.append('-c') + else: + parts.append('-n') + if glob_pattern: + parts.extend(['--include', shell_escape(glob_pattern)]) + parts.append(shell_escape(pattern)) + parts.append(shell_escape(path or '.')) + return ' '.join(parts) + + +def filter_grep_count_output(text: str) -> str: + """Filter `grep -c` output to remove files with 0 matches.""" + return '\n'.join(line for line in text.splitlines() if not line.endswith(':0')) + + +def build_glob_cmd(pattern: str, *, path: str = '.') -> str: + """Build a shell `find` command to match files by pattern.""" + return f'find {shell_escape(path)} -path 
{shell_escape(pattern)} -o -name {shell_escape(pattern)} 2>/dev/null | head -100' + + +def parse_glob_output(text: str) -> list[str]: + """Parse output of a find/glob command into a list of paths.""" + text = text.strip() + if not text: + return [] + return [line for line in text.splitlines() if line] + + +def apply_edit(text: str, old_string: str, new_string: str, path: str, *, replace_all: bool) -> tuple[str, int]: + """Apply a string replacement edit, returning the new text and the number of replacements. + + Raises: + ValueError: If old_string is not found, or appears multiple times + when replace_all is False. + """ + count = text.count(old_string) + + if count == 0: + raise ValueError(f'old_string not found in {path}.') + if not replace_all and count > 1: + raise ValueError(f'old_string found {count} times in {path}. Use replace_all=True or provide more context.') + + if replace_all: + new_text = text.replace(old_string, new_string) + else: + new_text = text.replace(old_string, new_string, 1) + + return new_text, count if replace_all else 1 diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py new file mode 100644 index 0000000000..38cfc7a275 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -0,0 +1,574 @@ +"""Docker container-based environment for isolated code execution. 
Requires the `docker` package: `pip install pydantic-ai-slim[docker-sandbox]`
"""

from __future__ import annotations

import io
import math
import posixpath
import struct
import tarfile
from pathlib import PurePosixPath
from typing import Any, Literal, cast

import anyio
import anyio.to_thread
from typing_extensions import Self

from ._base import (
    IMAGE_EXTENSIONS,
    MAX_OUTPUT_CHARS,
    Capability,
    ExecutionEnvironment,
    ExecutionProcess,
    ExecutionResult,
    FileInfo,
    apply_edit,
    build_glob_cmd,
    build_grep_cmd,
    build_read_file_cmd,
    filter_grep_count_output,
    parse_glob_output,
    shell_escape,
)

try:
    import docker
    from docker.errors import DockerException
    from docker.models.containers import Container
except ImportError as _import_error:
    raise ImportError(
        'The `docker` package is required for DockerEnvironment. '
        'Install it with: pip install pydantic-ai-slim[docker-sandbox]'
    ) from _import_error


def _put_file(container: Container, path: str, data: bytes) -> None:
    """Write file data into a container via put_archive.

    Docker's `put_archive` only accepts tar streams, so the single file is
    wrapped in an in-memory tar archive and extracted into `path`'s parent.
    """
    parent = str(PurePosixPath(path).parent)
    filename = PurePosixPath(path).name
    f = io.BytesIO()
    with tarfile.open(fileobj=f, mode='w') as tar:
        info = tarfile.TarInfo(name=filename)
        info.size = len(data)
        tar.addfile(info, io.BytesIO(data))
    f.seek(0)
    container.put_archive(parent, f)  # pyright: ignore[reportUnknownMemberType]


class DockerEnvironmentProcess(ExecutionProcess):
    """Interactive process inside a Docker container using exec with socket I/O.

    Docker's exec socket uses a multiplexed stream protocol where stdout and
    stderr frames are interleaved with 8-byte headers indicating the stream
    type. This class properly separates the two streams so that `recv()`
    returns only stdout data and `recv_stderr()` returns only stderr data.
    When one stream is requested but the other arrives first, the unexpected
    frame is buffered for the next call to the appropriate method.
    """

    # Frame type bytes from the Docker stream-multiplexing protocol.
    _STDOUT = 1
    _STDERR = 2

    def __init__(self, container: Container, command: str, work_dir: str, env: dict[str, str] | None = None) -> None:
        self._container = container
        self._command = command
        self._work_dir = work_dir
        self._env = env
        self._exec_id: str | None = None
        self._socket: Any = None
        self._returncode: int | None = None
        # Frames that arrived for one stream while the other was being read.
        self._stdout_buffer: list[bytes] = []
        self._stderr_buffer: list[bytes] = []
        self._eof = False

    async def _start(self) -> None:
        """Start the exec and open the socket (called from __aenter__)."""

        def _do_start() -> tuple[str, Any]:
            # Uses the low-level API (exec_create/exec_start) because the
            # high-level Container.exec_run cannot return a raw socket.
            client: Any = self._container.client
            kwargs: dict[str, Any] = {
                'stdin': True,
                'stdout': True,
                'stderr': True,
                'workdir': self._work_dir,
            }
            if self._env:
                kwargs['environment'] = self._env
            exec_id: str = client.api.exec_create(
                self._container.id,
                ['sh', '-c', self._command],
                **kwargs,
            )['Id']
            sock = client.api.exec_start(exec_id, socket=True)
            # docker-py returns a SocketIO wrapper; get the raw socket
            raw = getattr(sock, '_sock', sock)
            return exec_id, raw

        self._exec_id, self._socket = await anyio.to_thread.run_sync(_do_start)

    async def __aenter__(self) -> Self:
        if self._exec_id is None:  # pragma: no branch
            await self._start()
        return self

    async def send(self, data: bytes) -> None:
        """Write `data` to the process's stdin."""
        await anyio.to_thread.run_sync(self._socket.sendall, data)

    async def recv(self, timeout: float | None = None) -> bytes:
        """Receive one stdout frame; buffered frames are served first."""
        if self._stdout_buffer:
            return self._stdout_buffer.pop(0)
        if timeout is not None:
            with anyio.fail_after(timeout):
                return await self._recv_stream(self._STDOUT)
        return await self._recv_stream(self._STDOUT)

    async def recv_stderr(self, timeout: float | None = None) -> bytes:
        """Receive one stderr frame; buffered frames are served first."""
        if self._stderr_buffer:
            return self._stderr_buffer.pop(0)
        if timeout is not None:
            with anyio.fail_after(timeout):
                return await self._recv_stream(self._STDERR)
        return await self._recv_stream(self._STDERR)

    async def _recv_stream(self, wanted: int) -> bytes:
        """Read frames until one for the wanted stream type arrives."""
        while True:
            stream_type, data = await anyio.to_thread.run_sync(self._read_frame)
            if not data:
                # Empty data signals EOF (or a zero-length frame).
                return b''
            if stream_type == wanted:
                return data
            # Buffer the frame for the other stream
            if stream_type == self._STDOUT:
                self._stdout_buffer.append(data)
            else:
                self._stderr_buffer.append(data)

    def _read_frame(self) -> tuple[int, bytes]:
        """Read one frame from the Docker multiplexed stream.

        Docker exec socket uses a multiplexed protocol:
        - 8 byte header: [stream_type(1), 0, 0, 0, size(4)]
        - followed by `size` bytes of data

        Returns:
            A `(stream_type, data)` tuple. `stream_type` is 1 for stdout
            and 2 for stderr. Returns `(0, b'')` on EOF.
        """
        if self._eof:
            return 0, b''

        header = b''
        while len(header) < 8:
            chunk = self._socket.recv(8 - len(header))
            if not chunk:
                self._eof = True
                return 0, b''
            header += chunk

        stream_type = header[0]
        # Payload size is a big-endian uint32 in header bytes 4-8.
        size = struct.unpack('>I', header[4:8])[0]
        if size == 0:
            return stream_type, b''

        data = b''
        while len(data) < size:
            chunk = self._socket.recv(size - len(data))
            if not chunk:
                self._eof = True
                break
            data += chunk
        return stream_type, data

    @property
    def returncode(self) -> int | None:
        """Exit code of the exec'd process, or None while it is running."""
        if self._returncode is not None:
            return self._returncode
        if self._exec_id is None:
            return None
        try:
            client: Any = self._container.client
            info = client.api.exec_inspect(self._exec_id)
            rc = info.get('ExitCode')
            if not info.get('Running', False) and rc is not None:
                self._returncode = rc
            return rc
        except (DockerException, OSError):
            # Docker API may raise various errors (connection, not found, etc.)
            # when inspecting exec state — treat as "still running"
            pass
        return None

    async def wait(self, timeout: float | None = None) -> int:
        """Poll `returncode` until the process exits (100ms interval)."""

        async def _poll() -> int:
            while True:
                rc = self.returncode
                if rc is not None:
                    return rc
                await anyio.sleep(0.1)

        if timeout is not None:
            with anyio.fail_after(timeout):
                return await _poll()
        return await _poll()

    async def kill(self) -> None:
        # Docker exec doesn't provide a direct kill; close the socket
        # NOTE(review): closing the socket does not terminate the process
        # inside the container — confirm whether that is acceptable here.
        try:
            self._socket.close()
        except OSError:
            pass


class DockerEnvironment(ExecutionEnvironment):
    """Docker container-based environment for isolated code execution.

    Provides isolated code execution with configurable resource limits,
    network access, and persistent or ephemeral workspaces.

    Usage:
        ```python {test="skip" lint="skip"}
        async with DockerEnvironment(image='python:3.12-slim') as env:
            result = await env.shell('python -c "print(42)"')
            print(result.output)
        ```
    """

    def __init__(
        self,
        *,
        image: str = 'python:3.12-slim',
        env_vars: dict[str, str] | None = None,
        work_dir: str = '/workspace',
        volumes: dict[str, dict[str, str]] | None = None,
        memory_limit: str | None = None,
        cpu_limit: float | None = None,
        pids_limit: int | None = None,
        network_disabled: bool = False,
        read_only: bool = False,
        cap_drop: list[str] | None = None,
        security_opt: list[str] | None = None,
        user: str | None = None,
        tmpfs: dict[str, str] | None = None,
        init: bool = False,
    ) -> None:
        """Create a Docker environment.

        Args:
            image: Docker image to use. Pre-build custom images with any
                required packages before passing them here.
            env_vars: Baseline environment variables to set in the container.
            work_dir: Working directory inside the container.
            volumes: Volume mounts (Docker format).
            memory_limit: Memory limit (e.g. '512m', '1g').
            cpu_limit: CPU limit (e.g. 1.0 for one CPU).
            pids_limit: Maximum number of PIDs in the container (e.g. 256).
                Prevents fork bombs.
            network_disabled: Whether to disable network access.
            read_only: Whether to mount the root filesystem as read-only.
                Use with `tmpfs` to provide writable scratch space.
            cap_drop: Linux capabilities to drop (e.g. `['ALL']`).
            security_opt: Security options (e.g. `['no-new-privileges']`).
            user: User to run as inside the container (e.g. `'nobody'`).
            tmpfs: tmpfs mounts as `{path: options}`
                (e.g. `{'/tmp': 'noexec,nosuid,size=64m'}`).
            init: Whether to use `--init` to run an init process as PID 1.
                Ensures proper signal handling and zombie reaping.
        """
        self._image = image
        self._env_vars = env_vars or {}
        self._work_dir = work_dir
        self._volumes = volumes
        self._memory_limit = memory_limit
        self._cpu_limit = cpu_limit
        self._pids_limit = pids_limit
        self._network_disabled = network_disabled
        self._read_only = read_only
        self._cap_drop = cap_drop
        self._security_opt = security_opt
        self._user = user
        self._tmpfs = tmpfs
        self._init = init

        # Populated on __aenter__; None until the container is started.
        self._client: docker.DockerClient | None = None
        self._container: Container | None = None

    @property
    def capabilities(self) -> frozenset[Capability]:  # pragma: lax no cover
        return frozenset(
            {
                'ls',
                'shell',
                'read_file',
                'write_file',
                'replace_str',
                'glob',
                'grep',
            }
        )

    def instructions(self, capability: Capability) -> str | None:
        """Extra model-facing guidance for capabilities whose semantics differ here."""
        if capability == 'grep':  # pragma: lax no cover
            return 'Uses POSIX basic regex, not Python `re` syntax.'
        elif capability == 'glob':  # pragma: lax no cover
            return 'Uses `find` for pattern matching; `**` is not supported.'
        elif capability == 'shell':  # pragma: lax no cover
            return 'Runs inside a Docker container.'
        return None  # pragma: lax no cover

    async def __aenter__(self) -> Self:
        await anyio.to_thread.run_sync(self._setup)
        return self

    def _setup(self) -> None:
        """Start container (sync, runs in executor)."""
        self._client = docker.from_env()

        # Create and start container
        kwargs: dict[str, Any] = {
            'image': self._image,
            'command': 'sleep infinity',
            'detach': True,
            'working_dir': self._work_dir,
            'environment': self._env_vars,
            'auto_remove': False,
        }
        if self._volumes:
            kwargs['volumes'] = self._volumes
        if self._memory_limit:
            kwargs['mem_limit'] = self._memory_limit
        if self._cpu_limit:
            # Docker expects CPU quota in units of 1e-9 CPUs.
            kwargs['nano_cpus'] = int(self._cpu_limit * 1e9)
        if self._pids_limit is not None:
            kwargs['pids_limit'] = self._pids_limit
        if self._network_disabled:
            kwargs['network_disabled'] = True
        if self._read_only:
            kwargs['read_only'] = True
        if self._cap_drop:
            kwargs['cap_drop'] = self._cap_drop
        if self._security_opt:
            kwargs['security_opt'] = self._security_opt
        if self._user:
            kwargs['user'] = self._user
        if self._tmpfs:
            kwargs['tmpfs'] = self._tmpfs
        if self._init:
            kwargs['init'] = True

        self._container = cast(Container, self._client.containers.run(**kwargs))

        # Ensure work_dir exists
        self._container.exec_run(['mkdir', '-p', self._work_dir])

    async def __aexit__(self, *_args: Any) -> None:
        if self._container is not None:  # pragma: no branch
            await anyio.to_thread.run_sync(self._teardown)

    def _teardown(self) -> None:
        """Stop and remove container (sync, runs in executor)."""
        if self._container is not None:  # pragma: no branch
            try:
                self._container.stop(timeout=5)
            except Exception:
                # Best-effort cleanup: container may already be stopped or removed
                pass
            try:
                self._container.remove(force=True)
            except Exception:
                # Best-effort cleanup: container may already be removed
                pass
            self._container = None

    @property
    def container(self) -> Container:
        """The running container.

        Raises:
            RuntimeError: If the environment has not been entered yet.
        """
        if self._container is None:
            raise RuntimeError('DockerEnvironment not started. Use `async with DockerEnvironment(...) as env:`')
        return self._container

    def _resolve_path(self, path: str) -> str:
        """Resolve a path relative to work_dir for Docker API calls.

        Docker API methods like `put_archive` and `get_archive` resolve
        paths against the container root `/`, not the working directory.
        This helper ensures relative paths are resolved against `work_dir`.
        """
        if not path.startswith('/'):
            return f'{self._work_dir}/{path}'
        return path

    async def create_process(
        self,
        command: str,
        *,
        env: dict[str, str] | None = None,
    ) -> ExecutionProcess:
        """Create (but do not yet start) an interactive process in the container."""
        return DockerEnvironmentProcess(self.container, command, self._work_dir, env=env)

    async def shell(
        self,
        command: str,
        *,
        timeout: float | None = 120,
        env: dict[str, str] | None = None,
    ) -> ExecutionResult:
        """Execute a command in the container."""

        def _exec() -> tuple[int, bytes]:
            if timeout is not None:
                # NOTE(review): relies on the `timeout` coreutil being present
                # in the image — confirm for minimal images.
                wrapped = f'timeout {math.ceil(timeout)} sh -c {shell_escape(command)}'
            else:
                wrapped = command
            exec_kwargs: dict[str, Any] = {'workdir': self._work_dir}
            if env:
                exec_kwargs['environment'] = env
            exit_code, output = self.container.exec_run(
                ['sh', '-c', wrapped],
                **exec_kwargs,
            )
            return exit_code, output

        exit_code, output_bytes = await anyio.to_thread.run_sync(_exec)
        output = output_bytes.decode('utf-8', errors='replace')
        truncated = len(output) > MAX_OUTPUT_CHARS
        if truncated:
            output = output[:MAX_OUTPUT_CHARS]
        # timeout command returns 124 on timeout
        if exit_code == 124 and timeout is not None:
            output += '\n[Command timed out]'
        return ExecutionResult(output=output, exit_code=exit_code, truncated=truncated)

    async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> str | bytes:
        """Read a file: raw bytes for images/binary, numbered text otherwise."""
        ext = posixpath.splitext(path)[1].lower()
        if ext in IMAGE_EXTENSIONS:
            return await anyio.to_thread.run_sync(self._read_file_bytes_sync, path)

        def _read() -> str | bytes:
            cmd = build_read_file_cmd(path, offset=offset, limit=limit)
            exit_code, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir)
            if exit_code != 0:
                raise FileNotFoundError(f'File not found or not readable: {path}')
            try:
                return output.decode('utf-8')
            except UnicodeDecodeError:
                # Not valid UTF-8: fall back to raw bytes via the archive API.
                return self._read_file_bytes_sync(path)

        return await anyio.to_thread.run_sync(_read)

    def _read_file_bytes_sync(self, path: str) -> bytes:
        """Read raw file bytes using Docker's get_archive API."""
        bits, _ = self.container.get_archive(self._resolve_path(path))
        # get_archive returns a tar stream
        tar_bytes = b''.join(bits)
        with tarfile.open(fileobj=io.BytesIO(tar_bytes)) as tar:
            members = tar.getmembers()
            if not members:
                raise FileNotFoundError(f'File not found: {path}')
            extracted = tar.extractfile(members[0])
            if extracted is None:
                raise FileNotFoundError(f'Cannot read file: {path}')
            return extracted.read()

    async def write_file(self, path: str, content: str | bytes) -> None:
        """Write a file into the container, creating parent directories."""

        def _write() -> None:
            full_path = self._resolve_path(path)
            # Ensure parent directory exists
            parent = str(PurePosixPath(full_path).parent)
            self.container.exec_run(['mkdir', '-p', parent])

            data = content.encode('utf-8') if isinstance(content, str) else content
            _put_file(self.container, full_path, data)

        await anyio.to_thread.run_sync(_write)

    async def replace_str(
        self,
        path: str,
        old: str,
        new: str,
        *,
        replace_all: bool = False,
    ) -> int:
        """Replace `old` with `new` in a file; returns the replacement count."""

        def _edit() -> int:
            raw = self._read_file_bytes_sync(path)
            text = raw.decode('utf-8')
            new_text, count = apply_edit(text, old, new, path, replace_all=replace_all)
            _put_file(self.container, self._resolve_path(path), new_text.encode('utf-8'))
            return count

        return await anyio.to_thread.run_sync(_edit)

    async def ls(self, path: str = '.') -> list[FileInfo]:
        """List directory entries by parsing `ls -la` output.

        NOTE(review): `-a` means `.` and `..` appear in the output and are
        not filtered out, and symlink names (`name -> target`) are split by
        whitespace — confirm whether these should be handled.
        """

        def _ls() -> list[FileInfo]:
            cmd = f'ls -la {shell_escape(path)}'
            exit_code, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir)
            if exit_code != 0:
                raise NotADirectoryError(f'Not a directory or not found: {path}')

            entries: list[FileInfo] = []
            for line in output.decode('utf-8', errors='replace').splitlines():
                # Skip total line and empty lines
                if not line or line.startswith('total'):
                    continue
                parts = line.split(None, 8)
                if len(parts) < 9:
                    continue
                perms, _, _, _, size_str, _, _, _, name = parts
                is_dir = perms.startswith('d')
                try:
                    size = int(size_str) if not is_dir else None
                except ValueError:
                    size = None
                entry_path = f'{path}/{name}' if path != '.' else name
                entries.append(FileInfo(name=name, path=entry_path, is_dir=is_dir, size=size))
            return entries

        return await anyio.to_thread.run_sync(_ls)

    async def glob(self, pattern: str, *, path: str = '.') -> list[str]:
        """Find files matching `pattern` via `find` (no `**` support)."""

        def _glob() -> list[str]:
            cmd = build_glob_cmd(pattern, path=path)
            _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir)
            return parse_glob_output(output.decode('utf-8', errors='replace'))

        return await anyio.to_thread.run_sync(_glob)

    async def grep(
        self,
        pattern: str,
        *,
        path: str | None = None,
        glob_pattern: str | None = None,
        output_mode: Literal['content', 'files_with_matches', 'count'] = 'content',
    ) -> str:
        """Search files via `grep` (POSIX basic regex, binary files skipped)."""

        def _grep() -> str:
            cmd = build_grep_cmd(pattern, path=path, glob_pattern=glob_pattern, output_mode=output_mode)
            _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir)
            text = output.decode('utf-8', errors='replace').strip()
            if output_mode == 'count':
                text = filter_grep_count_output(text)
            return text

        return await anyio.to_thread.run_sync(_grep)

    async def is_alive(self) -> bool:
        """Check if the container is running.

        Returns:
            True if the container is running, False otherwise.
        """
        if self._container is None:
            return False

        def _check() -> bool:
            assert self._container is not None
            try:
                self._container.reload()
                return self._container.status == 'running'
            except Exception:
                # Any Docker API failure is treated as "not alive".
                return False

        return await anyio.to_thread.run_sync(_check)
diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py
new file mode 100644
index 0000000000..c06ba38a41
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/environments/local.py
@@ -0,0 +1,345 @@
"""Local subprocess-based execution environment for development and testing.

Runs commands directly on the host machine within a specified root directory.
**No isolation** — use `DockerEnvironment` for untrusted code.
"""

from __future__ import annotations

import re
import subprocess
from pathlib import Path
from typing import Any, Literal

import anyio
import anyio.abc
from typing_extensions import Self

from ._base import (
    IMAGE_EXTENSIONS,
    MAX_OUTPUT_CHARS,
    Capability,
    ExecutionEnvironment,
    ExecutionProcess,
    ExecutionResult,
    FileInfo,
    apply_edit,
    collect_grep_matches,
    format_lines,
)


class LocalEnvironmentProcess(ExecutionProcess):
    """Interactive process backed by `anyio.abc.Process`."""

    def __init__(self, proc: anyio.abc.Process) -> None:
        self._proc = proc

    async def send(self, data: bytes) -> None:
        """Write `data` to the process's stdin."""
        stdin = self._proc.stdin
        if stdin is None:
            raise RuntimeError('Process stdin is not available.')
        await stdin.send(data)

    async def recv(self, timeout: float | None = None) -> bytes:
        """Read up to 8 KiB from stdout; returns b'' at end of stream."""
        stdout = self._proc.stdout
        if stdout is None:
            raise RuntimeError('Process stdout is not available.')
        try:
            if timeout is not None:
                with anyio.fail_after(timeout):
                    return await stdout.receive(8192)
            return await stdout.receive(8192)
        except anyio.EndOfStream:
            return b''

    async def recv_stderr(self, timeout: float | None = None) -> bytes:
        """Read up to 8 KiB from stderr; returns b'' at end of stream."""
        stderr = self._proc.stderr
        if stderr is None:
            raise RuntimeError('Process stderr is not available.')
        try:
            if timeout is not None:
                with anyio.fail_after(timeout):
                    return await stderr.receive(8192)
            return await stderr.receive(8192)
        except anyio.EndOfStream:
            return b''

    @property
    def returncode(self) -> int | None:
        """Exit code, or None while the process is still running."""
        return self._proc.returncode

    async def wait(self, timeout: float | None = None) -> int:
        """Wait for the process to exit, optionally bounded by `timeout`."""
        if timeout is not None:
            with anyio.fail_after(timeout):
                return await self._proc.wait()
        return await self._proc.wait()

    async def kill(self) -> None:
        """Kill the process and release its resources."""
        try:
            self._proc.kill()
        except ProcessLookupError:
            # Process already exited; nothing to kill.
            pass
        await self._proc.aclose()
        _close_subprocess_transport(self._proc)


def _close_subprocess_transport(proc: anyio.abc.Process) -> None:
    """Close the underlying asyncio subprocess transport to prevent ResourceWarning on Python 3.10.

    On Python 3.10, asyncio subprocess transports are not closed by
    `Process.wait()` or `Process.aclose()` and their `__del__`
    emits `ResourceWarning: unclosed transport`. Python 3.11+ fixed
    this, but we still support 3.10.
    """
    inner = getattr(proc, '_process', None)  # anyio wraps asyncio.subprocess.Process
    transport = getattr(inner, '_transport', None)
    if transport is not None:  # pragma: no branch
        transport.close()


class LocalEnvironment(ExecutionEnvironment):
    """Local subprocess-based execution environment for development and testing.

    Runs commands directly on the host machine within a specified root
    directory. Provides no isolation — use `DockerEnvironment` for untrusted code.

    Usage:
        ```python {test="skip" lint="skip"}
        async with LocalEnvironment(root_dir='/tmp/workspace') as env:
            result = await env.shell('python script.py')
            print(result.output)
        ```
    """

    def __init__(
        self,
        root_dir: str | Path = '.',
        *,
        env_vars: dict[str, str] | None = None,
        inherit_env: bool = True,
    ) -> None:
        """Create a local execution environment.

        Args:
            root_dir: The working directory for all operations.
                Defaults to the current directory.
            env_vars: Baseline environment variables for all commands.
            inherit_env: Whether to inherit the host's environment variables.
                When True (default), `env_vars` and per-call `env` are merged
                on top of `os.environ`. When False, only `env_vars` and per-call
                `env` are used (useful for reproducibility and testing).
        """
        self._root_dir = Path(root_dir).resolve()
        self._env_vars = env_vars or {}
        self._inherit_env = inherit_env

    @property
    def capabilities(self) -> frozenset[Capability]:
        return frozenset({'ls', 'shell', 'read_file', 'write_file', 'replace_str', 'glob', 'grep'})

    async def __aenter__(self) -> Self:
        # Create the workspace root lazily on entry.
        self._root_dir.mkdir(parents=True, exist_ok=True)
        return self

    async def __aexit__(self, *_args: Any) -> None:
        pass

    def _resolve_path(self, path: str) -> Path:
        """Resolve a path relative to root_dir, preventing traversal."""
        resolved = (self._root_dir / path).resolve()
        if not resolved.is_relative_to(self._root_dir):
            raise PermissionError(f'Path {path!r} resolves outside the environment root.')
        return resolved

    def _build_env(self, env: dict[str, str] | None) -> dict[str, str] | None:
        """Merge baseline env vars with per-call overrides."""
        if not self._env_vars and not env and self._inherit_env:
            return None  # subprocess inherits naturally
        import os

        merged = {**os.environ} if self._inherit_env else {}
        merged.update(self._env_vars)
        if env:
            merged.update(env)
        return merged

    async def create_process(
        self,
        command: str,
        *,
        env: dict[str, str] | None = None,
    ) -> ExecutionProcess:
        """Spawn an interactive process with piped stdin/stdout/stderr."""
        proc = await anyio.open_process(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self._root_dir,
            env=self._build_env(env),
        )
        return LocalEnvironmentProcess(proc)

    async def shell(
        self,
        command: str,
        *,
        timeout: float | None = 120,
        env: dict[str, str] | None = None,
    ) -> ExecutionResult:
        """Execute a command using subprocess for simplicity and reliability."""
        # stderr is merged into stdout so callers get one interleaved stream.
        proc = await anyio.open_process(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=self._root_dir,
            env=self._build_env(env),
        )
        try:
            assert proc.stdout is not None
            chunks: list[bytes] = []
            if timeout is not None:
                with anyio.fail_after(timeout):
                    async for chunk in proc.stdout:
                        chunks.append(chunk)
                    await proc.wait()
            else:
                async for chunk in proc.stdout:
                    chunks.append(chunk)
                await proc.wait()
        except TimeoutError:
            proc.kill()
            # Shield the final wait so cancellation cannot leak a zombie.
            with anyio.CancelScope(shield=True):
                await proc.wait()
            _close_subprocess_transport(proc)
            return ExecutionResult(output='[Command timed out]', exit_code=-1)

        _close_subprocess_transport(proc)
        stdout = b''.join(chunks)
        output = stdout.decode('utf-8', errors='replace')
        truncated = len(output) > MAX_OUTPUT_CHARS
        if truncated:
            output = output[:MAX_OUTPUT_CHARS]
        return ExecutionResult(
            output=output,
            exit_code=proc.returncode if proc.returncode is not None else 0,
            truncated=truncated,
        )

    async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> str | bytes:
        """Read a file: raw bytes for images/binary, numbered text otherwise."""
        resolved = self._resolve_path(path)
        if not resolved.is_file():
            if resolved.is_dir():
                raise FileNotFoundError(f"'{path}' is a directory, not a file.")
            raise FileNotFoundError(f'File not found: {path}')

        if resolved.suffix.lower() in IMAGE_EXTENSIONS:
            return resolved.read_bytes()

        raw = resolved.read_bytes()
        try:
            text = raw.decode('utf-8')
        except UnicodeDecodeError:
            return raw
        return format_lines(text, offset, limit)

    async def write_file(self, path: str, content: str | bytes) -> None:
        """Write a file under the root, creating parent directories."""
        resolved = self._resolve_path(path)
        resolved.parent.mkdir(parents=True, exist_ok=True)
        if isinstance(content, bytes):
            resolved.write_bytes(content)
        else:
            resolved.write_text(content, encoding='utf-8')

    async def replace_str(
        self,
        path: str,
        old: str,
        new: str,
        *,
        replace_all: bool = False,
    ) -> int:
        """Replace `old` with `new` in a file; returns the replacement count."""
        resolved = self._resolve_path(path)
        if not resolved.is_file():
            raise FileNotFoundError(f'File not found: {path}')

        text = resolved.read_text(encoding='utf-8')
        new_text, count = apply_edit(text, old, new, path, replace_all=replace_all)
        resolved.write_text(new_text, encoding='utf-8')
        return count

    async def ls(self, path: str = '.') -> list[FileInfo]:
        """List directory entries sorted by name; paths are root-relative."""
        resolved = self._resolve_path(path)
        if not resolved.is_dir():
            raise NotADirectoryError(f'Not a directory: {path}')

        entries: list[FileInfo] = []
        for entry in sorted(resolved.iterdir()):
            try:
                stat = entry.stat()
                entries.append(
                    FileInfo(
                        name=entry.name,
                        path=str(entry.relative_to(self._root_dir)),
                        is_dir=entry.is_dir(),
                        size=stat.st_size if not entry.is_dir() else None,
                    )
                )
            except OSError:  # pragma: no cover
                continue
        return entries

    async def glob(self, pattern: str, *, path: str = '.') -> list[str]:
        """Find files matching `pattern` (pathlib glob, `**` supported)."""
        resolved = self._resolve_path(path)
        matches: list[str] = []
        for match in sorted(resolved.glob(pattern)):
            try:
                rel = str(match.relative_to(self._root_dir))
                matches.append(rel)
            except ValueError:  # pragma: no cover
                continue
        return matches

    async def grep(
        self,
        pattern: str,
        *,
        path: str | None = None,
        glob_pattern: str | None = None,
        output_mode: Literal['content', 'files_with_matches', 'count'] = 'content',
    ) -> str:
        """Search files with a Python regex; hidden and binary files are skipped."""
        search_dir = self._resolve_path(path or '.')
        compiled = re.compile(pattern)

        if search_dir.is_file():
            files = [search_dir]
        elif glob_pattern:
            files = sorted(search_dir.glob(glob_pattern))
        else:
            files = sorted(search_dir.rglob('*'))

        results: list[str] = []
        for file_path in files:
            if not file_path.is_file():
                continue
            # Skip hidden files/directories (e.g. .git/, .venv/)
            # NOTE(review): this hidden-file skip also applies when the caller
            # explicitly passed a hidden file as `path` — confirm intent.
            if any(part.startswith('.') for part in file_path.relative_to(self._root_dir).parts):
                continue
            try:
                raw = file_path.read_bytes()
            except OSError:
                continue

            # Skip binary files (null byte in first 8KB)
            if b'\x00' in raw[:8192]:
                continue

            text = raw.decode('utf-8', errors='replace')
            rel_path = str(file_path.relative_to(self._root_dir))
            collect_grep_matches(rel_path, text, compiled, output_mode, results)

            if len(results) > 1000:
                results.append('[... truncated at 1000 matches]')
                break

        return '\n'.join(results)
diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py
new file mode 100644
index 0000000000..5e406b7fc9
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py
@@ -0,0 +1,267 @@
"""In-memory execution environment for testing.

All file operations use an in-memory dictionary. Shell commands are handled
by an optional callback — if not provided, `shell()` raises `RuntimeError`.
"""

from __future__ import annotations

import fnmatch
import posixpath
import re
from collections.abc import Callable
from typing import TYPE_CHECKING, Literal

from ._base import (
    IMAGE_EXTENSIONS,
    ExecutionEnvironment,
    ExecutionResult,
    FileInfo,
    apply_edit,
    collect_grep_matches,
    format_lines,
    glob_match,
)

if TYPE_CHECKING:
    from ._base import Capability


class MemoryEnvironment(ExecutionEnvironment):
    """In-memory execution environment for testing.

    File operations use an in-memory dictionary, making tests fast and
    isolated with no filesystem access. Shell commands can optionally be
    handled by a user-provided callback.

    This is the testing counterpart to `LocalEnvironment`, analogous to
    how `TestModel` and `FunctionModel` relate to real model classes.

    Usage:
        ```python {test="skip" lint="skip"}
        from pydantic_ai.environments.memory import MemoryEnvironment

        env = MemoryEnvironment(files={'main.py': 'print("hello")'})
        async with env:
            content = await env.read_file('main.py')
            assert 'hello' in content
        ```
    """

    def __init__(
        self,
        files: dict[str, str | bytes] | None = None,
        *,
        command_handler: Callable[[str], ExecutionResult] | None = None,
    ) -> None:
        """Create an in-memory execution environment.

        Args:
            files: Initial files to populate the environment with.
                Keys are file paths, values are file contents (str or bytes).
            command_handler: Optional callback for `shell()` calls.
                Receives the command string and returns an `ExecutionResult`.
                If not provided, `shell()` raises `RuntimeError`.
        """
        # Keys are normalized paths; directories exist only implicitly.
        self._files: dict[str, str | bytes] = {}
        if files:
            for path, content in files.items():
                self._files[self._normalize(path)] = content
        self._command_handler = command_handler

    @property
    def capabilities(self) -> frozenset[Capability]:
        # No 'shell' capability: it is only available via command_handler.
        return frozenset({'ls', 'read_file', 'write_file', 'replace_str', 'glob', 'grep'})

    @staticmethod
    def _normalize(path: str) -> str:
        """Normalize a path for consistent storage."""
        normalized = posixpath.normpath(path)
        # Strip leading './' or '/'
        if normalized.startswith('./'):  # pragma: no cover
            normalized = normalized[2:]
        elif normalized.startswith('/'):
            normalized = normalized[1:]
        return normalized

    async def shell(
        self,
        command: str,
        *,
        timeout: float | None = 120,
        env: dict[str, str] | None = None,
    ) -> ExecutionResult:
        """Execute a command using the configured handler.

        Args:
            command: The shell command to execute.
            timeout: Ignored for MemoryEnvironment.
            env: Ignored for MemoryEnvironment.

        Returns:
            The result from the command handler.

        Raises:
            RuntimeError: If no command_handler was provided.
        """
        if self._command_handler is None:
            raise RuntimeError(
                'MemoryEnvironment has no command_handler configured. '
                'Pass command_handler= to the constructor to handle shell() calls.'
            )
        return self._command_handler(command)

    async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> str | bytes:
        """Read a stored file: raw bytes for images/binary, numbered text otherwise."""
        normalized = self._normalize(path)

        # Check if path is a "directory" (any file starts with path/)
        if any(k.startswith(normalized + '/') for k in self._files):
            if normalized not in self._files:
                raise FileNotFoundError(f"'{path}' is a directory, not a file.")

        if normalized not in self._files:
            raise FileNotFoundError(f'File not found: {path}')

        content = self._files[normalized]

        # Return raw bytes for image files
        ext = posixpath.splitext(normalized)[1].lower()
        if ext in IMAGE_EXTENSIONS:
            if isinstance(content, bytes):
                return content
            return content.encode('utf-8')

        # Text mode
        if isinstance(content, bytes):
            try:
                text = content.decode('utf-8')
            except UnicodeDecodeError:
                return content
        else:
            text = content

        return format_lines(text, offset, limit)

    async def write_file(self, path: str, content: str | bytes) -> None:
        """Store (or overwrite) a file in memory."""
        self._files[self._normalize(path)] = content

    async def replace_str(
        self,
        path: str,
        old: str,
        new: str,
        *,
        replace_all: bool = False,
    ) -> int:
        """Replace `old` with `new` in a stored file; returns the replacement count."""
        normalized = self._normalize(path)
        if normalized not in self._files:
            raise FileNotFoundError(f'File not found: {path}')

        content = self._files[normalized]
        text = content.decode('utf-8') if isinstance(content, bytes) else content
        new_text, count = apply_edit(text, old, new, path, replace_all=replace_all)
        self._files[normalized] = new_text
        return count

    async def ls(self, path: str = '.') -> list[FileInfo]:
        """List the direct children of a directory.

        Directories are inferred from path prefixes of stored files.
        NOTE(review): for `path='.'` on an empty environment this returns
        `[]` rather than raising — confirm that asymmetry is intended.
        """
        normalized = self._normalize(path)

        # Collect direct children
        entries: dict[str, FileInfo] = {}
        for file_path in sorted(self._files):
            if normalized == '.':
                rel = file_path
            elif file_path.startswith(normalized + '/'):
                rel = file_path[len(normalized) + 1 :]
            else:
                continue

            # Get the first component (direct child)
            parts = rel.split('/', 1)
            name = parts[0]
            if name in entries:
                continue

            is_dir = len(parts) > 1
            if is_dir:
                entries[name] = FileInfo(
                    name=name,
                    path=f'{normalized}/{name}' if normalized != '.' else name,
                    is_dir=True,
                )
            else:
                content = self._files[file_path]
                size = len(content) if isinstance(content, bytes) else len(content.encode('utf-8'))
                entries[name] = FileInfo(
                    name=name,
                    path=f'{normalized}/{name}' if normalized != '.' else name,
                    is_dir=False,
                    size=size,
                )

        if not entries and normalized != '.':
            raise NotADirectoryError(f'Not a directory: {path}')

        return list(entries.values())

    async def glob(self, pattern: str, *, path: str = '.') -> list[str]:
        """Find stored files matching `pattern` (`**` supported via glob_match).

        Returns the full stored paths, sorted.
        """
        normalized = self._normalize(path)
        matches: list[str] = []
        for file_path in sorted(self._files):
            if normalized != '.':
                if not file_path.startswith(normalized + '/'):
                    continue
                rel = file_path[len(normalized) + 1 :]
            else:
                rel = file_path

            if glob_match(rel, pattern):
                matches.append(file_path)

        return matches

    async def grep(
        self,
        pattern: str,
        *,
        path: str | None = None,
        glob_pattern: str | None = None,
        output_mode: Literal['content', 'files_with_matches', 'count'] = 'content',
    ) -> str:
        """Search stored files with a Python regex; hidden files are skipped."""
        normalized = self._normalize(path or '.')
        compiled = re.compile(pattern)

        results: list[str] = []
        for file_path in sorted(self._files):
            # Path filtering
            if normalized != '.':
                if normalized == file_path:
                    pass  # exact file match
                elif not file_path.startswith(normalized + '/'):
                    continue

            # Glob filtering (matches the basename only, unlike glob())
            if glob_pattern and not fnmatch.fnmatch(posixpath.basename(file_path), glob_pattern):
                continue

            # Skip hidden files
            if any(part.startswith('.') for part in file_path.split('/')):
                continue

            content = self._files[file_path]

            # Skip binary files
            if
isinstance(content, bytes): + if b'\x00' in content[:8192]: + continue + text = content.decode('utf-8', errors='replace') + else: + text = content + + collect_grep_matches(file_path, text, compiled, output_mode, results) + + if len(results) > 1000: + results.append('[... truncated at 1000 matches]') + break + + return '\n'.join(results) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py new file mode 100644 index 0000000000..fd9cd0bf82 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -0,0 +1,426 @@ +"""ExecutionEnvironmentToolset — exposes coding-agent-style tools backed by an ExecutionEnvironment.""" + +from __future__ import annotations + +import posixpath +import re +from asyncio import Lock +from collections.abc import Iterator +from contextlib import AsyncExitStack, contextmanager +from contextvars import ContextVar +from typing import Any, Literal + +from typing_extensions import Self + +from ..environments._base import ( + IMAGE_EXTENSIONS, + IMAGE_MEDIA_TYPES, + ExecutionEnvironment, +) +from ..exceptions import ModelRetry +from ..messages import BinaryContent +from ..toolsets.function import FunctionToolset + +Capability = Literal['ls', 'shell', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'] +"""Toolset-level capability used in `include`/`exclude`. + +These are higher-level than the environment's fine-grained capabilities. +The toolset maps these to the appropriate environment capabilities. +""" + +EditStrategy = Literal['replace_str', 'apply_patch'] +"""Specific edit tool strategy. Expanded from the `edit_file` capability.""" + + +class ExecutionEnvironmentToolset(FunctionToolset[Any]): + """Toolset providing coding-agent-style tools backed by an `ExecutionEnvironment`. + + Tool names and schemas are designed to match what popular coding agents + expose, so models are well-trained on them. 
+ + Tools are dynamically registered based on the environment's `capabilities`, + filtered by `include`/`exclude`. + + The environment can be: + - Passed directly at construction time (most common) + - Set/overridden via context var using `use_environment()` (for testing or per-call-site config) + + Usage: + ```python {test="skip" lint="skip"} + from pydantic_ai import Agent + from pydantic_ai.environments import ExecutionEnvironmentToolset + from pydantic_ai.environments.docker import DockerEnvironment + + env = DockerEnvironment(image='python:3.12-slim') + toolset = ExecutionEnvironmentToolset(env) + + agent = Agent('openai:gpt-5.2', toolsets=[toolset]) + + async with env: + result = await agent.run('Write a script that prints hello') + ``` + """ + + def __init__( + self, + environment: ExecutionEnvironment | None = None, + *, + include: frozenset[Capability] | None = None, + exclude: frozenset[Capability] | None = None, + edit_strategy: EditStrategy | None = None, + require_shell_approval: bool = False, + require_write_approval: bool = False, + image_support: bool = True, + max_image_bytes: int = 50 * 1024 * 1024, + max_retries: int = 1, + id: str | None = None, + ): + """Create a new execution environment toolset. + + Args: + environment: The execution environment to use for tool execution. + Can also be set later via `use_environment()`. + include: Capabilities to include. `None` means all capabilities + from the environment. Pass an explicit set to restrict to + specific capabilities. + exclude: Capabilities to exclude. `None` defaults to no exclusions. + Pass an explicit set to exclude specific capabilities. + edit_strategy: Which edit strategy to use. `None` auto-selects + `'replace_str'` if supported by the environment. + require_shell_approval: Whether the `shell` tool requires human-in-the-loop + approval before execution. Recommended for `LocalEnvironment` where + commands run directly on the host. 
+ require_write_approval: Whether `write_file` and edit tools require + human-in-the-loop approval before execution. + image_support: Whether `read_file` should return images as `BinaryContent` + for multimodal models (otherwise returns a placeholder message). + max_image_bytes: Maximum image file size to return as BinaryContent. + max_retries: Maximum retries per tool call. + id: Optional unique ID for the toolset (required for durable execution). + """ + super().__init__(max_retries=max_retries, id=id) + self._default_environment = environment + self._environment_override: ContextVar[ExecutionEnvironment | None] = ContextVar( + f'_environment_override_{id or "environment"}', default=None + ) + self._include = include + self._exclude = exclude or frozenset() + self._edit_strategy: EditStrategy | None = edit_strategy + self._image_support = image_support + self._max_image_bytes = max_image_bytes + self._require_shell_approval = require_shell_approval + self._require_write_approval = require_write_approval + self._enter_lock: Lock = Lock() + self._running_count: int = 0 + self._exit_stack: AsyncExitStack | None = None + + # Register tools based on what we know at init time. + # If no environment is provided, we register a full set of tools and + # let runtime errors catch unsupported capabilities. 
+ self._register_tools(environment) + + def _resolve_capabilities(self, env: ExecutionEnvironment | None) -> set[Capability]: + """Determine which toolset-level capabilities to register as tools.""" + if env is not None: + env_caps = env.capabilities + available: set[Capability] = set() + # Map env capabilities back to toolset capabilities + for cap in ('ls', 'shell', 'read_file', 'write_file', 'glob', 'grep'): + if cap in env_caps: + available.add(cap) + # Check for edit_file: env has replace_str or apply_patch + if 'replace_str' in env_caps or 'apply_patch' in env_caps: + available.add('edit_file') + else: + # No environment yet — register everything (runtime will error on unsupported) + available = {'ls', 'shell', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'} + + if self._include is not None: + available &= self._include + + available -= self._exclude + return available + + def _resolve_edit_tool(self, env: ExecutionEnvironment | None) -> EditStrategy | None: + """Determine which edit strategy to use.""" + if self._edit_strategy is not None: + return self._edit_strategy + if env is not None: + env_caps = env.capabilities + if 'replace_str' in env_caps: + return 'replace_str' + if 'apply_patch' in env_caps: + return 'apply_patch' + return None + # Default when no environment is available + return 'replace_str' + + def _register_tools(self, env: ExecutionEnvironment | None) -> None: + """Register tools dynamically based on capabilities.""" + caps = self._resolve_capabilities(env) + + if 'ls' in caps: + self._register_ls() + if 'shell' in caps: + self._register_shell() + if 'read_file' in caps: + self._register_read_file() + if 'write_file' in caps: + self._register_write_file() + if 'edit_file' in caps: + edit_strategy = self._resolve_edit_tool(env) + if edit_strategy == 'replace_str': + self._register_replace_str() + if 'glob' in caps: + self._register_glob() + if 'grep' in caps: + self._register_grep() + + def _register_ls(self) -> None: + async def 
ls(path: str = '.') -> str: + """List directory contents. + + Args: + path: The directory path to list. Defaults to the working directory. + """ + try: + entries = await self.required_environment.ls(path) + except (NotADirectoryError, PermissionError, OSError) as e: + return f'Error: {e}' + if not entries: + return 'Empty directory.' + lines: list[str] = [] + for entry in entries: + if entry.is_dir: + lines.append(f'{entry.name}/') + elif entry.size is not None: + lines.append(f'{entry.name} ({entry.size} bytes)') + else: + lines.append(entry.name) + return '\n'.join(lines) + + self.tool(ls) + + def _register_shell(self) -> None: + async def shell(command: str, timeout: int = 120) -> str: + """Execute a shell command and return its output. + + Use this for running scripts, installing packages, and other terminal operations. + + Args: + command: The shell command to execute. + timeout: Maximum seconds to wait for the command to complete. + """ + result = await self.required_environment.shell(command, timeout=timeout) + parts: list[str] = [] + if result.output: + parts.append(result.output) + if result.truncated: + parts.append('[output truncated]') + parts.append(f'Exit code: {result.exit_code}') + return '\n'.join(parts) + + self.tool(requires_approval=self._require_shell_approval)(shell) + + def _register_read_file(self) -> None: + async def read_file(path: str, offset: int = 0, limit: int = 2000) -> Any: + """Read a file from the filesystem. + + Returns text files with line numbers, or renders image files for visual inspection. + Use offset and limit to read specific sections of large files. + + Args: + path: The file path to read. + offset: The line number to start reading from (0-indexed). + limit: Maximum number of lines to read. 
+ """ + try: + content = await self.required_environment.read_file(path, offset=offset, limit=limit) + if isinstance(content, bytes): + ext = posixpath.splitext(path)[1].lower() + if ext in IMAGE_EXTENSIONS: + # Image file — return as BinaryContent or placeholder + if self._image_support: + if len(content) > self._max_image_bytes: + return ( + f'Error: Image too large ({len(content)} bytes, max {self._max_image_bytes} bytes).' + ) + media_type = IMAGE_MEDIA_TYPES.get(ext, 'application/octet-stream') + return BinaryContent(data=content, media_type=media_type) + else: + return f'[Image file: {path} — image_support is disabled on this toolset]' + else: + return f'[Binary file: {path} — cannot display as text]' + return content + except (FileNotFoundError, PermissionError, ValueError, OSError) as e: + return f'Error: {e}' + + self.tool(read_file) + + def _register_write_file(self) -> None: + async def write_file(path: str, content: str) -> str: + """Create or overwrite a file. + + The file and any parent directories will be created if they do not exist. + + Args: + path: The file path to write. + content: The content to write to the file. + """ + try: + await self.required_environment.write_file(path, content) + return f'File written: {path}' + except (PermissionError, OSError) as e: + return f'Error: {e}' + + self.tool(requires_approval=self._require_write_approval)(write_file) + + def _register_replace_str(self) -> None: + async def replace_str(path: str, old: str, new: str, replace_all: bool = False) -> str: + """Edit a file by exact string replacement. + + The old string must match exactly (including whitespace and indentation). + For uniqueness, include surrounding context lines. + Only use this after reading the file first. + + Args: + path: The file path to edit. + old: The exact text to find and replace. + new: The replacement text. + replace_all: Replace all occurrences. Defaults to false (old must be unique). 
+ """ + try: + count = await self.required_environment.replace_str(path, old, new, replace_all=replace_all) + return f'Replaced {count} occurrence{"s" if count != 1 else ""} in {path}.' + except (FileNotFoundError, ValueError) as e: + raise ModelRetry(str(e)) + + self.tool(requires_approval=self._require_write_approval)(replace_str) + + def _register_glob(self) -> None: + async def glob_tool(pattern: str, path: str = '.') -> str: + """Find files matching a glob pattern. + + Supports patterns like `**/*.py`, `src/**/*.ts`. + Returns up to 100 matching file paths. + + Args: + pattern: The glob pattern to match files against. + path: The directory to search in. Defaults to the working directory. + """ + try: + matches = await self.required_environment.glob(pattern, path=path) + except (PermissionError, OSError) as e: + return f'Error: {e}' + if not matches: + return 'No files found.' + truncated = len(matches) > 100 + matches = matches[:100] + result = '\n'.join(matches) + if truncated: + result += '\n[... truncated, showing first 100 matches]' + return result + + self.tool(name='glob')(glob_tool) + + def _register_grep(self) -> None: + async def grep_tool( + pattern: str, + path: str | None = None, + glob: str | None = None, + output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', + ) -> str: + """Search file contents with a regex pattern. + + Args: + pattern: The regex pattern to search for. + path: The file or directory to search in. + glob: Glob pattern to filter which files are searched (e.g. `*.py`). + output_mode: Controls output format: + `content` (default) shows matching lines with file paths and line numbers, + `files_with_matches` shows only file paths, + `count` shows match counts per file. 
+ """ + try: + result = await self.required_environment.grep( + pattern, path=path, glob_pattern=glob, output_mode=output_mode + ) + except (PermissionError, OSError, re.error) as e: + return f'Error: {e}' + if not result.strip(): + return 'No matches found.' + return result + + self.tool(name='grep')(grep_tool) + + @property + def tool_name_conflict_hint(self) -> str: + return 'Wrap the ExecutionEnvironmentToolset in a PrefixedToolset to avoid name conflicts.' + + @property + def environment(self) -> ExecutionEnvironment | None: + """The active execution environment, or None if not configured. + + Checks the context var override first, then falls back to the default. + """ + override = self._environment_override.get() + if override is not None: + return override + return self._default_environment + + @property + def required_environment(self) -> ExecutionEnvironment: + """The active execution environment, raising if not configured. + + Raises: + RuntimeError: If no environment is available. + """ + env = self.environment + if env is not None: + return env + raise RuntimeError( + 'No execution environment configured. Pass one to ExecutionEnvironmentToolset() or use .use_environment().' + ) + + @contextmanager + def use_environment(self, environment: ExecutionEnvironment) -> Iterator[None]: + """Override the execution environment for the current context. + + Useful for testing or using different environments at different call sites. + + Usage: + ```python {test="skip" lint="skip"} + with toolset.use_environment(test_env): + result = await agent.run('test prompt', toolsets=[toolset]) + ``` + + Args: + environment: The execution environment to use within this context. 
+ """ + token = self._environment_override.set(environment) + try: + yield + finally: + self._environment_override.reset(token) + + # --- Lifecycle --- + + async def __aenter__(self) -> Self: + async with self._enter_lock: + self._running_count += 1 + if self._running_count == 1: + self._exit_stack = AsyncExitStack() + try: + await self._exit_stack.enter_async_context(self.required_environment) + except Exception: + self._running_count -= 1 + raise + return self + + async def __aexit__(self, *args: Any) -> bool | None: + async with self._enter_lock: + self._running_count -= 1 + if self._running_count == 0 and self._exit_stack is not None: + await self._exit_stack.aclose() + self._exit_stack = None + return None diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index 154836fe6b..df382e2a84 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -129,6 +129,8 @@ temporal = ["temporalio==1.20.0"] dbos = ["dbos>=2.10.0"] # Prefect prefect = ["prefect>=3.4.21"] +# Sandboxes +docker-sandbox = ["docker>=7.0"] [tool.hatch.metadata] allow-direct-references = true diff --git a/tests/test_environments.py b/tests/test_environments.py new file mode 100644 index 0000000000..69220c3e70 --- /dev/null +++ b/tests/test_environments.py @@ -0,0 +1,2941 @@ +"""Tests for pydantic_ai.environments — ExecutionEnvironment, ExecutionEnvironmentToolset, LocalEnvironment, and MemoryEnvironment.""" + +from __future__ import annotations + +import io +import os +import struct +import tarfile +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock + +import pytest +from inline_snapshot import snapshot + +from pydantic_ai import ToolCallPart +from pydantic_ai._run_context import RunContext +from pydantic_ai._tool_manager import ToolManager +from pydantic_ai.environments import ExecutionEnvironmentToolset, ExecutionResult, FileInfo +from pydantic_ai.environments._base import ( + apply_edit, + build_glob_cmd, + 
    build_grep_cmd,
    build_read_file_cmd,
    filter_grep_count_output,
    format_lines,
    glob_match,
    parse_glob_output,
    shell_escape,
)
from pydantic_ai.environments.local import LocalEnvironment
from pydantic_ai.environments.memory import MemoryEnvironment
from pydantic_ai.exceptions import UnexpectedModelBehavior
from pydantic_ai.models.test import TestModel
from pydantic_ai.usage import RunUsage

# Docker-backed tests only run when the optional `docker` package is installed;
# otherwise they are skipped via `docker_skip` below.
try:
    from docker.errors import NotFound as DockerNotFound

    from pydantic_ai.environments.docker import (
        DockerEnvironment,
        DockerEnvironmentProcess,
        _put_file,
    )
except ImportError:  # pragma: lax no cover
    docker_installed = False
else:
    docker_installed = True

# Run every async test in this module under anyio.
pytestmark = pytest.mark.anyio

docker_skip = pytest.mark.skipif(not docker_installed, reason='docker package not installed')


def build_run_context(deps: Any = None, run_step: int = 0) -> RunContext[Any]:
    # Minimal RunContext suitable for exercising toolset calls in isolation.
    return RunContext(
        deps=deps,
        model=TestModel(),
        usage=RunUsage(),
        prompt=None,
        messages=[],
        run_step=run_step,
    )


# --- Data types ---


def test_execute_result():
    result = ExecutionResult(output='hello\n', exit_code=0)
    assert result.output == 'hello\n'
    assert result.exit_code == 0
    assert result.truncated is False


def test_execute_result_truncated():
    result = ExecutionResult(output='data', exit_code=1, truncated=True)
    assert result.truncated is True


def test_file_info():
    info = FileInfo(name='test.py', path='src/test.py', is_dir=False, size=42)
    assert info.name == 'test.py'
    assert info.is_dir is False
    assert info.size == 42


def test_file_info_directory():
    # Directories carry no size.
    info = FileInfo(name='src', path='src', is_dir=True)
    assert info.is_dir is True
    assert info.size is None


# --- LocalEnvironment: execute ---


async def test_local_execute_basic(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        result = await env.shell('echo hello')
        assert result.exit_code == 0
        assert 'hello' in result.output


async def test_local_execute_exit_code(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        result = await env.shell('exit 42')
        assert result.exit_code == 42


async def test_local_execute_timeout(tmp_path: Path):
    # Timed-out commands report exit code -1 and a "timed out" message.
    async with LocalEnvironment(tmp_path) as env:
        result = await env.shell('sleep 10', timeout=0.5)
        assert result.exit_code == -1
        assert 'timed out' in result.output.lower()


async def test_local_execute_stderr(tmp_path: Path):
    # stderr is merged into the combined output.
    async with LocalEnvironment(tmp_path) as env:
        result = await env.shell('echo error >&2')
        assert 'error' in result.output


# --- LocalEnvironment: environment variables ---


async def test_local_env_vars_baseline(tmp_path: Path):
    async with LocalEnvironment(tmp_path, env_vars={'MY_VAR': 'baseline'}) as env:
        result = await env.shell('echo $MY_VAR')
        assert 'baseline' in result.output


async def test_local_env_vars_per_call(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        result = await env.shell('echo $CALL_VAR', env={'CALL_VAR': 'per_call'})
        assert 'per_call' in result.output


async def test_local_env_vars_merged(tmp_path: Path):
    # Baseline env_vars and per-call env are merged together.
    async with LocalEnvironment(tmp_path, env_vars={'BASE': 'one'}) as env:
        result = await env.shell('echo $BASE $EXTRA', env={'EXTRA': 'two'})
        assert 'one' in result.output
        assert 'two' in result.output


async def test_local_env_vars_per_call_overrides_baseline(tmp_path: Path):
    async with LocalEnvironment(tmp_path, env_vars={'VAR': 'old'}) as env:
        result = await env.shell('echo $VAR', env={'VAR': 'new'})
        assert 'new' in result.output
        assert 'old' not in result.output


async def test_local_inherit_env_true(tmp_path: Path):
    os.environ['_TEST_INHERIT_CHECK'] = 'inherited'
    try:
        async with LocalEnvironment(tmp_path, inherit_env=True) as env:
            result = await env.shell('echo $_TEST_INHERIT_CHECK')
            assert 'inherited' in result.output
    finally:
        del os.environ['_TEST_INHERIT_CHECK']


async def test_local_inherit_env_false(tmp_path: Path):
    os.environ['_TEST_INHERIT_CHECK'] = 'should_not_see'
    try:
        async with LocalEnvironment(tmp_path, inherit_env=False) as env:
            # The x...x wrapper makes an empty expansion visible as 'xx'.
            result = await env.shell('echo x${_TEST_INHERIT_CHECK}x')
            assert result.output.strip() == 'xx'
    finally:
        del os.environ['_TEST_INHERIT_CHECK']


async def test_local_inherit_env_false_with_explicit_vars(tmp_path: Path):
    # Explicit env_vars are still visible even when host env is not inherited.
    async with LocalEnvironment(tmp_path, env_vars={'ONLY_THIS': 'yes'}, inherit_env=False) as env:
        result = await env.shell('/bin/echo $ONLY_THIS')
        assert 'yes' in result.output


# --- LocalEnvironment: file operations ---


async def test_local_write_and_read(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('test.txt', 'line one\nline two\n')
        content = await env.read_file('test.txt')
        assert isinstance(content, str)
        assert 'line one' in content
        assert 'line two' in content


async def test_local_read_line_numbers(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('numbered.txt', 'alpha\nbeta\ngamma\n')
        content = await env.read_file('numbered.txt')
        assert content == snapshot("""\
 1\talpha
 2\tbeta
 3\tgamma
""")


async def test_local_read_with_offset_limit(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        lines = '\n'.join(f'line {i}' for i in range(20))
        await env.write_file('long.txt', lines)

        content = await env.read_file('long.txt', offset=5, limit=3)
        assert content == snapshot("""\
 6\tline 5
 7\tline 6
 8\tline 7
... (12 more lines. Use offset=8 to continue reading.)
""")


async def test_local_read_continuation_hint(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        lines = '\n'.join(f'line {i}' for i in range(20))
        await env.write_file('long.txt', lines)

        content = await env.read_file('long.txt', offset=0, limit=5)
        assert content == snapshot("""\
 1\tline 0
 2\tline 1
 3\tline 2
 4\tline 3
 5\tline 4
... (15 more lines. Use offset=5 to continue reading.)
""")


async def test_local_read_offset_out_of_bounds(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('short.txt', 'one\ntwo\n')
        with pytest.raises(ValueError, match='Offset 100 exceeds file length'):
            await env.read_file('short.txt', offset=100)


async def test_local_read_directory_error(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        (tmp_path / 'subdir').mkdir()
        with pytest.raises(FileNotFoundError, match='is a directory'):
            await env.read_file('subdir')


async def test_local_read_nonexistent(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        with pytest.raises(FileNotFoundError):
            await env.read_file('nonexistent.txt')


async def test_local_write_creates_parent_dirs(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('deep/nested/dir/file.txt', 'content')
        content = await env.read_file('deep/nested/dir/file.txt')
        assert isinstance(content, str)
        assert 'content' in content


async def test_local_write_binary(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('binary.bin', b'\x00\x01\x02\x03')
        assert (tmp_path / 'binary.bin').read_bytes() == b'\x00\x01\x02\x03'


async def test_local_read_file_bytes(tmp_path: Path):
    # Create a minimal PNG (1x1 transparent pixel)
    png_data = (
        b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01'
        b'\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89'
        b'\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01'
        b'\r\n\xb4\x00\x00\x00\x00IEND\xaeB`\x82'
    )
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('image.png', png_data)
        result = await env.read_file('image.png')
        assert isinstance(result, bytes)
        assert result == png_data


# --- LocalEnvironment: edit_file ---


async def test_local_edit_single_replacement(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('edit.txt', 'foo bar baz')
        count = await env.replace_str('edit.txt', 'bar', 'BAR')
        assert count == 1
        content = (tmp_path / 'edit.txt').read_text()
        assert content == 'foo BAR baz'


async def test_local_edit_replace_all(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('edit.txt', 'aaa bbb aaa')
        count = await env.replace_str('edit.txt', 'aaa', 'xxx', replace_all=True)
        assert count == 2
        content = (tmp_path / 'edit.txt').read_text()
        assert content == 'xxx bbb xxx'


async def test_local_edit_not_found(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('edit.txt', 'hello world')
        with pytest.raises(ValueError, match='not found'):
            await env.replace_str('edit.txt', 'missing', 'replacement')


async def test_local_edit_ambiguous_without_replace_all(tmp_path: Path):
    # Without replace_all the old string must be unique in the file.
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('edit.txt', 'dup dup dup')
        with pytest.raises(ValueError, match='3 times'):
            await env.replace_str('edit.txt', 'dup', 'unique')


async def test_local_edit_nonexistent_file(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        with pytest.raises(FileNotFoundError):
            await env.replace_str('missing.txt', 'old', 'new')


async def test_local_edit_multiline(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('code.py', 'def foo():\n return "old"\n\nprint("test")\n')
        count = await env.replace_str('code.py', 'def foo():\n return "old"', 'def foo():\n return "new"')
        assert count == 1
        content = (tmp_path / 'code.py').read_text()
        assert 'return "new"' in content
        assert 'return "old"' not in content
        assert 'print("test")' in content


# --- LocalEnvironment: ls ---


async def test_local_ls(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('a.txt', 'a')
        await env.write_file('b.txt', 'b')
        (tmp_path / 'subdir').mkdir()

        entries = await env.ls('.')
        names = {e.name for e in entries}
        assert 'a.txt' in names
        assert 'b.txt' in names
        assert 'subdir' in names

        dirs = [e for e in entries if e.is_dir]
        files = [e for e in entries if not e.is_dir]
        assert any(d.name == 'subdir' for d in dirs)
        assert all(f.size is not None and f.size > 0 for f in files)


async def test_local_ls_not_a_directory(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('file.txt', 'content')
        with pytest.raises(NotADirectoryError):
            await env.ls('file.txt')


# --- LocalEnvironment: glob ---


async def test_local_glob(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('src/main.py', '# main')
        await env.write_file('src/utils.py', '# utils')
        await env.write_file('src/data.json', '{}')

        matches = await env.glob('**/*.py')
        assert len(matches) == 2
        assert any('main.py' in m for m in matches)
        assert any('utils.py' in m for m in matches)
        assert not any('data.json' in m for m in matches)


async def test_local_glob_no_matches(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        matches = await env.glob('**/*.rs')
        assert matches == []


# --- LocalEnvironment: grep ---


async def test_local_grep(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('a.py', 'def hello():\n pass\n')
        await env.write_file('b.py', 'x = 1\n')

        result = await env.grep('hello')
        assert 'a.py' in result
        assert 'hello' in result
        assert 'b.py' not in result


async def test_local_grep_with_glob_pattern(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('code.py', 'target = 1\n')
        await env.write_file('code.js', 'target = 2\n')

        result = await env.grep('target', glob_pattern='*.py')
        assert 'code.py' in result
        assert 'code.js' not in result


async def test_local_grep_line_numbers(tmp_path: Path):
    # Content mode reports path:line:text for each match.
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('test.txt', 'alpha\nbeta\ngamma\nbeta\n')

        result = await env.grep('beta')
        assert result == snapshot('test.txt:2:beta\ntest.txt:4:beta')


async def test_local_grep_no_matches(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('test.txt', 'nothing interesting')
        result = await env.grep('nonexistent_pattern')
        assert result == ''


async def test_local_grep_skips_hidden_files(tmp_path: Path):
    # Dotfiles and anything under dot-directories are excluded from grep.
    async with LocalEnvironment(tmp_path) as env:
        await env.write_file('visible.py', 'target_string\n')
        (tmp_path / '.hidden').mkdir()
        (tmp_path / '.hidden' / 'secret.py').write_text('target_string\n')
        (tmp_path / '.dotfile').write_text('target_string\n')

        result = await env.grep('target_string')
        assert 'visible.py' in result
        assert '.hidden' not in result
        assert '.dotfile' not in result


# --- LocalEnvironment: create_process ---


async def test_local_create_process(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        proc = await env.create_process('echo interactive')
        async with proc:
            data = await proc.recv(timeout=5)
            assert b'interactive' in data


async def test_local_create_process_env(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        proc = await env.create_process('echo $PROC_VAR', env={'PROC_VAR': 'from_process'})
        async with proc:
            data = await proc.recv(timeout=5)
            assert b'from_process' in data


async def test_local_create_process_stdin(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        # Use head -1 so the process exits after reading one line
        proc = await env.create_process('head -1')
        async with proc:
            await proc.send(b'hello from stdin\n')
            data = await proc.recv(timeout=5)
            assert b'hello from stdin' in data


async def test_local_process_wait(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        proc = await env.create_process('exit 7')
        async with proc:
            rc = await proc.wait(timeout=5)
            assert rc == 7


async def test_local_process_kill(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        proc = await env.create_process('sleep 60')
        # Don't use async with — we want to test manual kill
        await proc.kill()
        assert proc.returncode is not None


# --- LocalEnvironment: path traversal ---


async def test_local_path_traversal_blocked(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        with pytest.raises(PermissionError, match='outside the environment root'):
            await env.read_file('../../../etc/passwd')


async def test_local_path_traversal_write_blocked(tmp_path: Path):
    async with LocalEnvironment(tmp_path) as env:
        with pytest.raises(PermissionError, match='outside the environment root'):
            await env.write_file('../escape.txt', 'malicious')


# --- LocalEnvironment: creates root dir ---


async def test_local_creates_root_dir(tmp_path: Path):
    # Entering the environment creates the root directory if it is missing.
    root = tmp_path / 'new_root'
    assert not root.exists()
    async with LocalEnvironment(root) as env:
        assert root.exists()
        result = await env.shell('echo works')
        assert 'works' in result.output


# --- ExecutionEnvironmentToolset ---


async def test_toolset_tool_names():
    toolset = ExecutionEnvironmentToolset(LocalEnvironment('.'))
    tool_names = sorted(toolset.tools.keys())
    assert tool_names == snapshot(['glob', 'grep', 'ls', 'read_file', 'replace_str', 'shell', 'write_file'])


async def test_toolset_include_flags():
    toolset = ExecutionEnvironmentToolset(
        LocalEnvironment('.'),
        include=frozenset(),
+ ) + assert toolset.tools == {} + + +async def test_toolset_include_shell_only(): + toolset = ExecutionEnvironmentToolset( + LocalEnvironment('.'), + include=frozenset({'shell'}), + ) + assert sorted(toolset.tools.keys()) == ['shell'] + + +async def test_toolset_bash_tool(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='shell', args={'command': 'echo hello'})) + assert result == snapshot("""\ +hello + +Exit code: 0\ +""") + + +async def test_toolset_read_write_tools(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + # Write + write_result = await manager.handle_call( + ToolCallPart(tool_name='write_file', args={'path': 'test.txt', 'content': 'hello world'}) + ) + assert write_result == snapshot('File written: test.txt') + + # Read + read_result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': 'test.txt'})) + assert read_result == snapshot(' 1\thello world\n') + + +async def test_toolset_edit_retry_on_error(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env, max_retries=0) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('test.txt', 'content') + + # Edit with non-matching string: ModelRetry is raised by tool, but with max_retries=0 + # the ToolManager wraps it into UnexpectedModelBehavior + with pytest.raises(UnexpectedModelBehavior, match='exceeded max retries count of 0'): + await manager.handle_call( + ToolCallPart( + tool_name='replace_str', + args={'path': 'test.txt', 'old': 'nonexistent', 'new': 'replacement'}, + ) + ) 
+ + +async def test_toolset_glob_tool(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('a.py', '# a') + await env.write_file('b.py', '# b') + + result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.py'})) + assert result == snapshot("""\ +a.py +b.py\ +""") + + +async def test_toolset_grep_tool(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('search.py', 'def find_me():\n pass\n') + + result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'find_me'})) + assert result == snapshot('search.py:1:def find_me():') + + +# --- ExecutionEnvironmentToolset: error handling --- + + +async def test_toolset_read_nonexistent_returns_error(tmp_path: Path): + """read_file on a nonexistent file returns an error string instead of crashing.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': 'nope.txt'})) + assert 'Error:' in str(result) + + +async def test_toolset_read_path_traversal_returns_error(tmp_path: Path): + """read_file with path traversal returns an error string.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': '../../etc/passwd'})) + assert 'Error:' in str(result) + + +async def 
test_toolset_write_path_traversal_returns_error(tmp_path: Path): + """write_file with path traversal returns an error string.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call( + ToolCallPart(tool_name='write_file', args={'path': '../../tmp/evil.txt', 'content': 'bad'}) + ) + assert 'Error:' in str(result) + + +async def test_toolset_glob_path_traversal_returns_error(tmp_path: Path): + """glob with path traversal returns an error string.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call( + ToolCallPart(tool_name='glob', args={'pattern': '*.py', 'path': '../../etc'}) + ) + assert 'Error:' in str(result) + + +async def test_toolset_grep_invalid_regex_returns_error(tmp_path: Path): + """grep with invalid regex returns an error string.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('test.txt', 'content') + + result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': '[invalid'})) + assert 'Error:' in str(result) + + +async def test_toolset_read_offset_out_of_bounds_returns_error(tmp_path: Path): + """read_file with offset past EOF returns an error string.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('short.txt', 'one\ntwo\n') + + result = await manager.handle_call( + ToolCallPart(tool_name='read_file', args={'path': 'short.txt', 'offset': 
100}) + ) + assert 'Error:' in str(result) + assert 'Offset 100 exceeds' in str(result) + + +async def test_toolset_read_continuation_hint(tmp_path: Path): + """read_file includes continuation hint when there are more lines.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + lines = '\n'.join(f'line {i}' for i in range(20)) + await env.write_file('long.txt', lines) + + result = await manager.handle_call( + ToolCallPart(tool_name='read_file', args={'path': 'long.txt', 'offset': 0, 'limit': 5}) + ) + assert result == snapshot("""\ + 1 line 0 + 2 line 1 + 3 line 2 + 4 line 3 + 5 line 4 +... (15 more lines. Use offset=5 to continue reading.) +""") + + +# --- ExecutionEnvironmentToolset: approval flags --- + + +async def test_toolset_require_shell_approval(): + """require_shell_approval sets requires_approval on the shell tool.""" + toolset = ExecutionEnvironmentToolset(require_shell_approval=True) + ctx = build_run_context(None) + tools = await toolset.get_tools(ctx) + assert tools['shell'].tool_def.kind == 'unapproved' + # Other tools should be normal + assert tools['read_file'].tool_def.kind == 'function' + + +async def test_toolset_require_write_approval(): + """require_write_approval sets requires_approval on write_file and replace_str.""" + toolset = ExecutionEnvironmentToolset(require_write_approval=True) + ctx = build_run_context(None) + tools = await toolset.get_tools(ctx) + assert tools['write_file'].tool_def.kind == 'unapproved' + assert tools['replace_str'].tool_def.kind == 'unapproved' + # read_file and search tools should NOT require approval + assert tools['read_file'].tool_def.kind == 'function' + assert tools['glob'].tool_def.kind == 'function' + assert tools['grep'].tool_def.kind == 'function' + + +async def test_toolset_default_no_approval(): + """By default, no tools require approval.""" + toolset = 
ExecutionEnvironmentToolset() + ctx = build_run_context(None) + tools = await toolset.get_tools(ctx) + for tool in tools.values(): + assert tool.tool_def.kind == 'function' + + +# --- ExecutionEnvironmentToolset: environment management --- + + +async def test_toolset_environment_property(): + env = LocalEnvironment('.') + toolset = ExecutionEnvironmentToolset(env) + assert toolset.environment is env + assert toolset.required_environment is env + + +async def test_toolset_no_environment_returns_none(): + toolset = ExecutionEnvironmentToolset() + assert toolset.environment is None + + +async def test_toolset_no_environment_required_raises(): + toolset = ExecutionEnvironmentToolset() + with pytest.raises(RuntimeError, match='No execution environment configured'): + _ = toolset.required_environment + + +async def test_toolset_use_environment(): + env1 = LocalEnvironment('/tmp/env1') + env2 = LocalEnvironment('/tmp/env2') + toolset = ExecutionEnvironmentToolset(env1) + + assert toolset.environment is env1 + with toolset.use_environment(env2): + assert toolset.environment is env2 + assert toolset.environment is env1 + + +async def test_toolset_use_environment_no_default(): + env = LocalEnvironment('.') + toolset = ExecutionEnvironmentToolset() + + assert toolset.environment is None + + with toolset.use_environment(env): + assert toolset.environment is env + + assert toolset.environment is None + + +async def test_toolset_instructions(): + """Environment instructions is accessible for each tool.""" + env = LocalEnvironment('.') + # LocalEnvironment returns None for all tool descriptions by default + assert env.instructions('shell') is None + assert env.instructions('read_file') is None + + +async def test_toolset_tool_name_conflict_hint(): + toolset = ExecutionEnvironmentToolset(LocalEnvironment('.')) + assert 'PrefixedToolset' in toolset.tool_name_conflict_hint + + +# --- ExecutionEnvironmentToolset: lifecycle --- + + +async def test_toolset_lifecycle(tmp_path: Path): + 
env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + + async with toolset: + result = await env.shell('echo lifecycle') + assert 'lifecycle' in result.output + + +# --- ExecutionEnvironmentToolset: image support --- + + +async def test_toolset_image_support_disabled(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env, image_support=False) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('photo.png', b'\x89PNG\r\n\x1a\n') + result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': 'photo.png'})) + assert result == snapshot('[Image file: photo.png — image_support is disabled on this toolset]') + + +# --- LocalEnvironment: grep output modes --- + + +async def test_local_grep_files_with_matches(tmp_path: Path): + async with LocalEnvironment(tmp_path) as env: + await env.write_file('a.py', 'target = 1\nother = 2\n') + await env.write_file('b.py', 'target = 3\ntarget = 4\n') + await env.write_file('c.py', 'nothing here\n') + + result = await env.grep('target', output_mode='files_with_matches') + lines = result.strip().splitlines() + assert sorted(lines) == ['a.py', 'b.py'] + + +async def test_local_grep_count(tmp_path: Path): + async with LocalEnvironment(tmp_path) as env: + await env.write_file('a.py', 'target = 1\nother = 2\n') + await env.write_file('b.py', 'target = 3\ntarget = 4\n') + await env.write_file('c.py', 'nothing here\n') + + result = await env.grep('target', output_mode='count') + lines = sorted(result.strip().splitlines()) + assert lines == ['a.py:1', 'b.py:2'] + + +async def test_local_grep_content_default(tmp_path: Path): + """Default output_mode is 'content' with file:line:text format.""" + async with LocalEnvironment(tmp_path) as env: + await env.write_file('test.py', 'hello\nworld\n') + + result = await env.grep('hello') + assert result == snapshot('test.py:1:hello') 
+ + +# --- LocalEnvironment: binary file detection --- + + +async def test_local_grep_skips_binary_files(tmp_path: Path): + async with LocalEnvironment(tmp_path) as env: + await env.write_file('text.py', 'findme = True\n') + await env.write_file('binary.pyc', b'\x00\x01\x02findme\x03\x04') + + result = await env.grep('findme') + assert 'text.py' in result + assert 'binary.pyc' not in result + + +async def test_local_grep_binary_detection_first_8kb(tmp_path: Path): + """Binary detection checks only the first 8KB.""" + async with LocalEnvironment(tmp_path) as env: + # File with null byte after 8KB — should be treated as text + content = 'findme\n' + ('x' * 8200) + '\x00' + await env.write_file('mostly_text.txt', content) + + result = await env.grep('findme') + assert 'mostly_text.txt' in result + + +# --- Toolset: grep output_mode --- + + +async def test_toolset_grep_files_with_matches(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('a.py', 'target = 1\n') + await env.write_file('b.py', 'other = 2\n') + + result = await manager.handle_call( + ToolCallPart(tool_name='grep', args={'pattern': 'target', 'output_mode': 'files_with_matches'}) + ) + assert result == snapshot('a.py') + + +async def test_toolset_grep_count(tmp_path: Path): + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('a.py', 'x = 1\nx = 2\nx = 3\n') + + result = await manager.handle_call( + ToolCallPart(tool_name='grep', args={'pattern': 'x', 'output_mode': 'count'}) + ) + assert result == snapshot('a.py:3') + + +# --- MemoryEnvironment --- + + +async def test_memory_read_write(): + async with MemoryEnvironment() as env: + await env.write_file('test.txt', 
'hello world\n') + content = await env.read_file('test.txt') + assert content == snapshot("""\ + 1\thello world +""") + + +async def test_memory_initial_files(): + env = MemoryEnvironment(files={'a.txt': 'alpha', 'b.txt': 'beta'}) + async with env: + a = await env.read_file('a.txt') + assert isinstance(a, str) + assert 'alpha' in a + b = await env.read_file('b.txt') + assert isinstance(b, str) + assert 'beta' in b + + +async def test_memory_read_nonexistent(): + async with MemoryEnvironment() as env: + with pytest.raises(FileNotFoundError): + await env.read_file('nope.txt') + + +async def test_memory_read_directory_error(): + env = MemoryEnvironment(files={'dir/file.txt': 'content'}) + async with env: + with pytest.raises(FileNotFoundError, match='is a directory'): + await env.read_file('dir') + + +async def test_memory_read_offset_limit(): + lines = '\n'.join(f'line {i}' for i in range(20)) + env = MemoryEnvironment(files={'long.txt': lines}) + async with env: + content = await env.read_file('long.txt', offset=5, limit=3) + assert isinstance(content, str) + assert 'line 5' in content + assert 'line 7' in content + assert 'line 4' not in content + assert 'line 8' not in content + + +async def test_memory_read_continuation_hint(): + lines = '\n'.join(f'line {i}' for i in range(20)) + env = MemoryEnvironment(files={'long.txt': lines}) + async with env: + content = await env.read_file('long.txt', offset=0, limit=5) + assert isinstance(content, str) + assert '15 more lines' in content + assert 'offset=5' in content + + +async def test_memory_read_offset_out_of_bounds(): + env = MemoryEnvironment(files={'short.txt': 'one\ntwo\n'}) + async with env: + with pytest.raises(ValueError, match='Offset 100 exceeds'): + await env.read_file('short.txt', offset=100) + + +async def test_memory_edit_file(): + env = MemoryEnvironment(files={'code.py': 'old_value = 1'}) + async with env: + count = await env.replace_str('code.py', 'old_value', 'new_value') + assert count == 1 + content 
= await env.read_file('code.py') + assert isinstance(content, str) + assert 'new_value' in content + assert 'old_value' not in content + + +async def test_memory_edit_file_not_found(): + async with MemoryEnvironment() as env: + with pytest.raises(FileNotFoundError): + await env.replace_str('nope.txt', 'a', 'b') + + +async def test_memory_edit_string_not_found(): + env = MemoryEnvironment(files={'f.txt': 'hello'}) + async with env: + with pytest.raises(ValueError, match='not found'): + await env.replace_str('f.txt', 'missing', 'replacement') + + +async def test_memory_edit_ambiguous(): + env = MemoryEnvironment(files={'f.txt': 'dup dup dup'}) + async with env: + with pytest.raises(ValueError, match='3 times'): + await env.replace_str('f.txt', 'dup', 'x') + + +async def test_memory_edit_replace_all(): + env = MemoryEnvironment(files={'f.txt': 'aaa bbb aaa'}) + async with env: + count = await env.replace_str('f.txt', 'aaa', 'xxx', replace_all=True) + assert count == 2 + content = await env.read_file('f.txt') + assert isinstance(content, str) + assert 'xxx bbb xxx' in content + + +async def test_memory_ls(): + env = MemoryEnvironment( + files={ + 'a.txt': 'a', + 'b.txt': 'bb', + 'sub/c.txt': 'ccc', + } + ) + async with env: + entries = await env.ls('.') + names = {e.name for e in entries} + assert names == {'a.txt', 'b.txt', 'sub'} + + dirs = [e for e in entries if e.is_dir] + files = [e for e in entries if not e.is_dir] + assert len(dirs) == 1 + assert dirs[0].name == 'sub' + assert all(f.size is not None for f in files) + + +async def test_memory_ls_subdirectory(): + env = MemoryEnvironment(files={'sub/a.txt': 'a', 'sub/b.txt': 'b'}) + async with env: + entries = await env.ls('sub') + names = {e.name for e in entries} + assert names == {'a.txt', 'b.txt'} + + +async def test_memory_ls_not_a_directory(): + async with MemoryEnvironment() as env: + with pytest.raises(NotADirectoryError): + await env.ls('nonexistent') + + +async def test_memory_glob(): + env = 
MemoryEnvironment( + files={ + 'src/main.py': '# main', + 'src/utils.py': '# utils', + 'src/data.json': '{}', + } + ) + async with env: + matches = await env.glob('*.py', path='src') + assert sorted(matches) == ['src/main.py', 'src/utils.py'] + + +async def test_memory_glob_no_matches(): + env = MemoryEnvironment(files={'a.py': ''}) + async with env: + matches = await env.glob('*.rs') + assert matches == [] + + +async def test_memory_grep_content(): + env = MemoryEnvironment( + files={ + 'a.py': 'def hello():\n pass\n', + 'b.py': 'x = 1\n', + } + ) + async with env: + result = await env.grep('hello') + assert result == snapshot('a.py:1:def hello():') + + +async def test_memory_grep_files_with_matches(): + env = MemoryEnvironment( + files={ + 'a.py': 'target = 1\n', + 'b.py': 'target = 2\ntarget = 3\n', + 'c.py': 'nothing\n', + } + ) + async with env: + result = await env.grep('target', output_mode='files_with_matches') + lines = sorted(result.strip().splitlines()) + assert lines == ['a.py', 'b.py'] + + +async def test_memory_grep_count(): + env = MemoryEnvironment( + files={ + 'a.py': 'x = 1\n', + 'b.py': 'x = 2\nx = 3\n', + } + ) + async with env: + result = await env.grep('x', output_mode='count') + lines = sorted(result.strip().splitlines()) + assert lines == ['a.py:1', 'b.py:2'] + + +async def test_memory_grep_skips_binary(): + env = MemoryEnvironment( + files={ + 'text.py': 'findme = True\n', + 'binary.dat': b'\x00\x01findme\x02', + } + ) + async with env: + result = await env.grep('findme') + assert 'text.py' in result + assert 'binary.dat' not in result + + +async def test_memory_grep_skips_hidden(): + env = MemoryEnvironment( + files={ + 'visible.py': 'target\n', + '.hidden/secret.py': 'target\n', + } + ) + async with env: + result = await env.grep('target') + assert 'visible.py' in result + assert '.hidden' not in result + + +async def test_memory_grep_with_glob_pattern(): + env = MemoryEnvironment( + files={ + 'code.py': 'target\n', + 'code.js': 
'target\n', + } + ) + async with env: + result = await env.grep('target', glob_pattern='*.py') + assert 'code.py' in result + assert 'code.js' not in result + + +async def test_memory_execute_with_handler(): + def handler(cmd: str) -> ExecutionResult: + return ExecutionResult(output=f'ran: {cmd}\n', exit_code=0) + + async with MemoryEnvironment(command_handler=handler) as env: + result = await env.shell('echo hello') + assert result.output == 'ran: echo hello\n' + assert result.exit_code == 0 + + +async def test_memory_execute_no_handler(): + async with MemoryEnvironment() as env: + with pytest.raises(RuntimeError, match='no command_handler'): + await env.shell('echo hello') + + +async def test_memory_create_process_not_supported(): + async with MemoryEnvironment() as env: + with pytest.raises(NotImplementedError): + await env.create_process('echo hello') + + +async def test_memory_write_binary(): + async with MemoryEnvironment() as env: + await env.write_file('data.bin', b'\x00\x01\x02') + # Non-image binary files are returned as text (decoded) + content = await env.read_file('data.bin') + assert isinstance(content, str) + + +async def test_memory_read_file_bytes(): + png_data = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR' + env = MemoryEnvironment(files={'img.png': png_data}) + async with env: + result = await env.read_file('img.png') + assert isinstance(result, bytes) + assert result == png_data + + +# --- MemoryEnvironment with ExecutionEnvironmentToolset --- + + +async def test_memory_toolset_integration(): + """MemoryEnvironment works with ExecutionEnvironmentToolset for full agent testing.""" + env = MemoryEnvironment(files={'main.py': 'print("hello")\n'}) + toolset = ExecutionEnvironmentToolset(env, exclude=frozenset({'shell'})) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + # read_file + result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': 'main.py'})) + assert 
result == snapshot(' 1\tprint("hello")\n') + + # write_file + result = await manager.handle_call( + ToolCallPart(tool_name='write_file', args={'path': 'new.py', 'content': 'x = 1'}) + ) + assert result == snapshot('File written: new.py') + + # glob + result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.py'})) + assert result == snapshot("""\ +main.py +new.py\ +""") + + # grep + result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'hello'})) + assert result == snapshot('main.py:1:print("hello")') + + +# --- Docker instantiation tests --- + + +@docker_skip +def test_docker_sandbox_instantiation(): + """DockerEnvironment can be constructed without starting Docker.""" + + # Verify construction succeeds with default and custom settings + sandbox = DockerEnvironment(image='python:3.12-slim') + assert isinstance(sandbox, DockerEnvironment) + + sandbox_with_opts = DockerEnvironment( + image='node:20-slim', + memory_limit='512m', + cpu_limit=1.0, + network_disabled=True, + ) + assert isinstance(sandbox_with_opts, DockerEnvironment) + + # Verify security hardening parameters are accepted + sandbox_hardened = DockerEnvironment( + image='python:3.12-slim', + network_disabled=True, + read_only=True, + cap_drop=['ALL'], + security_opt=['no-new-privileges'], + user='nobody', + pids_limit=256, + tmpfs={'/tmp': 'noexec,nosuid,size=64m'}, + init=True, + ) + assert isinstance(sandbox_hardened, DockerEnvironment) + + +# --- Agent-level integration test --- + + +async def test_agent_with_execution_toolset(): + """Agent with ExecutionEnvironmentToolset runs end-to-end using TestModel and MemoryEnvironment.""" + from pydantic_ai import Agent + + env = MemoryEnvironment( + files={'data.txt': 'hello world\n'}, + command_handler=lambda cmd: ExecutionResult(output=f'executed: {cmd}\n', exit_code=0), + ) + toolset = ExecutionEnvironmentToolset(env) + + agent = Agent('test', toolsets=[toolset]) + + async with env: + result = await 
agent.run('Read the file data.txt') + # The TestModel will call tools and we verify it completes without error + assert result.output is not None + + +# pyright: reportPrivateUsage=false, reportUnknownMemberType=false, reportUnknownArgumentType=false, reportUnknownVariableType=false, reportPossiblyUnboundVariable=false + + +# --- _base.py helper functions --- + + +def test_shell_escape(): + assert shell_escape('hello') == "'hello'" + assert shell_escape("it's") == "'it'\\''s'" + assert shell_escape('') == "''" + assert shell_escape('a b c') == "'a b c'" + + +def test_format_lines_empty_file(): + """format_lines on empty string returns just a newline.""" + result = format_lines('', 0, 2000) + assert result == '\n' + + +def test_format_lines_trailing_newline(): + """format_lines adds trailing newline when text doesn't end with one.""" + result = format_lines('no trailing newline', 0, 2000) + assert result.endswith('\n') + assert '1\tno trailing newline' in result + + +def test_glob_match_simple(): + assert glob_match('foo.py', '*.py') is True + assert glob_match('foo.txt', '*.py') is False + + +def test_glob_match_double_star(): + """glob_match with ** patterns for recursive matching.""" + assert glob_match('src/main.py', '**/*.py') is True + assert glob_match('deep/nested/dir/file.py', '**/*.py') is True + assert glob_match('file.py', '**/*.py') is True + assert glob_match('src/main.txt', '**/*.py') is False + + +def test_glob_match_double_star_prefix(): + """glob_match with **/ prefix.""" + assert glob_match('a/b/c.txt', '**/c.txt') is True + assert glob_match('c.txt', '**/c.txt') is True + + +def test_glob_match_double_star_suffix(): + """glob_match with ** at end.""" + assert glob_match('src/foo/bar', 'src/**') is True + + +def test_glob_match_question_mark(): + """glob_match with ? 
wildcard.""" + assert glob_match('test.py', 'tes?.py') is True + assert glob_match('test.py', 'te??.py') is True + assert glob_match('test.py', 't???.py') is True # t + 3 chars (est) + .py + assert glob_match('test.py', 't????.py') is False # needs 4 chars between t and .py + + +def test_build_read_file_cmd_default(): + cmd = build_read_file_cmd('test.txt') + assert 'awk' in cmd + assert "'test.txt'" in cmd + assert 'NR>=1' in cmd + assert 'NR<=2000' in cmd + + +def test_build_read_file_cmd_with_offset(): + cmd = build_read_file_cmd('file.py', offset=10, limit=50) + assert 'NR>=11' in cmd + assert 'NR<=60' in cmd + assert "'file.py'" in cmd + + +def test_build_read_file_cmd_continuation_hint(): + """build_read_file_cmd includes a continuation hint in the awk END block.""" + cmd = build_read_file_cmd('file.py', offset=0, limit=10) + assert 'more lines' in cmd + assert 'offset=10' in cmd + + +def test_build_grep_cmd_content(): + cmd = build_grep_cmd('pattern') + assert 'grep -rI' in cmd + assert '-n' in cmd + assert "'pattern'" in cmd + assert "'.'" in cmd + + +def test_build_grep_cmd_files_with_matches(): + cmd = build_grep_cmd('pat', output_mode='files_with_matches') + assert '-l' in cmd + assert '-n' not in cmd + + +def test_build_grep_cmd_count(): + cmd = build_grep_cmd('pat', output_mode='count') + assert '-c' in cmd + + +def test_build_grep_cmd_with_path(): + cmd = build_grep_cmd('pat', path='src') + assert "'src'" in cmd + + +def test_build_grep_cmd_with_glob_pattern(): + """glob_pattern is shell-escaped to prevent injection.""" + cmd = build_grep_cmd('pat', glob_pattern='*.py') + assert '--include' in cmd + assert "'*.py'" in cmd + + +def test_build_grep_cmd_glob_pattern_escaping(): + """Verify glob_pattern with special chars is properly shell-escaped.""" + cmd = build_grep_cmd('pat', glob_pattern='*.py') + # The glob pattern should be shell-escaped (wrapped in single quotes) + assert "--include '*.py'" in cmd + + # Even a malicious glob_pattern gets safely 
escaped + cmd2 = build_grep_cmd('pat', glob_pattern='$(evil)') + assert '$(evil)' not in cmd2.replace("'$(evil)'", '') # Only appears inside quotes + + +def test_build_glob_cmd(): + cmd = build_glob_cmd('*.py') + assert 'find' in cmd + assert "'*.py'" in cmd + assert "'.'" in cmd + + +def test_build_glob_cmd_with_path(): + cmd = build_glob_cmd('*.py', path='src') + assert "'src'" in cmd + + +def test_parse_glob_output_empty(): + assert parse_glob_output('') == [] + assert parse_glob_output(' ') == [] + assert parse_glob_output('\n') == [] + + +def test_parse_glob_output_multiline(): + assert parse_glob_output('a.py\nb.py\nc.py\n') == ['a.py', 'b.py', 'c.py'] + + +def test_filter_grep_count_output(): + text = 'a.py:3\nb.py:0\nc.py:1' + result = filter_grep_count_output(text) + assert result == 'a.py:3\nc.py:1' + + +def test_filter_grep_count_output_all_zero(): + text = 'a.py:0\nb.py:0' + result = filter_grep_count_output(text) + assert result == '' + + +def test_apply_edit_basic(): + new_text, count = apply_edit('hello world', 'world', 'earth', 'test.txt', replace_all=False) + assert new_text == 'hello earth' + assert count == 1 + + +def test_apply_edit_replace_all(): + new_text, count = apply_edit('aaa bbb aaa', 'aaa', 'xxx', 'test.txt', replace_all=True) + assert new_text == 'xxx bbb xxx' + assert count == 2 + + +def test_apply_edit_not_found(): + with pytest.raises(ValueError, match='not found'): + apply_edit('hello', 'missing', 'x', 'test.txt', replace_all=False) + + +def test_apply_edit_ambiguous(): + with pytest.raises(ValueError, match='2 times'): + apply_edit('aa bb aa', 'aa', 'x', 'test.txt', replace_all=False) + + +# --- LocalEnvironment: additional edge cases --- + + +async def test_local_execute_no_timeout(tmp_path: Path): + """execute() with timeout=None completes without timeout.""" + async with LocalEnvironment(tmp_path) as env: + result = await env.shell('echo no_timeout', timeout=None) + assert result.exit_code == 0 + assert 'no_timeout' in 
result.output
+
+
+async def test_local_read_file_bytes_directory(tmp_path: Path):
+    """read_file on a directory raises FileNotFoundError."""
+    async with LocalEnvironment(tmp_path) as env:
+        (tmp_path / 'adir').mkdir()
+        with pytest.raises(FileNotFoundError, match='is a directory'):
+            await env.read_file('adir')
+
+
+async def test_local_read_file_bytes_nonexistent(tmp_path: Path):
+    """read_file on a nonexistent file raises FileNotFoundError."""
+    async with LocalEnvironment(tmp_path) as env:
+        with pytest.raises(FileNotFoundError):
+            await env.read_file('nope.bin')
+
+
+async def test_local_grep_specific_file(tmp_path: Path):
+    """grep targeting a specific file works."""
+    async with LocalEnvironment(tmp_path) as env:
+        await env.write_file('target.py', 'findme = True\n')
+        await env.write_file('other.py', 'findme = False\n')
+
+        result = await env.grep('findme', path='target.py')
+        assert 'target.py' in result
+        assert 'other.py' not in result
+
+
+# --- MemoryEnvironment: additional edge cases ---
+
+
+async def test_memory_normalize_paths():
+    """MemoryEnvironment normalizes paths correctly."""
+    async with MemoryEnvironment() as env:
+        await env.write_file('./test.txt', 'content')
+        content = await env.read_file('test.txt')
+        assert isinstance(content, str)
+        assert 'content' in content
+
+
+async def test_memory_normalize_leading_slash():
+    """MemoryEnvironment strips leading slashes."""
+    async with MemoryEnvironment() as env:
+        await env.write_file('/test.txt', 'content')
+        content = await env.read_file('test.txt')
+        assert isinstance(content, str)
+        assert 'content' in content
+
+
+async def test_memory_read_file_text():
+    """read_file on text file returns formatted string."""
+    env = MemoryEnvironment(files={'text.txt': 'hello'})
+    async with env:
+        result = await env.read_file('text.txt')
+        assert isinstance(result, str)
+        assert 'hello' in result
+
+
+async def test_memory_read_file_not_found():
+    """read_file on missing file raises 
FileNotFoundError.""" + async with MemoryEnvironment() as env: + with pytest.raises(FileNotFoundError): + await env.read_file('missing.txt') + + +async def test_memory_edit_binary(): + """edit_file works on binary content.""" + env = MemoryEnvironment(files={'data.txt': b'hello world'}) + async with env: + count = await env.replace_str('data.txt', 'world', 'earth') + assert count == 1 + + +async def test_memory_grep_exact_path(): + """grep with path= targeting an exact file.""" + env = MemoryEnvironment( + files={ + 'src/a.py': 'target\n', + 'src/b.py': 'target\n', + } + ) + async with env: + result = await env.grep('target', path='src/a.py') + assert 'src/a.py' in result + assert 'src/b.py' not in result + + +async def test_memory_grep_no_text_content(): + """grep with text bytes (non-binary) works.""" + env = MemoryEnvironment(files={'data.txt': b'findme in bytes'}) + async with env: + result = await env.grep('findme') + assert 'data.txt' in result + + +async def test_memory_glob_recursive(): + """glob with ** pattern.""" + env = MemoryEnvironment( + files={ + 'src/a.py': '', + 'src/sub/b.py': '', + 'other.txt': '', + } + ) + async with env: + matches = await env.glob('**/*.py') + assert 'src/a.py' in matches + assert 'src/sub/b.py' in matches + assert 'other.txt' not in matches + + +async def test_memory_glob_in_subdirectory(): + """glob with path= restricts to subdirectory.""" + env = MemoryEnvironment( + files={ + 'src/a.py': '', + 'lib/b.py': '', + } + ) + async with env: + matches = await env.glob('*.py', path='src') + assert 'src/a.py' in matches + assert 'lib/b.py' not in matches + + +async def test_memory_ls_with_bytes(): + """ls reports size correctly for bytes content.""" + env = MemoryEnvironment(files={'data.bin': b'\x00\x01\x02'}) + async with env: + entries = await env.ls('.') + assert len(entries) == 1 + assert entries[0].size == 3 + assert entries[0].is_dir is False + + +# --- ExecutionEnvironmentToolset: additional coverage --- + + +async def 
test_toolset_bash_truncated(tmp_path: Path): + """bash tool truncation message when output exceeds limit.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + # Generate output longer than MAX_OUTPUT_CHARS (100_000) + result = await manager.handle_call( + ToolCallPart(tool_name='shell', args={'command': 'python3 -c "print(\'x\' * 200000)"'}) + ) + assert '[output truncated]' in str(result) + assert 'Exit code: 0' in str(result) + + +async def test_toolset_image_too_large(tmp_path: Path): + """read_file on an image that's too large returns error string.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env, max_image_bytes=10) # Very small limit + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + # Write a PNG file that exceeds the limit + await env.write_file('big.png', b'\x89PNG\r\n\x1a\n' + b'\x00' * 100) + result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': 'big.png'})) + assert 'Image too large' in str(result) + + +async def test_toolset_image_read(tmp_path: Path): + """read_file on an image returns BinaryContent.""" + from pydantic_ai.messages import BinaryContent + + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + png_data = ( + b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01' + b'\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89' + b'\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01' + b'\r\n\xb4\x00\x00\x00\x00IEND\xaeB`\x82' + ) + await env.write_file('img.png', png_data) + result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': 'img.png'})) + assert isinstance(result, BinaryContent) + assert 
result.media_type == 'image/png' + + +async def test_toolset_grep_no_matches(tmp_path: Path): + """grep with no matches returns 'No matches found.'.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('test.txt', 'nothing relevant\n') + result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'nonexistent_xyz'})) + assert result == snapshot('No matches found.') + + +async def test_toolset_glob_no_matches(tmp_path: Path): + """glob with no matches returns 'No files found.'.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.nonexistent'})) + assert result == snapshot('No files found.') + + +async def test_toolset_edit_success(tmp_path: Path): + """edit_file tool returns success message.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + await env.write_file('code.py', 'old_value = 1\n') + result = await manager.handle_call( + ToolCallPart( + tool_name='replace_str', + args={'path': 'code.py', 'old': 'old_value', 'new': 'new_value'}, + ) + ) + assert result == snapshot('Replaced 1 occurrence in code.py.') + + +async def test_toolset_with_custom_env_instructions(): + """Environment instructions is used per-tool.""" + + class CustomEnv(MemoryEnvironment): + def instructions(self, capability: str) -> str | None: + if capability == 'grep': + return 'Custom grep description.' + return None + + env = CustomEnv() + assert env.instructions('grep') == 'Custom grep description.' 
+ assert env.instructions('read_file') is None + + +async def test_toolset_lifecycle_ref_counting(tmp_path: Path): + """Multiple context manager entries share the environment.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + + async with toolset: + async with toolset: + # Both entries active + result = await env.shell('echo shared') + assert 'shared' in result.output + # Still alive after one exit + result = await env.shell('echo still_alive') + assert 'still_alive' in result.output + + +# --- DockerEnvironment: mocked tests --- + + +def _make_tar(filename: str, data: bytes) -> bytes: + """Create a tar archive with a single file.""" + f = io.BytesIO() + with tarfile.open(fileobj=f, mode='w') as tar: + info = tarfile.TarInfo(name=filename) + info.size = len(data) + tar.addfile(info, io.BytesIO(data)) + f.seek(0) + return f.read() + + +class MockContainer: + """Mock Docker container for testing.""" + + def __init__(self) -> None: + self._files: dict[str, bytes] = {} + self.id = 'mock-container-id' + self.status = 'running' + self.client = MagicMock() + + def exec_run( + self, + cmd: list[str] | str, + workdir: str | None = None, + environment: dict[str, str] | None = None, + **kwargs: Any, + ) -> tuple[int, bytes]: + """Simulate exec_run by executing simple commands.""" + if isinstance(cmd, list): + cmd_str = ' '.join(cmd) + else: + cmd_str = cmd # pragma: no cover + + # Handle mkdir -p + if 'mkdir -p' in cmd_str: + return 0, b'' + + # Handle awk (read_file) + if 'awk' in cmd_str: + # Try to find the file by matching path in the awk command. + # The path is shell-escaped (e.g. 'test.txt'), so check both + # the full path and relative to workdir. 
+ for fpath, data in self._files.items(): + # Check if the filename or path appears in the command + name = fpath.rsplit('/', 1)[-1] if '/' in fpath else fpath + if name in cmd_str or fpath in cmd_str: # pragma: no branch + text = data.decode('utf-8', errors='replace') + lines = text.splitlines(keepends=True) + numbered = [f'{i:>6}\t{line}' for i, line in enumerate(lines, start=1)] + return 0, ''.join(numbered).encode('utf-8') + return 1, b'File not found' + + # Handle ls -la + if 'ls -la' in cmd_str: + output_lines = ['total 0'] + for path, data in sorted(self._files.items()): + name = path.rsplit('/', 1)[-1] if '/' in path else path + output_lines.append(f'-rw-r--r-- 1 root root {len(data)} Jan 1 00:00 {name}') + return 0, '\n'.join(output_lines).encode('utf-8') + + # Handle find (glob) + if 'find' in cmd_str: + matches = [] + for path in sorted(self._files): + matches.append(path) # pragma: no cover + return 0, '\n'.join(matches).encode('utf-8') + + # Handle grep + if 'grep' in cmd_str: + return 0, b'match:1:result' + + # Handle general commands + if 'echo' in cmd_str: + # Extract the echo argument + msg = cmd_str.split('echo ', 1)[-1] if 'echo ' in cmd_str else '' + return 0, (msg + '\n').encode('utf-8') + + if 'exit' in cmd_str: # pragma: no cover + return 1, b'' + + return 0, b'' # pragma: no cover + + def put_archive(self, path: str, data: Any) -> bool: + """Simulate file upload by extracting tar data.""" + tar_data = data.read() if hasattr(data, 'read') else data + with tarfile.open(fileobj=io.BytesIO(tar_data)) as tar: + for member in tar.getmembers(): + extracted = tar.extractfile(member) + if extracted: # pragma: no branch + full_path = f'{path}/{member.name}' if path != '.' 
else member.name + self._files[full_path] = extracted.read() + return True + + def get_archive(self, path: str) -> tuple[list[bytes], dict[str, Any]]: + """Simulate file download.""" + if path not in self._files: + # Check if file exists at resolved path + for fpath, data in self._files.items(): # pragma: no cover + if fpath.endswith(path) or path.endswith(fpath.split('/')[-1]): + return [_make_tar(fpath.split('/')[-1], data)], {} # pragma: no cover + raise DockerNotFound('File not found') # pragma: no cover + data = self._files[path] + return [_make_tar(path.split('/')[-1], data)], {} + + def stop(self, timeout: int = 5) -> None: # pragma: no cover + self.status = 'stopped' + + def remove(self, force: bool = False) -> None: + pass + + def reload(self) -> None: + pass + + +@pytest.fixture +def mock_container() -> MockContainer: + return MockContainer() + + +@pytest.fixture +def mock_docker_sandbox(mock_container: MockContainer) -> Any: + """Create a DockerEnvironment with a mock container.""" + if not docker_installed: + pytest.skip('docker package not installed') + + sandbox = DockerEnvironment(image='python:3.12-slim') + sandbox._container = mock_container # type: ignore[assignment] + sandbox._client = MagicMock() + return sandbox + + +async def test_docker_execute(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute runs commands in container.""" + result = await mock_docker_sandbox.shell('echo hello') + assert result.exit_code == 0 + assert isinstance(result.output, str) + + +async def test_docker_execute_timeout(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute wraps command with timeout.""" + result = await mock_docker_sandbox.shell('echo test', timeout=30) + assert result.exit_code == 0 + + +async def test_docker_execute_no_timeout(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute with timeout=None.""" + result = await mock_docker_sandbox.shell('echo test', timeout=None) + assert result.exit_code == 0 + + +async 
def test_docker_execute_with_env(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute passes env vars.""" + result = await mock_docker_sandbox.shell('echo test', env={'KEY': 'value'}) + assert result.exit_code == 0 + + +async def test_docker_write_read_file(mock_docker_sandbox: Any) -> None: + """DockerEnvironment write and read files.""" + await mock_docker_sandbox.write_file('test.txt', 'hello world\n') + content = await mock_docker_sandbox.read_file('test.txt') + assert isinstance(content, str) + + +async def test_docker_write_file_binary(mock_docker_sandbox: Any) -> None: + """DockerEnvironment write binary file.""" + await mock_docker_sandbox.write_file('data.bin', b'\x00\x01\x02') + + +async def test_docker_read_file_not_found(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.read_file on missing file raises FileNotFoundError.""" + with pytest.raises(FileNotFoundError): + await mock_docker_sandbox.read_file('nonexistent.txt') + + +async def test_docker_read_file_image(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.read_file returns raw bytes for image files.""" + png_data = b'\x89PNG\r\n\x1a\n' + mock_container._files['/workspace/image.png'] = png_data + result = await mock_docker_sandbox.read_file('image.png') + assert isinstance(result, bytes) + assert result == png_data + + +async def test_docker_edit_file(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.edit_file replaces text.""" + mock_container._files['/workspace/code.py'] = b'old_value = 1' + count = await mock_docker_sandbox.replace_str('code.py', 'old_value', 'new_value') + assert count == 1 + + +async def test_docker_ls(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls returns file entries.""" + mock_container._files['test.txt'] = b'hello' + entries = await mock_docker_sandbox.ls('.') + assert isinstance(entries, list) + + +async def 
test_docker_glob(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.glob returns matching paths.""" + matches = await mock_docker_sandbox.glob('*.py') + assert isinstance(matches, list) + + +async def test_docker_grep(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.grep returns matches.""" + result = await mock_docker_sandbox.grep('pattern') + assert isinstance(result, str) + + +async def test_docker_grep_with_options(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.grep with output_mode and glob_pattern.""" + result = await mock_docker_sandbox.grep('pattern', glob_pattern='*.py', output_mode='files_with_matches') + assert isinstance(result, str) + + +async def test_docker_grep_count(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.grep count mode filters zero-count results.""" + # Override exec_run to return count-style output + original_exec_run = mock_container.exec_run + + def count_exec_run(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + if isinstance(cmd, list) and 'sh' in cmd[0]: + cmd_str = cmd[-1] if len(cmd) > 1 else '' + if 'grep' in cmd_str and '-c' in cmd_str: + return 0, b'a.py:3\nb.py:0\nc.py:1' + return original_exec_run(cmd, **kwargs) # pragma: no cover + + mock_container.exec_run = count_exec_run # type: ignore[assignment] + result = await mock_docker_sandbox.grep('pattern', output_mode='count') + assert 'b.py:0' not in result + + +@docker_skip +async def test_docker_container_property(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.container raises when not started.""" + + sandbox = DockerEnvironment() + with pytest.raises(RuntimeError, match='not started'): + _ = sandbox.container + + +async def test_docker_create_process(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.create_process returns a DockerEnvironmentProcess.""" + proc = await mock_docker_sandbox.create_process('echo test') + assert proc is not None + + +async def 
test_docker_instructions(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.instructions provides per-tool descriptions.""" + grep_desc = mock_docker_sandbox.instructions('grep') + assert grep_desc is not None + assert 'POSIX' in grep_desc + + +async def test_docker_is_alive(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.is_alive checks container status.""" + result = await mock_docker_sandbox.is_alive() + assert result is True + + +@docker_skip +async def test_docker_is_alive_not_started() -> None: + """DockerEnvironment.is_alive returns False when not started.""" + + sandbox = DockerEnvironment() + result = await sandbox.is_alive() + assert result is False + + +async def test_docker_resolve_path(mock_docker_sandbox: Any) -> None: + """DockerEnvironment._resolve_path resolves relative paths.""" + assert mock_docker_sandbox._resolve_path('test.txt') == '/workspace/test.txt' + assert mock_docker_sandbox._resolve_path('/abs/path') == '/abs/path' + assert mock_docker_sandbox._resolve_path('sub/dir/file.py') == '/workspace/sub/dir/file.py' + + +@docker_skip +def test_docker_put_file() -> None: + """_put_file creates a tar archive and uploads it.""" + + container = MockContainer() + _put_file(container, '/workspace/test.txt', b'hello') # type: ignore[arg-type] + assert '/workspace/test.txt' in container._files + assert container._files['/workspace/test.txt'] == b'hello' + + +@docker_skip +def test_docker_sandbox_process_read_frame() -> None: + """DockerEnvironmentProcess._read_frame parses multiplexed stream frames.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + # Create a mock socket with a multiplexed frame + stdout_data = b'hello from stdout' + header = struct.pack('>BxxxI', 1, len(stdout_data)) # stream_type=1 (stdout) + + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stdout_data] + proc._socket = mock_socket + + stream_type, data = 
proc._read_frame() + assert stream_type == 1 + assert data == stdout_data + + +@docker_skip +def test_docker_sandbox_process_read_frame_stderr() -> None: + """DockerEnvironmentProcess._read_frame handles stderr frames.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + stderr_data = b'error output' + header = struct.pack('>BxxxI', 2, len(stderr_data)) # stream_type=2 (stderr) + + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stderr_data] + proc._socket = mock_socket + + stream_type, data = proc._read_frame() + assert stream_type == 2 + assert data == stderr_data + + +@docker_skip +def test_docker_sandbox_process_read_frame_eof() -> None: + """DockerEnvironmentProcess._read_frame returns empty on EOF.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + mock_socket = MagicMock() + mock_socket.recv.return_value = b'' # EOF + proc._socket = mock_socket + + stream_type, data = proc._read_frame() + assert stream_type == 0 + assert data == b'' + assert proc._eof is True + + +@docker_skip +def test_docker_sandbox_process_read_frame_zero_size() -> None: + """DockerEnvironmentProcess._read_frame handles zero-size frames.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + header = struct.pack('>BxxxI', 1, 0) # zero size + + mock_socket = MagicMock() + mock_socket.recv.return_value = header + proc._socket = mock_socket + + stream_type, data = proc._read_frame() + assert stream_type == 1 + assert data == b'' + + +@docker_skip +def test_docker_sandbox_process_already_eof() -> None: + """DockerEnvironmentProcess._read_frame returns empty when already at EOF.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._eof = True + 
+ stream_type, data = proc._read_frame() + assert stream_type == 0 + assert data == b'' + + +# --- Additional coverage: _base.py --- + + +async def test_glob_match_question_mark_in_doublestar_pattern(): + """glob_match with ? inside a ** pattern.""" + assert glob_match('a/b/test.py', '**/?est.py') is True + assert glob_match('test.py', '?est.py') is True + + +async def test_execution_environment_aenter_aexit(): + """ExecutionEnvironment base __aenter__/__aexit__ are exercised by subclasses.""" + # MemoryEnvironment exercises the base class path + env = MemoryEnvironment() + async with env: + pass + + +# --- Additional coverage: _toolset.py --- + + +async def test_toolset_bash_empty_output(tmp_path: Path): + """ExecutionEnvironmentToolset bash returns just exit code when no output.""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context() + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='shell', args={'command': 'true'})) + assert 'Exit code: 0' in str(result) + + +async def test_toolset_glob_truncation(tmp_path: Path): + """ExecutionEnvironmentToolset glob truncates after 100 matches.""" + env = LocalEnvironment(tmp_path) + # Create 110 files + for i in range(110): + (tmp_path / f'file_{i:03d}.txt').write_text(f'content {i}') + + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context() + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.txt'})) + assert 'truncated' in str(result) + + +async def test_toolset_grep_no_matches_returns_message(tmp_path: Path): + """ExecutionEnvironmentToolset grep returns message when no matches.""" + (tmp_path / 'test.txt').write_text('hello world') + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context() + 
manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'zzz_nonexistent'})) + assert 'No matches' in str(result) + + +async def test_toolset_lifecycle_error(tmp_path: Path): + """ExecutionEnvironmentToolset handles environment startup failures.""" + + class FailingEnv(LocalEnvironment): + async def __aenter__(self): + raise RuntimeError('Setup failed') + + env = FailingEnv(tmp_path) + toolset = ExecutionEnvironmentToolset(env) + with pytest.raises(RuntimeError, match='Setup failed'): + async with toolset: + pass + + +# --- Additional coverage: local.py --- + + +async def test_local_process_stdin_not_available(): + """LocalEnvironmentProcess.send raises when stdin is None.""" + from pydantic_ai.environments.local import LocalEnvironmentProcess + + mock_proc = MagicMock() + mock_proc.stdin = None + proc = LocalEnvironmentProcess(mock_proc) + with pytest.raises(RuntimeError, match='stdin'): + await proc.send(b'data') + + +async def test_local_process_stdout_not_available(): + """LocalEnvironmentProcess.recv raises when stdout is None.""" + from pydantic_ai.environments.local import LocalEnvironmentProcess + + mock_proc = MagicMock() + mock_proc.stdout = None + proc = LocalEnvironmentProcess(mock_proc) + with pytest.raises(RuntimeError, match='stdout'): + await proc.recv() + + +async def test_local_process_stderr_not_available(): + """LocalEnvironmentProcess.recv_stderr raises when stderr is None.""" + from pydantic_ai.environments.local import LocalEnvironmentProcess + + mock_proc = MagicMock() + mock_proc.stderr = None + proc = LocalEnvironmentProcess(mock_proc) + with pytest.raises(RuntimeError, match='stderr'): + await proc.recv_stderr() + + +async def test_local_process_recv_stderr_timeout(tmp_path: Path): + """LocalEnvironmentProcess.recv_stderr with timeout.""" + env = LocalEnvironment(tmp_path) + proc = await env.create_process('python -c "import sys; 
sys.stderr.write(\'err\\n\')"') + async with proc: + data = await proc.recv_stderr(timeout=5.0) + assert b'err' in data + + +async def test_local_process_recv_stderr_eof(tmp_path: Path): + """LocalEnvironmentProcess.recv_stderr returns empty on EOF.""" + env = LocalEnvironment(tmp_path) + proc = await env.create_process('echo done') + async with proc: + await proc.wait(timeout=5.0) + # After process exits, stderr should return empty + data = await proc.recv_stderr() + assert data == b'' + + +async def test_local_process_kill_terminates_sleep(tmp_path: Path): + """LocalEnvironmentProcess.kill terminates process.""" + env = LocalEnvironment(tmp_path) + proc = await env.create_process('sleep 60') + async with proc: + await proc.kill() + # After kill, returncode should be set + + +async def test_local_read_file_bytes_directory_raises_error(tmp_path: Path): + """LocalEnvironment.read_file_bytes raises on directory.""" + (tmp_path / 'subdir').mkdir() + env = LocalEnvironment(tmp_path) + with pytest.raises(FileNotFoundError, match='directory'): + await env.read_file('subdir') + + +async def test_local_read_file_bytes_not_found(tmp_path: Path): + """LocalEnvironment.read_file_bytes raises on missing file.""" + env = LocalEnvironment(tmp_path) + with pytest.raises(FileNotFoundError, match='not found'): + await env.read_file('nonexistent.txt') + + +async def test_local_grep_on_file(tmp_path: Path): + """LocalEnvironment.grep on a specific file path.""" + (tmp_path / 'target.py').write_text('found = True\nmissed = False\n') + env = LocalEnvironment(tmp_path) + result = await env.grep('found', path='target.py') + assert 'found' in result + assert 'missed' not in result + + +async def test_local_grep_with_glob_pattern_filters_by_extension(tmp_path: Path): + """LocalEnvironment.grep with glob filtering.""" + (tmp_path / 'a.py').write_text('match_here\n') + (tmp_path / 'b.txt').write_text('match_here\n') + env = LocalEnvironment(tmp_path) + result = await env.grep('match_here', 
glob_pattern='*.py') + assert 'a.py' in result + assert 'b.txt' not in result + + +async def test_local_grep_skips_binary_files_with_null_bytes(tmp_path: Path): + """LocalEnvironment.grep skips files with null bytes.""" + (tmp_path / 'binary.bin').write_bytes(b'\x00binary content') + (tmp_path / 'text.txt').write_text('searchable\n') + env = LocalEnvironment(tmp_path) + result = await env.grep('searchable') + assert 'text.txt' in result + assert 'binary' not in result + + +async def test_local_grep_skips_hidden_files_in_hidden_dirs(tmp_path: Path): + """LocalEnvironment.grep skips hidden files/dirs.""" + hidden_dir = tmp_path / '.hidden' + hidden_dir.mkdir() + (hidden_dir / 'secret.txt').write_text('findme\n') + (tmp_path / 'visible.txt').write_text('findme\n') + env = LocalEnvironment(tmp_path) + result = await env.grep('findme') + assert 'visible.txt' in result + assert '.hidden' not in result + + +async def test_local_execute_output_truncation(tmp_path: Path): + """LocalEnvironment.execute truncates long output.""" + # Write a script that outputs lots of text + script = tmp_path / 'big.py' + script.write_text("print('x' * 200000)") + env = LocalEnvironment(tmp_path) + result = await env.shell(f'python {script}') + assert result.truncated is True + assert len(result.output) == 100_000 + + +# --- Additional coverage: memory.py --- + + +async def test_memory_normalize_leading_slash_in_constructor(): + """MemoryEnvironment normalizes paths with leading /.""" + env = MemoryEnvironment(files={'/abs/path.txt': 'content'}) + content = await env.read_file('abs/path.txt') + assert isinstance(content, str) + assert 'content' in content + + +async def test_memory_read_file_directory_error(): + """MemoryEnvironment.read_file raises on directory paths.""" + env = MemoryEnvironment(files={'dir/file.txt': 'content'}) + with pytest.raises(FileNotFoundError, match='directory'): + await env.read_file('dir') + + +async def test_memory_read_file_bytes_not_found_raises_error(): + 
"""MemoryEnvironment.read_file_bytes raises on missing file.""" + env = MemoryEnvironment() + with pytest.raises(FileNotFoundError): + await env.read_file('missing.txt') + + +async def test_memory_ls_non_root_directory(): + """MemoryEnvironment.ls lists files in a subdirectory.""" + env = MemoryEnvironment(files={'sub/a.txt': 'a', 'sub/b.txt': 'b', 'other.txt': 'c'}) + entries = await env.ls('sub') + assert len(entries) == 2 + names = {e.name for e in entries} + assert names == {'a.txt', 'b.txt'} + + +async def test_memory_ls_with_subdirs(): + """MemoryEnvironment.ls shows directories in listing.""" + env = MemoryEnvironment(files={'dir/sub/file.txt': 'content'}) + entries = await env.ls('dir') + assert len(entries) == 1 + assert entries[0].name == 'sub' + assert entries[0].is_dir is True + + +async def test_memory_ls_skips_non_children(): + """MemoryEnvironment.ls skips files not under the directory.""" + env = MemoryEnvironment(files={'a/b.txt': 'x', 'c/d.txt': 'y'}) + entries = await env.ls('a') + assert len(entries) == 1 + assert entries[0].name == 'b.txt' + + +async def test_memory_grep_binary_skip(): + """MemoryEnvironment.grep skips binary files.""" + env = MemoryEnvironment(files={'binary.bin': b'\x00binary data', 'text.txt': 'findme'}) + result = await env.grep('findme') + assert 'text.txt' in result + assert 'binary' not in result + + +async def test_memory_grep_path_filter(): + """MemoryEnvironment.grep filters by exact file path.""" + env = MemoryEnvironment(files={'sub/target.py': 'match_here', 'other.py': 'match_here'}) + result = await env.grep('match_here', path='sub') + assert 'sub/target.py' in result + assert 'other.py' not in result + + +async def test_memory_glob_in_subdirectory_with_path_filter(): + """MemoryEnvironment.glob works with path parameter.""" + env = MemoryEnvironment(files={'src/a.py': 'a', 'src/b.txt': 'b', 'other.py': 'c'}) + matches = await env.glob('*.py', path='src') + assert 'src/a.py' in matches + assert 'other.py' not in 
matches + + +# --- Additional Docker coverage: lifecycle, process, truncation --- + + +async def test_docker_execute_truncation(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.execute truncates long output.""" + original = mock_container.exec_run + + def big_output(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + if isinstance(cmd, list) and 'echo' in str(cmd): + return 0, b'x' * 200_000 + return original(cmd, **kwargs) # pragma: no cover + + mock_container.exec_run = big_output # type: ignore[assignment] + result = await mock_docker_sandbox.shell('echo big') + assert result.truncated is True + assert len(result.output) == 100_000 + + +async def test_docker_execute_timeout_exit_code(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.execute handles timeout exit code 124.""" + + def timeout_result(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + return 124, b'partial output' + + mock_container.exec_run = timeout_result # type: ignore[assignment] + result = await mock_docker_sandbox.shell('sleep 999', timeout=1) + assert result.exit_code == 124 + assert '[Command timed out]' in result.output + + +@docker_skip +async def test_docker_setup_teardown() -> None: + """DockerEnvironment._setup and _teardown with mocked Docker client.""" + from unittest.mock import patch as mock_patch + + sandbox = DockerEnvironment(image='python:3.12-slim') + + mock_client = MagicMock() + mock_container_obj = MagicMock() + mock_client.containers.run.return_value = mock_container_obj + + with mock_patch('pydantic_ai.environments.docker.docker') as mock_docker: + mock_docker.from_env.return_value = mock_client + sandbox._setup() + assert sandbox._container is not None + + # Teardown + sandbox._teardown() + mock_container_obj.stop.assert_called() + mock_container_obj.remove.assert_called() + assert sandbox._container is None + + +@docker_skip +async def test_docker_teardown_cleanup_errors() -> None: + 
"""DockerEnvironment._teardown handles exceptions gracefully.""" + + sandbox = DockerEnvironment() + mock_container = MagicMock() + mock_container.stop.side_effect = Exception('stop failed') + mock_container.remove.side_effect = Exception('remove failed') + sandbox._container = mock_container + + # Should not raise + sandbox._teardown() + assert sandbox._container is None + + +@docker_skip +async def test_docker_setup_with_all_options() -> None: + """DockerEnvironment._setup passes all container options.""" + from unittest.mock import patch as mock_patch + + sandbox = DockerEnvironment( + image='python:3.12-slim', + env_vars={'KEY': 'val'}, + volumes={'/host': {'bind': '/container', 'mode': 'rw'}}, + memory_limit='512m', + cpu_limit=1.0, + pids_limit=256, + network_disabled=True, + read_only=True, + cap_drop=['ALL'], + security_opt=['no-new-privileges'], + user='nobody', + tmpfs={'/tmp': 'noexec,nosuid,size=64m'}, + init=True, + ) + + mock_client = MagicMock() + mock_container = MagicMock() + mock_client.containers.run.return_value = mock_container + + with mock_patch('pydantic_ai.environments.docker.docker') as mock_docker: + mock_docker.from_env.return_value = mock_client + sandbox._setup() + + call_kwargs = mock_client.containers.run.call_args[1] + assert call_kwargs['volumes'] == {'/host': {'bind': '/container', 'mode': 'rw'}} + assert call_kwargs['mem_limit'] == '512m' + assert call_kwargs['nano_cpus'] == int(1e9) + assert call_kwargs['pids_limit'] == 256 + assert call_kwargs['network_disabled'] is True + assert call_kwargs['read_only'] is True + assert call_kwargs['cap_drop'] == ['ALL'] + assert call_kwargs['security_opt'] == ['no-new-privileges'] + assert call_kwargs['user'] == 'nobody' + assert call_kwargs['tmpfs'] == {'/tmp': 'noexec,nosuid,size=64m'} + assert call_kwargs['init'] is True + + +@docker_skip +async def test_docker_process_recv_with_buffered_data() -> None: + """DockerEnvironmentProcess.recv returns buffered stdout data first.""" + + container 
= MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._stdout_buffer.append(b'buffered data') + + result = await proc.recv() + assert result == b'buffered data' + assert proc._stdout_buffer == [] + + +@docker_skip +async def test_docker_process_recv_stderr_with_buffered_data() -> None: + """DockerEnvironmentProcess.recv_stderr returns buffered stderr data first.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._stderr_buffer.append(b'buffered error') + + result = await proc.recv_stderr() + assert result == b'buffered error' + assert proc._stderr_buffer == [] + + +@docker_skip +async def test_docker_process_recv_stream_buffers_other() -> None: + """DockerEnvironmentProcess._recv_stream buffers frames for the other stream.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + # First frame is stderr (type 2), second is stdout (type 1) + stderr_data = b'error output' + stdout_data = b'stdout output' + stderr_header = struct.pack('>BxxxI', 2, len(stderr_data)) + stdout_header = struct.pack('>BxxxI', 1, len(stdout_data)) + + mock_socket = MagicMock() + mock_socket.recv.side_effect = [stderr_header, stderr_data, stdout_header, stdout_data] + proc._socket = mock_socket + + # Requesting stdout should buffer stderr and return stdout + result = await proc.recv() + assert result == stdout_data + assert proc._stderr_buffer == [stderr_data] + + +@docker_skip +async def test_docker_process_recv_stream_eof() -> None: + """DockerEnvironmentProcess._recv_stream returns empty on EOF.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + mock_socket = MagicMock() + mock_socket.recv.return_value = b'' # EOF + proc._socket = mock_socket + + result = await proc.recv() + 
assert result == b'' + + +@docker_skip +async def test_docker_process_kill() -> None: + """DockerEnvironmentProcess.kill closes the socket.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + proc._socket = mock_socket + + await proc.kill() + mock_socket.close.assert_called_once() + + +@docker_skip +async def test_docker_process_kill_oserror() -> None: + """DockerEnvironmentProcess.kill handles OSError.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + mock_socket.close.side_effect = OSError('socket error') + proc._socket = mock_socket + + # Should not raise + await proc.kill() + + +@docker_skip +async def test_docker_process_returncode() -> None: + """DockerEnvironmentProcess.returncode checks exec status.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + # No exec_id means returncode is None + assert proc.returncode is None + + # With exec_id and cached returncode + proc._exec_id = 'exec-123' + proc._returncode = 0 + assert proc.returncode == 0 + + +@docker_skip +async def test_docker_process_returncode_from_inspect() -> None: + """DockerEnvironmentProcess.returncode polls Docker API.""" + + container = MockContainer() + container.client.api.exec_inspect.return_value = {'ExitCode': 42, 'Running': False} + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._exec_id = 'exec-123' + + assert proc.returncode == 42 + assert proc._returncode == 42 + + +@docker_skip +async def test_docker_process_returncode_still_running() -> None: + """DockerEnvironmentProcess.returncode returns None when process is running (ExitCode=0, Running=True).""" + + container = MockContainer() + # Docker returns ExitCode=0 + Running=True for 
still-running processes + container.client.api.exec_inspect.return_value = {'ExitCode': 0, 'Running': True} + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._exec_id = 'exec-123' + + assert proc.returncode is None + + +@docker_skip +async def test_docker_process_returncode_inspect_error() -> None: + """DockerEnvironmentProcess.returncode handles API errors.""" + + container = MockContainer() + container.client.api.exec_inspect.side_effect = OSError('connection failed') + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._exec_id = 'exec-123' + + assert proc.returncode is None + + +@docker_skip +async def test_docker_process_send() -> None: + """DockerEnvironmentProcess.send writes to socket.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + proc._socket = mock_socket + + await proc.send(b'hello') + mock_socket.sendall.assert_called_once_with(b'hello') + + +@docker_skip +async def test_docker_process_recv_with_timeout() -> None: + """DockerEnvironmentProcess.recv with timeout.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + stdout_data = b'data' + header = struct.pack('>BxxxI', 1, len(stdout_data)) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stdout_data] + proc._socket = mock_socket + + result = await proc.recv(timeout=5.0) + assert result == stdout_data + + +@docker_skip +async def test_docker_process_recv_stderr_with_timeout() -> None: + """DockerEnvironmentProcess.recv_stderr with timeout.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + stderr_data = b'error' + header = struct.pack('>BxxxI', 2, len(stderr_data)) + mock_socket = MagicMock() + 
mock_socket.recv.side_effect = [header, stderr_data] + proc._socket = mock_socket + + result = await proc.recv_stderr(timeout=5.0) + assert result == stderr_data + + +@docker_skip +async def test_docker_read_frame_data_eof_during_read() -> None: + """DockerEnvironmentProcess._read_frame handles EOF during data read.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + # Header says 100 bytes but socket returns less then EOF + header = struct.pack('>BxxxI', 1, 100) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, b'partial', b''] # EOF during data + proc._socket = mock_socket + + stream_type, data = proc._read_frame() + assert stream_type == 1 + assert data == b'partial' + assert proc._eof is True + + +@docker_skip +async def test_docker_process_start_with_env() -> None: + """DockerEnvironmentProcess._do_start passes env to exec_create.""" + + container = MockContainer() + container.client.api.exec_create.return_value = {'Id': 'exec-test'} + mock_sock = MagicMock() + container.client.api.exec_start.return_value = mock_sock + + proc = DockerEnvironmentProcess( + container, # type: ignore[arg-type] + 'echo test', + '/workspace', + env={'FOO': 'bar'}, + ) + await proc._start() + + assert proc._exec_id == 'exec-test' + call_kwargs = container.client.api.exec_create.call_args[1] + assert call_kwargs['environment'] == {'FOO': 'bar'} + + +@docker_skip +async def test_docker_process_aenter() -> None: + """DockerEnvironmentProcess.__aenter__ starts the process.""" + + container = MockContainer() + container.client.api.exec_create.return_value = {'Id': 'exec-aenter'} + mock_sock = MagicMock() + container.client.api.exec_start.return_value = mock_sock + + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + entered = await proc.__aenter__() + assert entered is proc + assert proc._exec_id == 'exec-aenter' + + +async def 
test_docker_ls_not_found(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls raises NotADirectoryError on missing dirs.""" + original = mock_container.exec_run + + def fail_ls(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + if isinstance(cmd, list) and 'ls -la' in ' '.join(cmd): + return 1, b'ls: cannot access: No such file or directory' + return original(cmd, **kwargs) # pragma: no cover + + mock_container.exec_run = fail_ls # type: ignore[assignment] + with pytest.raises(NotADirectoryError): + await mock_docker_sandbox.ls('nonexistent') + + +@docker_skip +async def test_docker_read_file_image_not_found(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.read_file raises DockerNotFound for missing image files.""" + + def fail_get_archive(path: str) -> Any: + raise DockerNotFound('File not found') + + mock_container.get_archive = fail_get_archive + with pytest.raises(DockerNotFound): + await mock_docker_sandbox.read_file('missing.png') + + +async def test_local_process_wait_no_timeout(tmp_path: Path): + """LocalEnvironmentProcess.wait without timeout (line 74).""" + env = LocalEnvironment(tmp_path) + proc = await env.create_process('true') + async with proc: + exit_code = await proc.wait() # no timeout + assert exit_code == 0 + + +async def test_memory_normalize_absolute_path(): + """MemoryEnvironment._normalize strips leading / (line 76).""" + env = MemoryEnvironment(files={'path.txt': 'content'}) + # Normalize /path.txt should strip leading / + normalized = env._normalize('/path.txt') + assert normalized == 'path.txt' + + +async def test_memory_read_file_that_is_also_directory_prefix(): + """MemoryEnvironment.read_file when path exists as both file and directory prefix.""" + # 'dir' exists as a file AND 'dir/child.txt' makes it look like a directory too + env = MemoryEnvironment(files={'dir': 'I am a file', 'dir/child.txt': 'child content'}) + async with env: + content = await 
env.read_file('dir') + assert isinstance(content, str) + assert 'I am a file' in content + + +# --- ExecutionEnvironmentToolset: capability and edit strategy resolution --- + + +def test_resolve_edit_tool_explicit_strategy(): + """Passing edit_strategy to constructor overrides auto-detection.""" + env = MemoryEnvironment() + toolset = ExecutionEnvironmentToolset(env, edit_strategy='apply_patch') + strategy = toolset._resolve_edit_tool(env) + assert strategy == 'apply_patch' + + +def test_resolve_edit_tool_apply_patch_fallback(): + """When env has apply_patch but not replace_str, resolves to apply_patch.""" + from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + + class _ApplyPatchEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'apply_patch'}) + + toolset = ExecutionEnvironmentToolset(_ApplyPatchEnv()) + strategy = toolset._resolve_edit_tool(_ApplyPatchEnv()) + assert strategy == 'apply_patch' + + +def test_resolve_edit_tool_neither(): + """When env has neither replace_str nor apply_patch, returns None.""" + from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + + class _NoEditEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'ls'}) + + toolset = ExecutionEnvironmentToolset(_NoEditEnv()) + strategy = toolset._resolve_edit_tool(_NoEditEnv()) + assert strategy is None + + +# --- ExecutionEnvironmentToolset: ls formatting through toolset --- + + +async def test_toolset_ls_error_handling(): + """Toolset ls returns error string when environment raises.""" + from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + + class _ErrorLsEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'ls'}) + + async def ls(self, path: str = '.') -> list[FileInfo]: + raise 
NotADirectoryError(f'Not a directory: {path}') + + env = _ErrorLsEnv() + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context() + tools = await toolset.get_tools(ctx) + result = await toolset.call_tool('ls', {'path': '/bad'}, ctx, tools['ls']) + assert 'Error:' in str(result) + + +async def test_toolset_ls_formats_dirs(): + """Toolset ls formats directory entries with trailing /.""" + env = MemoryEnvironment(files={'sub/a.txt': 'hello'}) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context() + tools = await toolset.get_tools(ctx) + async with env: + result = await toolset.call_tool('ls', {'path': '.'}, ctx, tools['ls']) + assert 'sub/' in str(result) + + +async def test_toolset_ls_formats_files_without_size(): + """Toolset ls formats file entries without size (just the name).""" + from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + + class _NoSizeEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'ls'}) + + async def ls(self, path: str = '.') -> list[FileInfo]: + return [FileInfo(name='readme.txt', path='readme.txt', is_dir=False, size=None)] + + env = _NoSizeEnv() + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context() + tools = await toolset.get_tools(ctx) + result = await toolset.call_tool('ls', {'path': '.'}, ctx, tools['ls']) + assert str(result) == 'readme.txt' + + +async def test_toolset_ls_empty_directory(): + """Toolset ls returns 'Empty directory.' 
for empty listings.""" + from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + + class _EmptyLsEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'ls'}) + + async def ls(self, path: str = '.') -> list[FileInfo]: + return [] + + env = _EmptyLsEnv() + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context() + tools = await toolset.get_tools(ctx) + result = await toolset.call_tool('ls', {'path': '.'}, ctx, tools['ls']) + assert str(result) == 'Empty directory.' + + +# --- Memory image file stored as string --- + + +async def test_memory_read_image_stored_as_string(): + """MemoryEnvironment returns bytes for image files even when stored as a string.""" + env = MemoryEnvironment(files={'image.png': 'fake png data'}) + async with env: + result = await env.read_file('image.png') + assert isinstance(result, bytes) + assert result == b'fake png data' From 68b40361a7922b44c010b5b2deb45a097930c8ec Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 01:37:41 -0700 Subject: [PATCH 02/49] Add environment_factory for per-run environment isolation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When multiple agent.run() calls execute concurrently, a shared environment means they all operate on the same filesystem and processes. The new environment_factory parameter creates a fresh, isolated environment per async-with entry using ContextVar-scoped state. Also renames environment → shared_environment to make concurrency semantics explicit (positional arg, so existing callers still work). 
--- docs/environments.md | 33 ++++- .../toolsets/execution_environment.py | 88 +++++++---- tests/test_environments.py | 138 +++++++++++++++++- 3 files changed, 226 insertions(+), 33 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index 1288d50f2e..c906808ebb 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -176,7 +176,7 @@ from pydantic_ai.environments.memory import MemoryEnvironment # Only file tools — no shell or search toolset = ExecutionEnvironmentToolset( MemoryEnvironment(), - include=frozenset({'read_file', 'write_file', 'edit_file'}), + include={'read_file', 'write_file', 'edit_file'}, ) ``` @@ -201,6 +201,9 @@ async def main(): # container cleaned up automatically ``` +!!! note "Shared environment" + When you pass an environment directly, all concurrent `agent.run()` calls share the same environment instance (same container, filesystem, and processes). For isolated concurrent runs, use `environment_factory` — see [Concurrent Runs](#concurrent-runs) below. + ### Environment Overrides You can swap the backing environment at runtime using [`use_environment()`][pydantic_ai.environments.ExecutionEnvironmentToolset.use_environment]: @@ -227,6 +230,34 @@ async def main(): await agent.run('echo "running in Docker"') ``` +### Concurrent Runs + +When multiple `agent.run()` calls execute concurrently (e.g. via `asyncio.gather`), a shared environment means they all operate on the same filesystem and processes, which can cause interference. 
Use `environment_factory` to create a fresh, isolated environment for each run: + +```python {title="environments_concurrent.py" test="skip"} +import asyncio + +from pydantic_ai import Agent +from pydantic_ai.environments import ExecutionEnvironmentToolset +from pydantic_ai.environments.docker import DockerEnvironment + +# Each concurrent run gets its own container +toolset = ExecutionEnvironmentToolset( + environment_factory=lambda: DockerEnvironment(image='python:3.12-slim') +) + +agent = Agent('openai:gpt-5.2', toolsets=[toolset]) + +async def main(): + # Each agent.run() enters its own `async with toolset:`, creating a separate container + results = await asyncio.gather( + agent.run('task A'), + agent.run('task B'), + ) +``` + +The factory is called once per `async with toolset:` entry, and the created environment is automatically cleaned up on exit. + ## Per-Call Environment Variables All environments support per-call environment variables via the `env` parameter on [`shell()`][pydantic_ai.environments.ExecutionEnvironment.shell] and [`create_process()`][pydantic_ai.environments.ExecutionEnvironment.create_process]. These are merged on top of any baseline `env_vars`: diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index fd9cd0bf82..ef40d282d8 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -5,9 +5,9 @@ import posixpath import re from asyncio import Lock -from collections.abc import Iterator +from collections.abc import Callable, Iterator, Sequence from contextlib import AsyncExitStack, contextmanager -from contextvars import ContextVar +from contextvars import ContextVar, Token from typing import Any, Literal from typing_extensions import Self @@ -42,7 +42,8 @@ class ExecutionEnvironmentToolset(FunctionToolset[Any]): filtered by `include`/`exclude`. 
The environment can be: - - Passed directly at construction time (most common) + - Passed directly at construction time via `shared_environment` (shared across concurrent runs) + - Created per-run via `environment_factory` (isolated concurrent runs) - Set/overridden via context var using `use_environment()` (for testing or per-call-site config) Usage: @@ -63,10 +64,11 @@ class ExecutionEnvironmentToolset(FunctionToolset[Any]): def __init__( self, - environment: ExecutionEnvironment | None = None, + shared_environment: ExecutionEnvironment | None = None, *, - include: frozenset[Capability] | None = None, - exclude: frozenset[Capability] | None = None, + environment_factory: Callable[[], ExecutionEnvironment] | None = None, + include: Sequence[Capability] | None = None, + exclude: Sequence[Capability] | None = None, edit_strategy: EditStrategy | None = None, require_shell_approval: bool = False, require_write_approval: bool = False, @@ -78,8 +80,13 @@ def __init__( """Create a new execution environment toolset. Args: - environment: The execution environment to use for tool execution. + shared_environment: A shared execution environment for tool execution. + All concurrent runs share this single environment instance. Can also be set later via `use_environment()`. + environment_factory: A callable that creates a fresh environment per + `async with toolset:` entry. Use this for concurrent runs that need + isolation (e.g. separate Docker containers). Mutually exclusive with + `shared_environment`. include: Capabilities to include. `None` means all capabilities from the environment. Pass an explicit set to restrict to specific capabilities. @@ -98,13 +105,20 @@ def __init__( max_retries: Maximum retries per tool call. id: Optional unique ID for the toolset (required for durable execution). 
""" + if shared_environment is not None and environment_factory is not None: + raise ValueError('Cannot provide both shared_environment and environment_factory.') + super().__init__(max_retries=max_retries, id=id) - self._default_environment = environment + self._shared_environment = shared_environment + self._environment_factory = environment_factory self._environment_override: ContextVar[ExecutionEnvironment | None] = ContextVar( f'_environment_override_{id or "environment"}', default=None ) - self._include = include - self._exclude = exclude or frozenset() + self._per_run_state: ContextVar[tuple[AsyncExitStack, Token[ExecutionEnvironment | None]] | None] = ContextVar( + f'_per_run_state_{id or "environment"}', default=None + ) + self._include: frozenset[Capability] | None = frozenset(include) if include is not None else None + self._exclude: frozenset[Capability] = frozenset(exclude) if exclude else frozenset() self._edit_strategy: EditStrategy | None = edit_strategy self._image_support = image_support self._max_image_bytes = max_image_bytes @@ -115,9 +129,9 @@ def __init__( self._exit_stack: AsyncExitStack | None = None # Register tools based on what we know at init time. - # If no environment is provided, we register a full set of tools and - # let runtime errors catch unsupported capabilities. - self._register_tools(environment) + # When using environment_factory, no environment is available yet, so we + # register a full set of tools and let runtime errors catch unsupported capabilities. + self._register_tools(shared_environment) def _resolve_capabilities(self, env: ExecutionEnvironment | None) -> set[Capability]: """Determine which toolset-level capabilities to register as tools.""" @@ -361,12 +375,13 @@ def tool_name_conflict_hint(self) -> str: def environment(self) -> ExecutionEnvironment | None: """The active execution environment, or None if not configured. - Checks the context var override first, then falls back to the default. 
+ Checks the context var override first (which includes per-run factory + environments), then falls back to the shared environment. """ override = self._environment_override.get() if override is not None: return override - return self._default_environment + return self._shared_environment @property def required_environment(self) -> ExecutionEnvironment: @@ -406,21 +421,36 @@ def use_environment(self, environment: ExecutionEnvironment) -> Iterator[None]: # --- Lifecycle --- async def __aenter__(self) -> Self: - async with self._enter_lock: - self._running_count += 1 - if self._running_count == 1: - self._exit_stack = AsyncExitStack() - try: - await self._exit_stack.enter_async_context(self.required_environment) - except Exception: - self._running_count -= 1 - raise + if self._environment_factory is not None: + env = self._environment_factory() + stack = AsyncExitStack() + await stack.enter_async_context(env) + token = self._environment_override.set(env) + self._per_run_state.set((stack, token)) + else: + async with self._enter_lock: + self._running_count += 1 + if self._running_count == 1: + self._exit_stack = AsyncExitStack() + try: + await self._exit_stack.enter_async_context(self.required_environment) + except Exception: + self._running_count -= 1 + raise return self async def __aexit__(self, *args: Any) -> bool | None: - async with self._enter_lock: - self._running_count -= 1 - if self._running_count == 0 and self._exit_stack is not None: - await self._exit_stack.aclose() - self._exit_stack = None + if self._environment_factory is not None: + state = self._per_run_state.get() + if state is not None: + stack, token = state + await stack.aclose() + self._environment_override.reset(token) + self._per_run_state.set(None) + else: + async with self._enter_lock: + self._running_count -= 1 + if self._running_count == 0 and self._exit_stack is not None: + await self._exit_stack.aclose() + self._exit_stack = None return None diff --git a/tests/test_environments.py 
b/tests/test_environments.py index 69220c3e70..5658b79a53 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -515,7 +515,7 @@ async def test_toolset_tool_names(): async def test_toolset_include_flags(): toolset = ExecutionEnvironmentToolset( LocalEnvironment('.'), - include=frozenset(), + include={}, ) assert toolset.tools == {} @@ -523,7 +523,7 @@ async def test_toolset_include_flags(): async def test_toolset_include_shell_only(): toolset = ExecutionEnvironmentToolset( LocalEnvironment('.'), - include=frozenset({'shell'}), + include={'shell'}, ) assert sorted(toolset.tools.keys()) == ['shell'] @@ -1215,7 +1215,7 @@ async def test_memory_read_file_bytes(): async def test_memory_toolset_integration(): """MemoryEnvironment works with ExecutionEnvironmentToolset for full agent testing.""" env = MemoryEnvironment(files={'main.py': 'print("hello")\n'}) - toolset = ExecutionEnvironmentToolset(env, exclude=frozenset({'shell'})) + toolset = ExecutionEnvironmentToolset(env, exclude={'shell'}) ctx = build_run_context(None) manager = await ToolManager[None](toolset).for_run_step(ctx) @@ -2929,6 +2929,138 @@ async def ls(self, path: str = '.') -> list[FileInfo]: assert str(result) == 'Empty directory.' 
+# --- ExecutionEnvironmentToolset: environment_factory --- + + +async def test_toolset_factory_basic(): + """Factory creates a fresh environment per __aenter__.""" + envs_created: list[MemoryEnvironment] = [] + + def factory() -> MemoryEnvironment: + env = MemoryEnvironment() + envs_created.append(env) + return env + + toolset = ExecutionEnvironmentToolset(environment_factory=factory) + + async with toolset: + assert len(envs_created) == 1 + assert toolset.environment is envs_created[0] + + # Second entry creates a new environment + async with toolset: + assert len(envs_created) == 2 + assert toolset.environment is envs_created[1] + assert envs_created[0] is not envs_created[1] + + +async def test_toolset_factory_concurrent(): + """Concurrent __aenter__ calls get different environments.""" + import asyncio + + envs_created: list[MemoryEnvironment] = [] + + def factory() -> MemoryEnvironment: + env = MemoryEnvironment() + envs_created.append(env) + return env + + toolset = ExecutionEnvironmentToolset(environment_factory=factory) + + async def enter_and_check() -> MemoryEnvironment: + async with toolset: + env = toolset.environment + assert isinstance(env, MemoryEnvironment) + return env + + env1, env2 = await asyncio.gather(enter_and_check(), enter_and_check()) + assert len(envs_created) == 2 + assert env1 is not env2 + + +async def test_toolset_factory_concurrent_isolation(): + """Two concurrent runs each write a file and don't see each other's files.""" + import asyncio + + def factory() -> MemoryEnvironment: + return MemoryEnvironment() + + toolset = ExecutionEnvironmentToolset(environment_factory=factory) + ctx = build_run_context() + + async def write_and_read(filename: str, content: str) -> tuple[str, str]: + """Write a file, then try to read a file the other task wrote.""" + other_file = 'b.txt' if filename == 'a.txt' else 'a.txt' + async with toolset: + manager = await ToolManager[None](toolset).for_run_step(ctx) + await 
manager.handle_call(ToolCallPart(tool_name='write_file', args={'path': filename, 'content': content})) + # Small delay so both tasks have a chance to write + await asyncio.sleep(0.01) + other_result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': other_file})) + return content, str(other_result) + + (content_a, read_b), (content_b, read_a) = await asyncio.gather( + write_and_read('a.txt', 'alpha'), + write_and_read('b.txt', 'beta'), + ) + + assert content_a == 'alpha' + assert content_b == 'beta' + # Each run should NOT see the other's file — they have isolated environments + assert 'Error' in read_b + assert 'Error' in read_a + + +async def test_toolset_factory_cleanup(): + """__aexit__ properly cleans up factory-created environments.""" + entered = 0 + exited = 0 + + class TrackingEnv(MemoryEnvironment): + async def __aenter__(self): + nonlocal entered + entered += 1 + return await super().__aenter__() + + async def __aexit__(self, *args: Any): + nonlocal exited + exited += 1 + return await super().__aexit__(*args) + + toolset = ExecutionEnvironmentToolset(environment_factory=TrackingEnv) + + async with toolset: + assert entered == 1 + assert exited == 0 + + assert entered == 1 + assert exited == 1 + + +async def test_toolset_factory_mutual_exclusivity(): + """Passing both shared_environment and environment_factory raises ValueError.""" + env = MemoryEnvironment() + with pytest.raises(ValueError, match='Cannot provide both'): + ExecutionEnvironmentToolset(env, environment_factory=MemoryEnvironment) + + +async def test_toolset_factory_with_use_environment(): + """use_environment() overrides the factory-created environment within the context.""" + override_env = MemoryEnvironment() + + toolset = ExecutionEnvironmentToolset(environment_factory=MemoryEnvironment) + + async with toolset: + factory_env = toolset.environment + assert factory_env is not override_env + + with toolset.use_environment(override_env): + assert toolset.environment 
is override_env + + # After exiting use_environment, factory env is restored + assert toolset.environment is factory_env + + # --- Memory image file stored as string --- From c847585a64783ad4e208e456da9d9d1f6f4f1360 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 01:42:25 -0700 Subject: [PATCH 03/49] Remove unused variable in doc example --- docs/environments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/environments.md b/docs/environments.md index c906808ebb..eb9180531a 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -250,7 +250,7 @@ agent = Agent('openai:gpt-5.2', toolsets=[toolset]) async def main(): # Each agent.run() enters its own `async with toolset:`, creating a separate container - results = await asyncio.gather( + await asyncio.gather( agent.run('task A'), agent.run('task B'), ) From 00be4cac6b9e9e57e04b91c5a74eafdaa3d8eee6 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 02:09:53 -0700 Subject: [PATCH 04/49] Fix type errors: use lists instead of sets for include/exclude args --- docs/environments.md | 2 +- tests/test_environments.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index eb9180531a..5e83a3a3dc 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -176,7 +176,7 @@ from pydantic_ai.environments.memory import MemoryEnvironment # Only file tools — no shell or search toolset = ExecutionEnvironmentToolset( MemoryEnvironment(), - include={'read_file', 'write_file', 'edit_file'}, + include=['read_file', 'write_file', 'edit_file'], ) ``` diff --git a/tests/test_environments.py b/tests/test_environments.py index 5658b79a53..393d0e78d0 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -515,7 +515,7 @@ async def test_toolset_tool_names(): async def 
test_toolset_include_flags(): toolset = ExecutionEnvironmentToolset( LocalEnvironment('.'), - include={}, + include=[], ) assert toolset.tools == {} @@ -523,7 +523,7 @@ async def test_toolset_include_flags(): async def test_toolset_include_shell_only(): toolset = ExecutionEnvironmentToolset( LocalEnvironment('.'), - include={'shell'}, + include=['shell'], ) assert sorted(toolset.tools.keys()) == ['shell'] @@ -1215,7 +1215,7 @@ async def test_memory_read_file_bytes(): async def test_memory_toolset_integration(): """MemoryEnvironment works with ExecutionEnvironmentToolset for full agent testing.""" env = MemoryEnvironment(files={'main.py': 'print("hello")\n'}) - toolset = ExecutionEnvironmentToolset(env, exclude={'shell'}) + toolset = ExecutionEnvironmentToolset(env, exclude=['shell']) ctx = build_run_context(None) manager = await ToolManager[None](toolset).for_run_step(ctx) From 33604e65db18351d89c44ba312dcaf5280e59288 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 03:01:02 -0700 Subject: [PATCH 05/49] Work around huggingface/vllm dependency conflict Mark huggingface and outlines-vllm-offline extras as conflicting in uv, and exclude outlines-vllm-offline from --all-extras in CI and Makefile. 
--- .github/workflows/ci.yml | 8 +- Makefile | 20 +- pydantic_ai_slim/pyproject.toml | 8 + uv.lock | 872 +++++++++++++++++--------------- 4 files changed, 489 insertions(+), 419 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0f29afeaff..e761bb6ba7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,7 +30,7 @@ jobs: cache-suffix: lint - name: Install dependencies - run: uv sync --all-extras --all-packages --group lint + run: uv sync --all-extras --no-extra outlines-vllm-offline --all-packages --group lint - uses: pre-commit/action@v3.0.0 with: @@ -113,7 +113,7 @@ jobs: - name: standard command: "" - name: all-extras - command: "--all-extras" + command: "--all-extras --no-extra outlines-vllm-offline" env: CI: true COVERAGE_PROCESS_START: ./pyproject.toml @@ -194,7 +194,7 @@ jobs: - run: unset UV_FROZEN - - run: uv run --all-extras --resolution lowest-direct coverage run -m pytest --durations=100 -n auto --dist=loadgroup + - run: uv run --all-extras --no-extra outlines-vllm-offline --resolution lowest-direct coverage run -m pytest --durations=100 -n auto --dist=loadgroup env: COVERAGE_FILE: .coverage/.coverage.${{matrix.python-version}}-lowest-versions @@ -232,7 +232,7 @@ jobs: restore-keys: | hf-${{ runner.os }}- - - run: uv run --all-extras python tests/import_examples.py + - run: uv run --all-extras --no-extra outlines-vllm-offline python tests/import_examples.py coverage: runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 067ad11dec..51fa6627c8 100644 --- a/Makefile +++ b/Makefile @@ -10,19 +10,19 @@ .PHONY: install install: .uv .pre-commit ## Install the package, dependencies, and pre-commit for local development - uv sync --frozen --all-extras --all-packages --group lint --group docs + uv sync --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs pre-commit install --install-hooks .PHONY: install-all-python install-all-python: ## Install and 
synchronize an interpreter for every python version - UV_PROJECT_ENVIRONMENT=.venv310 uv sync --python 3.10 --frozen --all-extras --all-packages --group lint --group docs - UV_PROJECT_ENVIRONMENT=.venv311 uv sync --python 3.11 --frozen --all-extras --all-packages --group lint --group docs - UV_PROJECT_ENVIRONMENT=.venv312 uv sync --python 3.12 --frozen --all-extras --all-packages --group lint --group docs - UV_PROJECT_ENVIRONMENT=.venv313 uv sync --python 3.13 --frozen --all-extras --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv310 uv sync --python 3.10 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv311 uv sync --python 3.11 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv312 uv sync --python 3.12 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs + UV_PROJECT_ENVIRONMENT=.venv313 uv sync --python 3.13 --frozen --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs .PHONY: sync sync: .uv ## Update local packages and uv.lock - uv sync --all-extras --all-packages --group lint --group docs + uv sync --all-extras --no-extra outlines-vllm-offline --all-packages --group lint --group docs .PHONY: format format: ## Format the code @@ -57,10 +57,10 @@ test: ## Run tests without coverage (fast, for local dev) .PHONY: test-all-python test-all-python: ## Run tests on Python 3.10 to 3.13 - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv310 uv run --python 3.10 --all-extras --all-packages coverage run -p -m pytest - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv311 uv run --python 3.11 --all-extras --all-packages coverage run -p -m pytest - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv312 uv run --python 3.12 --all-extras --all-packages coverage run -p -m pytest - COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv313 uv run --python 3.13 --all-extras 
--all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv310 uv run --python 3.10 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv311 uv run --python 3.11 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv312 uv run --python 3.12 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest + COLUMNS=150 UV_PROJECT_ENVIRONMENT=.venv313 uv run --python 3.13 --all-extras --no-extra outlines-vllm-offline --all-packages coverage run -p -m pytest @uv run coverage combine @uv run coverage report diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index df382e2a84..d9feb2c7c4 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -141,5 +141,13 @@ pai = "pydantic_ai._cli:cli_exit" # TODO remove this when clai has been out for [tool.hatch.build.targets.wheel] packages = ["pydantic_ai"] +[tool.uv] +conflicts = [ + [ + { extra = "huggingface" }, + { extra = "outlines-vllm-offline" }, + ], +] + [tool.uv.sources] pydantic-graph = { workspace = true } diff --git a/uv.lock b/uv.lock index f506b20c05..3433629050 100644 --- a/uv.lock +++ b/uv.lock @@ -7,6 +7,10 @@ resolution-markers = [ "python_full_version == '3.11.*'", "python_full_version < '3.11'", ] +conflicts = [[ + { package = "pydantic-ai-slim", extra = "huggingface" }, + { package = "pydantic-ai-slim", extra = "outlines-vllm-offline" }, +]] [manifest] members = [ @@ -23,7 +27,8 @@ name = "accelerate" version = "1.12.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = 
"https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "numpy" }, { name = "packaging" }, { name = "psutil" }, @@ -73,7 +78,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, { name = "aiosignal" }, - { name = "async-timeout", marker = "python_full_version < '3.11'" }, + { name = "async-timeout", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "attrs" }, { name = "frozenlist" }, { name = "multidict" }, @@ -201,7 +206,7 @@ version = "1.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "frozenlist" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } wheels = [ @@ -227,7 +232,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mako" }, { name = "sqlalchemy" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/a6/74c8cadc2882977d80ad756a13857857dbcf9bd405bc80b662eb10651282/alembic-1.17.2.tar.gz", hash = 
"sha256:bbe9751705c5e0f14877f02d46c53d10885e377e3d90eda810a016f9baa19e8e", size = 1988064, upload-time = "2025-11-14T20:35:04.057Z" } @@ -294,10 +299,10 @@ name = "anyio" version = "4.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "idna" }, { name = "sniffio" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126, upload-time = "2025-01-05T13:13:11.095Z" } wheels = [ @@ -309,7 +314,7 @@ name = "apache-tvm-ffi" version = "0.1.8.post2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e3/e9/a13952726228fa6282154ecf927092396bc759739e5e045019f6ab92f3ca/apache_tvm_ffi-0.1.8.post2.tar.gz", hash = "sha256:4513e38852894f290172ecfefcbc18d34e817fd29c16a0f1770e130c82b4067e", size = 2441111, upload-time = "2026-01-13T18:11:27.864Z" } wheels = [ @@ -350,7 +355,7 @@ dependencies = [ { name = "pyyaml" }, { name = "requests" }, { name = "requests-oauthlib" }, - { name = "tzdata", marker = "sys_platform == 'win32'" }, + { name = "tzdata", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a9/a7/bb182d81f35c3fe405505f0976da4b74f942cfdd53c7193b0fe50412aa27/apprise-1.9.6.tar.gz", hash = "sha256:4206be9cb5694a3d08dd8e0393bbb9b36212ac3a7769c2633620055e75c6caef", size = 1921714, upload-time = "2025-12-07T19:24:30.587Z" } wheels = [ @@ -383,7 +388,7 @@ name = "asgiref" version = "3.11.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/76/b9/4db2509eabd14b4a8c71d1b24c8d5734c52b8560a7b1e1a8b56c8d25568b/asgiref-3.11.0.tar.gz", hash = "sha256:13acff32519542a1736223fb79a715acdebe24286d98e8b164a73085f40da2c4", size = 37969, upload-time = "2025-11-19T15:32:20.106Z" } wheels = [ @@ -425,7 +430,7 @@ name = "asyncpg" version = "0.31.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "async-timeout", marker = "python_full_version < '3.11'" }, + { name = "async-timeout", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fe/cc/d18065ce2380d80b1bcce927c24a2642efd38918e33fd724bc4bca904877/asyncpg-0.31.0.tar.gz", hash = "sha256:c989386c83940bfbd787180f2b1519415e2d3d6277a70d9d0f0145ac73500735", size = 993667, upload-time = "2025-11-24T23:27:00.812Z" } wheels = [ @@ -621,8 +626,8 @@ dependencies = [ { name = "pathspec" }, { name = "platformdirs" }, { name = "pytokens" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "tomli", 
marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c4/d9/07b458a3f1c525ac392b5edc6b191ff140b596f9d77092429417a54e249d/black-25.12.0.tar.gz", hash = "sha256:8d3dd9cea14bff7ddc0eb243c811cdb1a011ebb4800a5f0335a01a68654796a7", size = 659264, upload-time = "2025-12-08T01:40:52.501Z" } wheels = [ @@ -770,7 +775,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore-stubs" }, { name = "types-s3transfer" }, - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ac/42/2a73afec394eec6350d59c4deb4bda2639f7fc0ca8dfb2a41dcc4115f07e/boto3_stubs-1.42.14.tar.gz", hash = "sha256:b06c4be79348573fa03fc7fbe4bd82ebbc7e1e27cf208c8f5ab7bfcb75f55c05", size = 101097, upload-time = "2025-12-19T20:41:44.497Z" } wheels = [ @@ -981,7 +986,7 @@ name = "cffi" version = "2.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pycparser", marker = "implementation_name != 'PyPy'" }, + { name = "pycparser", marker = "implementation_name != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time 
= "2025-09-08T23:24:04.541Z" } wheels = [ @@ -1171,7 +1176,7 @@ name = "click" version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ @@ -1220,10 +1225,10 @@ name = "compressed-tensors" version = "0.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "loguru", marker = "python_full_version < '3.12'" }, - { name = "pydantic", marker = "python_full_version < '3.12'" }, - { name = "torch", marker = "python_full_version < '3.12'" }, - { name = "transformers", marker = "python_full_version < '3.12'" }, + { name = "loguru" }, + { name = "pydantic" }, + { name = "torch" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" } }, ] sdist = { url = "https://files.pythonhosted.org/packages/fc/65/88dd1c58fb9d0ded51b5c86471b937a1525f91fad2211a6f051dc1ea822d/compressed_tensors-0.13.0.tar.gz", hash = "sha256:23893824d3498ea3f1a829f14a8fa85f9a5e76a34c711a038b8d7c619ca9a67c", size = 200995, upload-time = "2025-12-16T16:03:55.397Z" } wheels = [ @@ -1340,7 +1345,7 @@ wheels = [ [package.optional-dependencies] toml = [ - { name = "tomli", marker = "python_full_version <= '3.11'" }, + { name = "tomli", marker = "python_full_version <= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -1348,8 +1353,8 @@ name = "cryptography" version = "46.0.5" source = { registry 
= "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "cffi", marker = "platform_python_implementation != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/60/04/ee2a9e8542e4fa2773b81771ff8349ff19cdd56b7258a0cc442639052edb/cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d", size = 750064, upload-time = "2026-02-10T19:18:38.255Z" } wheels = [ @@ -1421,7 +1426,7 @@ name = "cuda-bindings" version = "13.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cuda-pathfinder", marker = "python_full_version < '3.12'" }, + { name = "cuda-pathfinder" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/60/63/579402b642f5b9b8ceb79e456b39b5771f27e132a8af3b140e54d69790fc/cuda_bindings-13.1.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4400370a83f1538e25ed4c18c34a0e9d5fad39741e282e69ce24d1479a11017d", size = 15777291, upload-time = "2025-12-09T22:05:41.109Z" }, @@ -1457,8 +1462,8 @@ name = "cuda-python" version = "13.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cuda-bindings", marker = "python_full_version < '3.12'" }, - { name = "cuda-pathfinder", marker = "python_full_version < '3.12'" }, + { name = "cuda-bindings" }, + { name = "cuda-pathfinder" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/cd/08/b5e3b9822662d72d540d830531e3ab6a7cabbda3dd56175696aabccfeb76/cuda_python-13.1.1-py3-none-any.whl", hash = 
"sha256:944cc4fe6482673d28dd545797a28840945a1668739328fa2ad1e9be4f7050d9", size = 8038, upload-time = "2025-12-09T22:13:10.719Z" }, @@ -1469,8 +1474,8 @@ name = "cupy-cuda12x" version = "13.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastrlock", marker = "python_full_version < '3.12'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "fastrlock" }, + { name = "numpy" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/f7/2e/db22c5148884e4e384f6ebbc7971fa3710f3ba67ca492798890a0fdebc45/cupy_cuda12x-13.6.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:9e37f60f27ff9625dfdccc4688a09852707ec613e32ea9404f425dd22a386d14", size = 126341714, upload-time = "2025-08-18T08:24:08.335Z" }, @@ -1496,8 +1501,8 @@ dependencies = [ { name = "docstring-parser" }, { name = "rich" }, { name = "rich-rst" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/40/99/e1b75193ee23bd10a05a3b90c065d419b1c8c18f61cae6b8218c7158f792/cyclopts-4.4.1.tar.gz", hash = "sha256:368a404926b46a49dc328a33ccd7e55ba879296a28e64a42afe2f6667704cecf", size = 159245, upload-time = "2025-12-21T13:59:02.266Z" } wheels = [ @@ -1513,7 +1518,8 @@ dependencies = [ { name = "filelock" }, { name = "fsspec", extra = ["http"] }, { name = "httpx" }, - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "multiprocess" }, { name = "numpy" }, { name = "packaging" }, @@ -1591,8 +1597,8 @@ name = "depyf" version = "0.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "astor", marker = "python_full_version < '3.12'" }, - { name = "dill", marker = "python_full_version < '3.12'" }, + { name = "astor" }, + { name = "dill" }, ] sdist = { url = "https://files.pythonhosted.org/packages/88/35/83fb0178212279aa0af031031905804c6de5618435d229f41ed21bb9ad2c/depyf-0.20.0.tar.gz", hash = "sha256:fb7683bd72c44f67b56029df2c47721e9a02ffa4d7b19095f1c54c4ebf797a98", size = 6168761, upload-time = "2025-10-13T12:33:38.589Z" } wheels = [ @@ -1678,7 +1684,7 @@ name = "docker" version = "7.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "pywin32", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "requests" }, { name = "urllib3" }, ] @@ -1811,7 +1817,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, 
upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -1852,7 +1858,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "redis" }, { name = "sortedcontainers" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/5f/f9/57464119936414d60697fcbd32f38909bb5688b616ae13de6e98384433e0/fakeredis-2.33.0.tar.gz", hash = "sha256:d7bc9a69d21df108a6451bbffee23b3eba432c21a654afc7ff2d295428ec5770", size = 175187, upload-time = "2025-12-16T19:45:52.269Z" } wheels = [ @@ -1895,14 +1901,14 @@ wheels = [ [package.optional-dependencies] standard = [ - { name = "email-validator", marker = "python_full_version < '3.12'" }, - { name = "fastapi-cli", extra = ["standard"], marker = "python_full_version < '3.12'" }, - { name = "httpx", marker = "python_full_version < '3.12'" }, - { name = "jinja2", marker = "python_full_version < '3.12'" }, - { name = "pydantic-extra-types", marker = "python_full_version < '3.12'" }, - { name = "pydantic-settings", marker = "python_full_version < '3.12'" }, - { name = "python-multipart", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "email-validator" }, + { name = "fastapi-cli", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "httpx" }, + { name = "jinja2" }, + { name = "pydantic-extra-types" }, + { name = "pydantic-settings" }, + { name = "python-multipart" }, + { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] [[package]] @@ -1910,10 +1916,10 @@ name = "fastapi-cli" version = "0.0.16" source = { registry = "https://pypi.org/simple" } 
dependencies = [ - { name = "rich-toolkit", marker = "python_full_version < '3.12'" }, + { name = "rich-toolkit" }, { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typer", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "typer" }, + { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/99/75/9407a6b452be4c988feacec9c9d2f58d8f315162a6c7258d5a649d933ebe/fastapi_cli-0.0.16.tar.gz", hash = "sha256:e8a2a1ecf7a4e062e3b2eec63ae34387d1e142d4849181d936b23c4bdfe29073", size = 19447, upload-time = "2025-11-10T19:01:07.856Z" } wheels = [ @@ -1922,8 +1928,8 @@ wheels = [ [package.optional-dependencies] standard = [ - { name = "fastapi-cloud-cli", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "fastapi-cloud-cli" }, + { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] [[package]] @@ -1931,14 +1937,14 @@ name = "fastapi-cloud-cli" version = "0.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastar", marker = "python_full_version < '3.12'" }, - { name = "httpx", marker = "python_full_version < '3.12'" }, - { name = "pydantic", extra = ["email"], marker = "python_full_version < '3.12'" }, - { name = "rich-toolkit", marker = "python_full_version < '3.12'" }, - { name = "rignore", marker = "python_full_version < '3.12'" }, - { name = "sentry-sdk", marker = "python_full_version < '3.12'" }, - { name = "typer", marker = "python_full_version < '3.12'" }, - { name = "uvicorn", extra = ["standard"], marker = "python_full_version < '3.12'" }, + { name = "fastar" }, + { name = "httpx" }, + { name = "pydantic", extra = ["email"], marker = "extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "rich-toolkit" }, + { name = "rignore" }, + { name = "sentry-sdk" }, + { name = "typer" }, + { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/cf/0d/3b0d2991f481c122c552b4ae38a8b400a75ab0edbc85536f2a6224f72da2/fastapi_cloud_cli-0.7.0.tar.gz", hash = "sha256:8b025944475c3d53262105886dfe051f46383e4f287787a46892b524922ac0b6", size = 30906, upload-time = "2025-12-16T12:51:49.082Z" } wheels = [ @@ -2146,6 +2152,7 @@ version = "0.8.3" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/73/b1/1c3d635d955f2b4bf34d45abf8f35492e04dbd7804e94ce65d9f928ef3ec/fastrlock-0.8.3.tar.gz", hash = "sha256:4af6734d92eaa3ab4373e6c9a1dd0d5ad1304e172b1521733c6c3b3d73c8fa5d", size = 79327, upload-time = "2024-12-17T11:03:39.638Z" } wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/02/3f771177380d8690812d5b2b7736dc6b6c8cd1c317e4572e65f823eede08/fastrlock-0.8.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:cc5fa9166e05409f64a804d5b6d01af670979cdb12cd2594f555cb33cdc155bd", size = 55094, upload-time = "2024-12-17T11:01:49.721Z" }, { url = "https://files.pythonhosted.org/packages/be/b4/aae7ed94b8122c325d89eb91336084596cebc505dc629b795fcc9629606d/fastrlock-0.8.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7a77ebb0a24535ef4f167da2c5ee35d9be1e96ae192137e9dc3ff75b8dfc08a5", size = 48220, upload-time = "2024-12-17T11:01:51.071Z" }, { url = "https://files.pythonhosted.org/packages/96/87/9807af47617fdd65c68b0fcd1e714542c1d4d3a1f1381f591f1aa7383a53/fastrlock-0.8.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:d51f7fb0db8dab341b7f03a39a3031678cf4a98b18533b176c533c122bfce47d", size = 49551, upload-time = "2024-12-17T11:01:52.316Z" }, { url = 
"https://files.pythonhosted.org/packages/9d/12/e201634810ac9aee59f93e3953cb39f98157d17c3fc9d44900f1209054e9/fastrlock-0.8.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:767ec79b7f6ed9b9a00eb9ff62f2a51f56fdb221c5092ab2dadec34a9ccbfc6e", size = 49398, upload-time = "2024-12-17T11:01:53.514Z" }, @@ -2153,6 +2160,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b5/9e/1ae90829dd40559ab104e97ebe74217d9da794c4bb43016da8367ca7a596/fastrlock-0.8.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:92577ff82ef4a94c5667d6d2841f017820932bc59f31ffd83e4a2c56c1738f90", size = 52495, upload-time = "2024-12-17T11:01:57.76Z" }, { url = "https://files.pythonhosted.org/packages/e5/8c/5e746ee6f3d7afbfbb0d794c16c71bfd5259a4e3fb1dda48baf31e46956c/fastrlock-0.8.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3df8514086e16bb7c66169156a8066dc152f3be892c7817e85bf09a27fa2ada2", size = 51972, upload-time = "2024-12-17T11:02:01.384Z" }, { url = "https://files.pythonhosted.org/packages/76/a7/8b91068f00400931da950f143fa0f9018bd447f8ed4e34bed3fe65ed55d2/fastrlock-0.8.3-cp310-cp310-win_amd64.whl", hash = "sha256:001fd86bcac78c79658bac496e8a17472d64d558cd2227fdc768aa77f877fe40", size = 30946, upload-time = "2024-12-17T11:02:03.491Z" }, + { url = "https://files.pythonhosted.org/packages/90/9e/647951c579ef74b6541493d5ca786d21a0b2d330c9514ba2c39f0b0b0046/fastrlock-0.8.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:f68c551cf8a34b6460a3a0eba44bd7897ebfc820854e19970c52a76bf064a59f", size = 55233, upload-time = "2024-12-17T11:02:04.795Z" }, { url = "https://files.pythonhosted.org/packages/be/91/5f3afba7d14b8b7d60ac651375f50fff9220d6ccc3bef233d2bd74b73ec7/fastrlock-0.8.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:55d42f6286b9d867370af4c27bc70d04ce2d342fe450c4a4fcce14440514e695", size = 48911, upload-time = "2024-12-17T11:02:06.173Z" }, { url = 
"https://files.pythonhosted.org/packages/d5/7a/e37bd72d7d70a8a551b3b4610d028bd73ff5d6253201d5d3cf6296468bee/fastrlock-0.8.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:bbc3bf96dcbd68392366c477f78c9d5c47e5d9290cb115feea19f20a43ef6d05", size = 50357, upload-time = "2024-12-17T11:02:07.418Z" }, { url = "https://files.pythonhosted.org/packages/0d/ef/a13b8bab8266840bf38831d7bf5970518c02603d00a548a678763322d5bf/fastrlock-0.8.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:77ab8a98417a1f467dafcd2226718f7ca0cf18d4b64732f838b8c2b3e4b55cb5", size = 50222, upload-time = "2024-12-17T11:02:08.745Z" }, @@ -2160,11 +2168,13 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c0/8f/65907405a8cdb2fc8beaf7d09a9a07bb58deff478ff391ca95be4f130b70/fastrlock-0.8.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c9d459ce344c21ff03268212a1845aa37feab634d242131bc16c2a2355d5f65", size = 53362, upload-time = "2024-12-17T11:02:12.476Z" }, { url = "https://files.pythonhosted.org/packages/ec/b9/ae6511e52738ba4e3a6adb7c6a20158573fbc98aab448992ece25abb0b07/fastrlock-0.8.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33e6fa4af4f3af3e9c747ec72d1eadc0b7ba2035456c2afb51c24d9e8a56f8fd", size = 52836, upload-time = "2024-12-17T11:02:13.74Z" }, { url = "https://files.pythonhosted.org/packages/88/3e/c26f8192c93e8e43b426787cec04bb46ac36e72b1033b7fe5a9267155fdf/fastrlock-0.8.3-cp311-cp311-win_amd64.whl", hash = "sha256:5e5f1665d8e70f4c5b4a67f2db202f354abc80a321ce5a26ac1493f055e3ae2c", size = 31046, upload-time = "2024-12-17T11:02:15.033Z" }, + { url = "https://files.pythonhosted.org/packages/00/df/56270f2e10c1428855c990e7a7e5baafa9e1262b8e789200bd1d047eb501/fastrlock-0.8.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:8cb2cf04352ea8575d496f31b3b88c42c7976e8e58cdd7d1550dfba80ca039da", size = 55727, upload-time = "2024-12-17T11:02:17.26Z" }, { url = 
"https://files.pythonhosted.org/packages/57/21/ea1511b0ef0d5457efca3bf1823effb9c5cad4fc9dca86ce08e4d65330ce/fastrlock-0.8.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:85a49a1f1e020097d087e1963e42cea6f307897d5ebe2cb6daf4af47ffdd3eed", size = 52201, upload-time = "2024-12-17T11:02:19.512Z" }, { url = "https://files.pythonhosted.org/packages/80/07/cdecb7aa976f34328372f1c4efd6c9dc1b039b3cc8d3f38787d640009a25/fastrlock-0.8.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5f13ec08f1adb1aa916c384b05ecb7dbebb8df9ea81abd045f60941c6283a670", size = 53924, upload-time = "2024-12-17T11:02:20.85Z" }, { url = "https://files.pythonhosted.org/packages/88/6d/59c497f8db9a125066dd3a7442fab6aecbe90d6fec344c54645eaf311666/fastrlock-0.8.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0ea4e53a04980d646def0f5e4b5e8bd8c7884288464acab0b37ca0c65c482bfe", size = 52140, upload-time = "2024-12-17T11:02:22.263Z" }, { url = "https://files.pythonhosted.org/packages/62/04/9138943c2ee803d62a48a3c17b69de2f6fa27677a6896c300369e839a550/fastrlock-0.8.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:38340f6635bd4ee2a4fb02a3a725759fe921f2ca846cb9ca44531ba739cc17b4", size = 53261, upload-time = "2024-12-17T11:02:24.418Z" }, { url = "https://files.pythonhosted.org/packages/e2/4b/db35a52589764c7745a613b6943bbd018f128d42177ab92ee7dde88444f6/fastrlock-0.8.3-cp312-cp312-win_amd64.whl", hash = "sha256:da06d43e1625e2ffddd303edcd6d2cd068e1c486f5fd0102b3f079c44eb13e2c", size = 31235, upload-time = "2024-12-17T11:02:25.708Z" }, + { url = "https://files.pythonhosted.org/packages/92/74/7b13d836c3f221cff69d6f418f46c2a30c4b1fe09a8ce7db02eecb593185/fastrlock-0.8.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:5264088185ca8e6bc83181dff521eee94d078c269c7d557cc8d9ed5952b7be45", size = 54157, upload-time = "2024-12-17T11:02:29.196Z" }, { url = 
"https://files.pythonhosted.org/packages/06/77/f06a907f9a07d26d0cca24a4385944cfe70d549a2c9f1c3e3217332f4f12/fastrlock-0.8.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a98ba46b3e14927550c4baa36b752d0d2f7387b8534864a8767f83cce75c160", size = 50954, upload-time = "2024-12-17T11:02:32.12Z" }, { url = "https://files.pythonhosted.org/packages/f9/4e/94480fb3fd93991dd6f4e658b77698edc343f57caa2870d77b38c89c2e3b/fastrlock-0.8.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbdea6deeccea1917c6017d353987231c4e46c93d5338ca3e66d6cd88fbce259", size = 52535, upload-time = "2024-12-17T11:02:33.402Z" }, { url = "https://files.pythonhosted.org/packages/7d/a7/ee82bb55b6c0ca30286dac1e19ee9417a17d2d1de3b13bb0f20cefb86086/fastrlock-0.8.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c6e5bfecbc0d72ff07e43fed81671747914d6794e0926700677ed26d894d4f4f", size = 50942, upload-time = "2024-12-17T11:02:34.688Z" }, @@ -2207,19 +2217,19 @@ name = "flashinfer-python" version = "0.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "apache-tvm-ffi", marker = "python_full_version < '3.12'" }, - { name = "click", marker = "python_full_version < '3.12'" }, - { name = "einops", marker = "python_full_version < '3.12'" }, - { name = "ninja", marker = "python_full_version < '3.12'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, - { name = "nvidia-cudnn-frontend", marker = "python_full_version < '3.12'" }, - { name = "nvidia-cutlass-dsl", marker = "python_full_version < '3.12'" }, - { name = "nvidia-ml-py", marker = "python_full_version < '3.12'" }, - { name = "packaging", marker = "python_full_version < '3.12'" }, - { name = "requests", marker = "python_full_version < '3.12'" }, - { name = "tabulate", marker = "python_full_version < '3.12'" }, - { name = "torch", marker = "python_full_version < '3.12'" }, - { name = "tqdm", marker = 
"python_full_version < '3.12'" }, + { name = "apache-tvm-ffi" }, + { name = "click" }, + { name = "einops" }, + { name = "ninja" }, + { name = "numpy" }, + { name = "nvidia-cudnn-frontend" }, + { name = "nvidia-cutlass-dsl" }, + { name = "nvidia-ml-py" }, + { name = "packaging" }, + { name = "requests" }, + { name = "tabulate" }, + { name = "torch" }, + { name = "tqdm" }, ] sdist = { url = "https://files.pythonhosted.org/packages/68/81/5a84e14df7358d2c2903b18c6f2779bd4b4a6739076d01a847d4c18fb102/flashinfer_python-0.6.1.tar.gz", hash = "sha256:8dc2fc5dc187fc70151d5f39ef560fde8a38117a4f6cf40dce0ddb09cbd4f0bf", size = 5141191, upload-time = "2026-01-14T05:40:27.825Z" } wheels = [ @@ -2397,9 +2407,9 @@ name = "gguf" version = "0.17.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy", marker = "python_full_version < '3.12'" }, - { name = "pyyaml", marker = "python_full_version < '3.12'" }, - { name = "tqdm", marker = "python_full_version < '3.12'" }, + { name = "numpy" }, + { name = "pyyaml" }, + { name = "tqdm" }, ] sdist = { url = "https://files.pythonhosted.org/packages/08/08/7de1ca4b71e7bf33b547f82bb22505e221b5fa42f67d635e200e0ad22ad6/gguf-0.17.1.tar.gz", hash = "sha256:36ad71aad900a3e75fc94ebe96ea6029f03a4e44be7627ef7ad3d03e8c7bcb53", size = 89338, upload-time = "2025-06-19T14:00:33.705Z" } wheels = [ @@ -2477,14 +2487,15 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiofiles" }, { name = "anyio" }, - { name = "audioop-lts", marker = "python_full_version >= '3.13'" }, + { name = "audioop-lts", marker = "python_full_version >= '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "brotli" }, { name = "fastapi" }, { name = "ffmpy" }, { name = "gradio-client" }, { name = "groovy" }, { name = "httpx" }, - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = 
"https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "jinja2" }, { name = "markupsafe" }, { name = "numpy" }, @@ -2516,7 +2527,8 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fsspec" }, { name = "httpx" }, - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "packaging" }, { name = "typing-extensions" }, ] @@ -2713,8 +2725,8 @@ name = "grpcio-reflection" version = "1.76.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "grpcio", marker = "python_full_version < '3.12'" }, - { name = "protobuf", marker = "python_full_version < '3.12'" }, + { name = "grpcio" }, + { name = "protobuf" }, ] sdist = { url = "https://files.pythonhosted.org/packages/bd/10/767f9c2719c435616141efb3371f6e158f95cdde36a34876ae1d08ba7440/grpcio_reflection-1.76.0.tar.gz", hash = "sha256:e0e7e49921c2ee951e5ddff0bdbacbd1ac1a70888beb61d567f3d01b799decb1", size = 18845, upload-time = "2025-10-21T16:28:57.776Z" } wheels = [ @@ -2864,8 +2876,8 @@ wheels = [ [package.optional-dependencies] brotli = [ - { name = "brotli", marker = "platform_python_implementation == 'CPython'" }, - { name = "brotlicffi", marker = "platform_python_implementation != 'CPython'" }, + { name = "brotli", marker = "platform_python_implementation == 'CPython' or (extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "brotlicffi", marker = "platform_python_implementation != 'CPython' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] http2 = [ { name = "h2" }, @@ -2885,23 +2897,54 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.3.4" +version = "0.36.2" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] dependencies = [ - { name = "filelock" }, - { name = "fsspec" }, - { name = "hf-xet", marker = "platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, - { name = "httpx" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "shellingham" }, - { name = "tqdm" }, - { name = "typer-slim" }, - { name = "typing-extensions" }, + { name = "filelock", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "fsspec", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "hf-xet", marker = "(platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "pyyaml", marker = "extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "requests", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typing-extensions", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/af/25/74af9d16cd59ae15b12467a79a84aa0fe24be4aba68fc4da0c1864d49c17/huggingface_hub-1.3.4.tar.gz", hash = "sha256:c20d5484a611b7b7891d272e8fc9f77d5de025b0480bdacfa858efb3780b455f", size = 627683, upload-time = "2026-01-26T14:05:10.656Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/b7/8cb61d2eece5fb05a83271da168186721c450eb74e3c31f7ef3169fa475b/huggingface_hub-0.36.2.tar.gz", hash = "sha256:1934304d2fb224f8afa3b87007d58501acfda9215b334eed53072dd5e815ff7a", size = 649782, upload-time = "2026-02-06T09:24:13.098Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/07/3d0c34c345043c6a398a5882e196b2220dc5861adfa18322448b90908f26/huggingface_hub-1.3.4-py3-none-any.whl", hash = "sha256:a0c526e76eb316e96a91e8a1a7a93cf66b0dd210be1a17bd5fc5ae53cba76bfd", size = 536611, upload-time = "2026-01-26T14:05:08.549Z" }, + { url = "https://files.pythonhosted.org/packages/a8/af/48ac8483240de756d2438c380746e7130d1c6f75802ef22f3c6d49982787/huggingface_hub-0.36.2-py3-none-any.whl", hash = "sha256:48f0c8eac16145dfce371e9d2d7772854a4f591bcb56c9cf548accf531d54270", size = 566395, upload-time = "2026-02-06T09:24:11.133Z" }, +] + +[[package]] +name = "huggingface-hub" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] +dependencies = [ + { name = "filelock", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "fsspec", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "hf-xet", marker = "(platform_machine == 'AMD64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'AMD64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'aarch64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'amd64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'arm64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'x86_64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "httpx", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "pyyaml", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "shellingham", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, 
+ { name = "typer-slim", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typing-extensions", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c4/fc/eb9bc06130e8bbda6a616e1b80a7aa127681c448d6b49806f61db2670b61/huggingface_hub-1.4.1.tar.gz", hash = "sha256:b41131ec35e631e7383ab26d6146b8d8972abc8b6309b963b306fbcca87f5ed5", size = 642156, upload-time = "2026-02-06T09:20:03.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/ae/2f6d96b4e6c5478d87d606a1934b5d436c4a2bce6bb7c6fdece891c128e3/huggingface_hub-1.4.1-py3-none-any.whl", hash = "sha256:9931d075fb7a79af5abc487106414ec5fba2c0ae86104c0c62fd6cae38873d18", size = 553326, upload-time = "2026-02-06T09:20:00.728Z" }, ] [[package]] @@ -3052,7 +3095,7 @@ dependencies = [ { name = "executing" }, { name = "pytest" }, { name = "rich" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1c/b1/52b5ee59f73ed31d5fe21b10881bf2d121d07d54b23c0b6b74186792e620/inline_snapshot-0.31.1.tar.gz", hash = "sha256:4ea5ed70aa1d652713bbfd750606b94bd8a42483f7d3680433b3e92994495f64", size = 2606338, upload-time = "2025-11-07T07:36:18.932Z" } wheels = [ @@ -3094,7 +3137,7 @@ name = "jaraco-context" version = "6.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "backports-tarfile", marker = "python_full_version < '3.12'" }, + { name = "backports-tarfile", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { 
url = "https://files.pythonhosted.org/packages/cb/9c/a788f5bb29c61e456b8ee52ce76dbdd32fd72cd73dd67bc95f42c7a8d13c/jaraco_context-6.1.0.tar.gz", hash = "sha256:129a341b0a85a7db7879e22acd66902fda67882db771754574338898b2d5d86f", size = 15850, upload-time = "2026-01-13T02:53:53.847Z" } wheels = [ @@ -3342,13 +3385,13 @@ name = "keyring" version = "25.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "importlib-metadata", marker = "python_full_version < '3.12'" }, + { name = "importlib-metadata", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "jaraco-classes" }, { name = "jaraco-context" }, { name = "jaraco-functools" }, - { name = "jeepney", marker = "sys_platform == 'linux'" }, - { name = "pywin32-ctypes", marker = "sys_platform == 'win32'" }, - { name = "secretstorage", marker = "sys_platform == 'linux'" }, + { name = "jeepney", marker = "sys_platform == 'linux' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pywin32-ctypes", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "secretstorage", marker = "sys_platform == 'linux' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/43/4b/674af6ef2f97d56f0ab5153bf0bfa28ccb6c3ed4d1babf4305449668807b/keyring-25.7.0.tar.gz", hash = "sha256:fe01bd85eb3f8fb3dd0405defdeac9a5b4f6f0439edbb3149577f244a2e8245b", size = 63516, upload-time = "2025-11-16T16:26:09.482Z" } wheels = [ @@ -3392,7 +3435,7 @@ version = "0.6.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, - { name = "orjson", marker = 
"platform_python_implementation != 'PyPy'" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "packaging" }, { name = "pydantic" }, { name = "requests" }, @@ -3505,9 +3548,12 @@ version = "1.3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/95/48/3f7a9d3ff1b36bba92b5107a3a21286821227afe9ea464736133994d61fb/llguidance-1.3.0.tar.gz", hash = "sha256:861249afd51dc325646834462ea827e57a5c2b2042e108e6aae7059fdad9104d", size = 1070460, upload-time = "2025-10-20T19:58:44.164Z" } wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/33/be5acb85cd8cdc4afde33d9c234eece9f318e087920255af3c05864cd3e7/llguidance-1.3.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f7685222660a762e481ac633d49cc559c64980fe2ee59c8f932a5bb5cbc0c2c2", size = 3220647, upload-time = "2025-10-20T19:58:42.542Z" }, { url = "https://files.pythonhosted.org/packages/82/e6/b48bda5b15efeaeb62bd0dba8fc6a01d4ae5457a85dbb5d18632385fe15c/llguidance-1.3.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:098030ff0687261a3f1bd54cf21fe951fc861d56d37a0671250dd36677eaf224", size = 3099830, upload-time = "2025-10-20T19:58:40.826Z" }, { url = "https://files.pythonhosted.org/packages/aa/11/44389d3d1526d7a5c38ffd587a5ebc61d7bee443ac1dea95f2089ad58f5f/llguidance-1.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f6caca5d78db7f76e1fbb0fff8607b861c32d47fa3d5dee2fc49de27ee269df", size = 2835242, upload-time = "2025-10-20T19:58:34.518Z" }, + { url = "https://files.pythonhosted.org/packages/e7/ca/53ea256396405e4dee70d5a4a35e18543408e18bb16b251d6ca6b5d80310/llguidance-1.3.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0612bb3f034d2487b6e8f9561f02a94a6039d88273bf0c5c539a3bd3895e47d2", size = 3297480, upload-time = "2025-10-20T19:58:37.033Z" }, { url = 
"https://files.pythonhosted.org/packages/83/a8/1ff2bedb8f9acb46a2d2d603415d272bb622c142ea86f5b95445cc6e366c/llguidance-1.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc17e9dd602c3879bf91664a64bf72f54c74dbfbeb24ccfab6a5fe435b12f7aa", size = 3033133, upload-time = "2025-10-20T19:58:38.721Z" }, + { url = "https://files.pythonhosted.org/packages/d7/a7/9b8086c0cfdddf3f6d47b173a404fa7ac46272f7affbee082c36740f4f1c/llguidance-1.3.0-cp39-abi3-win32.whl", hash = "sha256:2f6f558485a43e273fc5c6c974a9a3ace5d5e170076db9b40e0560e41c3ff18f", size = 2598109, upload-time = "2025-10-20T19:58:47.656Z" }, { url = "https://files.pythonhosted.org/packages/5a/7e/809349638231f469b9056c0e1bfd924d5ef5558b3b3ec72d093b6fad33b1/llguidance-1.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:1d1cd1c8618d1a13605d3e057c978651e551c8c469b481ee4041f1d6c436002d", size = 2789946, upload-time = "2025-10-20T19:58:45.958Z" }, ] @@ -3515,10 +3561,6 @@ wheels = [ name = "llvmlite" version = "0.44.0" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version == '3.11.*'", - "python_full_version < '3.11'", -] sdist = { url = "https://files.pythonhosted.org/packages/89/6a/95a3d3610d5c75293d5dbbb2a76480d5d4eeba641557b69fe90af6c5b84e/llvmlite-0.44.0.tar.gz", hash = "sha256:07667d66a5d150abed9157ab6c0b9393c9356f229784a4385c02f99e94fc94d4", size = 171880, upload-time = "2025-01-20T11:14:41.342Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/41/75/d4863ddfd8ab5f6e70f4504cf8cc37f4e986ec6910f4ef8502bb7d3c1c71/llvmlite-0.44.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:9fbadbfba8422123bab5535b293da1cf72f9f478a65645ecd73e781f962ca614", size = 28132306, upload-time = "2025-01-20T11:12:18.634Z" }, @@ -3543,47 +3585,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d0/81/e66fc86539293282fd9cb7c9417438e897f369e79ffb62e1ae5e5154d4dd/llvmlite-0.44.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:2fb7c4f2fb86cbae6dca3db9ab203eeea0e22d73b99bc2341cdf9de93612e930", size = 30331193, upload-time = "2025-01-20T11:14:38.578Z" }, ] -[[package]] -name = "llvmlite" -version = "0.46.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.13'", - "python_full_version == '3.12.*'", -] -sdist = { url = "https://files.pythonhosted.org/packages/74/cd/08ae687ba099c7e3d21fe2ea536500563ef1943c5105bf6ab4ee3829f68e/llvmlite-0.46.0.tar.gz", hash = "sha256:227c9fd6d09dce2783c18b754b7cd9d9b3b3515210c46acc2d3c5badd9870ceb", size = 193456, upload-time = "2025-12-08T18:15:36.295Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/a4/3959e1c61c5ca9db7921e5fd115b344c29b9d57a5dadd87bef97963ca1a5/llvmlite-0.46.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4323177e936d61ae0f73e653e2e614284d97d14d5dd12579adc92b6c2b0597b0", size = 37232766, upload-time = "2025-12-08T18:14:34.765Z" }, - { url = "https://files.pythonhosted.org/packages/c2/a5/a4d916f1015106e1da876028606a8e87fd5d5c840f98c87bc2d5153b6a2f/llvmlite-0.46.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a2d461cb89537b7c20feb04c46c32e12d5ad4f0896c9dfc0f60336219ff248e", size = 56275176, upload-time = "2025-12-08T18:14:37.944Z" }, - { url = "https://files.pythonhosted.org/packages/79/7f/a7f2028805dac8c1a6fae7bda4e739b7ebbcd45b29e15bf6d21556fcd3d5/llvmlite-0.46.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b1f6595a35b7b39c3518b85a28bf18f45e075264e4b2dce3f0c2a4f232b4a910", size = 55128629, upload-time = "2025-12-08T18:14:41.674Z" }, - { url = "https://files.pythonhosted.org/packages/b2/bc/4689e1ba0c073c196b594471eb21be0aa51d9e64b911728aa13cd85ef0ae/llvmlite-0.46.0-cp310-cp310-win_amd64.whl", hash = "sha256:e7a34d4aa6f9a97ee006b504be6d2b8cb7f755b80ab2f344dda1ef992f828559", size = 38138651, upload-time = "2025-12-08T18:14:45.845Z" }, - { url = 
"https://files.pythonhosted.org/packages/7a/a1/2ad4b2367915faeebe8447f0a057861f646dbf5fbbb3561db42c65659cf3/llvmlite-0.46.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:82f3d39b16f19aa1a56d5fe625883a6ab600d5cc9ea8906cca70ce94cabba067", size = 37232766, upload-time = "2025-12-08T18:14:48.836Z" }, - { url = "https://files.pythonhosted.org/packages/12/b5/99cf8772fdd846c07da4fd70f07812a3c8fd17ea2409522c946bb0f2b277/llvmlite-0.46.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a3df43900119803bbc52720e758c76f316a9a0f34612a886862dfe0a5591a17e", size = 56275175, upload-time = "2025-12-08T18:14:51.604Z" }, - { url = "https://files.pythonhosted.org/packages/38/f2/ed806f9c003563732da156139c45d970ee435bd0bfa5ed8de87ba972b452/llvmlite-0.46.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de183fefc8022d21b0aa37fc3e90410bc3524aed8617f0ff76732fc6c3af5361", size = 55128630, upload-time = "2025-12-08T18:14:55.107Z" }, - { url = "https://files.pythonhosted.org/packages/19/0c/8f5a37a65fc9b7b17408508145edd5f86263ad69c19d3574e818f533a0eb/llvmlite-0.46.0-cp311-cp311-win_amd64.whl", hash = "sha256:e8b10bc585c58bdffec9e0c309bb7d51be1f2f15e169a4b4d42f2389e431eb93", size = 38138652, upload-time = "2025-12-08T18:14:58.171Z" }, - { url = "https://files.pythonhosted.org/packages/2b/f8/4db016a5e547d4e054ff2f3b99203d63a497465f81ab78ec8eb2ff7b2304/llvmlite-0.46.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b9588ad4c63b4f0175a3984b85494f0c927c6b001e3a246a3a7fb3920d9a137", size = 37232767, upload-time = "2025-12-08T18:15:00.737Z" }, - { url = "https://files.pythonhosted.org/packages/aa/85/4890a7c14b4fa54400945cb52ac3cd88545bbdb973c440f98ca41591cdc5/llvmlite-0.46.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3535bd2bb6a2d7ae4012681ac228e5132cdb75fefb1bcb24e33f2f3e0c865ed4", size = 56275176, upload-time = "2025-12-08T18:15:03.936Z" }, - { url = 
"https://files.pythonhosted.org/packages/6a/07/3d31d39c1a1a08cd5337e78299fca77e6aebc07c059fbd0033e3edfab45c/llvmlite-0.46.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cbfd366e60ff87ea6cc62f50bc4cd800ebb13ed4c149466f50cf2163a473d1e", size = 55128630, upload-time = "2025-12-08T18:15:07.196Z" }, - { url = "https://files.pythonhosted.org/packages/2a/6b/d139535d7590a1bba1ceb68751bef22fadaa5b815bbdf0e858e3875726b2/llvmlite-0.46.0-cp312-cp312-win_amd64.whl", hash = "sha256:398b39db462c39563a97b912d4f2866cd37cba60537975a09679b28fbbc0fb38", size = 38138940, upload-time = "2025-12-08T18:15:10.162Z" }, - { url = "https://files.pythonhosted.org/packages/e6/ff/3eba7eb0aed4b6fca37125387cd417e8c458e750621fce56d2c541f67fa8/llvmlite-0.46.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:30b60892d034bc560e0ec6654737aaa74e5ca327bd8114d82136aa071d611172", size = 37232767, upload-time = "2025-12-08T18:15:13.22Z" }, - { url = "https://files.pythonhosted.org/packages/0e/54/737755c0a91558364b9200702c3c9c15d70ed63f9b98a2c32f1c2aa1f3ba/llvmlite-0.46.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6cc19b051753368a9c9f31dc041299059ee91aceec81bd57b0e385e5d5bf1a54", size = 56275176, upload-time = "2025-12-08T18:15:16.339Z" }, - { url = "https://files.pythonhosted.org/packages/e6/91/14f32e1d70905c1c0aa4e6609ab5d705c3183116ca02ac6df2091868413a/llvmlite-0.46.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bca185892908f9ede48c0acd547fe4dc1bafefb8a4967d47db6cf664f9332d12", size = 55128629, upload-time = "2025-12-08T18:15:19.493Z" }, - { url = "https://files.pythonhosted.org/packages/4a/a7/d526ae86708cea531935ae777b6dbcabe7db52718e6401e0fb9c5edea80e/llvmlite-0.46.0-cp313-cp313-win_amd64.whl", hash = "sha256:67438fd30e12349ebb054d86a5a1a57fd5e87d264d2451bcfafbbbaa25b82a35", size = 38138941, upload-time = "2025-12-08T18:15:22.536Z" }, - { url = 
"https://files.pythonhosted.org/packages/95/ae/af0ffb724814cc2ea64445acad05f71cff5f799bb7efb22e47ee99340dbc/llvmlite-0.46.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:d252edfb9f4ac1fcf20652258e3f102b26b03eef738dc8a6ffdab7d7d341d547", size = 37232768, upload-time = "2025-12-08T18:15:25.055Z" }, - { url = "https://files.pythonhosted.org/packages/c9/19/5018e5352019be753b7b07f7759cdabb69ca5779fea2494be8839270df4c/llvmlite-0.46.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:379fdd1c59badeff8982cb47e4694a6143bec3bb49aa10a466e095410522064d", size = 56275173, upload-time = "2025-12-08T18:15:28.109Z" }, - { url = "https://files.pythonhosted.org/packages/9f/c9/d57877759d707e84c082163c543853245f91b70c804115a5010532890f18/llvmlite-0.46.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e8cbfff7f6db0fa2c771ad24154e2a7e457c2444d7673e6de06b8b698c3b269", size = 55128628, upload-time = "2025-12-08T18:15:31.098Z" }, - { url = "https://files.pythonhosted.org/packages/30/a8/e61a8c2b3cc7a597073d9cde1fcbb567e9d827f1db30c93cf80422eac70d/llvmlite-0.46.0-cp314-cp314-win_amd64.whl", hash = "sha256:7821eda3ec1f18050f981819756631d60b6d7ab1a6cf806d9efefbe3f4082d61", size = 39153056, upload-time = "2025-12-08T18:15:33.938Z" }, -] - [[package]] name = "lm-format-enforcer" version = "0.11.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "interegular", marker = "python_full_version < '3.12'" }, - { name = "packaging", marker = "python_full_version < '3.12'" }, - { name = "pydantic", marker = "python_full_version < '3.12'" }, - { name = "pyyaml", marker = "python_full_version < '3.12'" }, + { name = "interegular" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyyaml" }, ] sdist = { url = "https://files.pythonhosted.org/packages/84/d5/41cd417ba7dfdbbcfe46cebf81fb3dfd7c591b89897560ad05bb410a465d/lm_format_enforcer-0.11.3.tar.gz", hash = 
"sha256:e68081c108719cce284a9bcc889709b26ffb085a1945b5eba3a12cfa96d528da", size = 40258, upload-time = "2025-08-24T19:37:47.527Z" } wheels = [ @@ -3601,7 +3611,7 @@ dependencies = [ { name = "opentelemetry-sdk" }, { name = "protobuf" }, { name = "rich" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e2/60/b8040db3598a55da64c45e3e689f2baa87389a4648a6f46ba80be3329f23/logfire-4.16.0.tar.gz", hash = "sha256:03a3ab8fdc13399309cb55d69cba7a6fcbad3526cfad85fc4f72e7d75e22b654", size = 550759, upload-time = "2025-12-04T16:16:39.477Z" } @@ -3637,8 +3647,8 @@ name = "loguru" version = "0.7.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "python_full_version < '3.12' and sys_platform == 'win32'" }, - { name = "win32-setctime", marker = "python_full_version < '3.12' and sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" } wheels = [ @@ -3940,12 +3950,12 @@ dependencies = [ { name = "pydantic-settings" }, { name = "pyjwt", extra = ["crypto"] }, { name = "python-multipart" }, - { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "pywin32", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "sse-starlette" }, { name = "starlette" }, { 
name = "typing-extensions" }, { name = "typing-inspection" }, - { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } wheels = [ @@ -3964,7 +3974,7 @@ version = "0.7.22" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fc/eb/b5cbf2484411af039a3d4aeb53a5160fae25dd8c84af6a4243bc2f3fedb3/mdformat-0.7.22.tar.gz", hash = "sha256:eef84fa8f233d3162734683c2a8a6222227a229b9206872e6139658d99acb1ea", size = 34610, upload-time = "2025-01-30T18:00:51.418Z" } wheels = [ @@ -4007,14 +4017,14 @@ name = "mistral-common" version = "1.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "jsonschema", marker = "python_full_version < '3.12'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, - { name = "pillow", marker = "python_full_version < '3.12'" }, - { name = "pydantic", marker = "python_full_version < '3.12'" }, - { name = "pydantic-extra-types", extra = ["pycountry"], marker = "python_full_version < '3.12'" }, - { name = "requests", marker = "python_full_version < '3.12'" }, - { name = "tiktoken", marker = "python_full_version < '3.12'" }, - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = 
"jsonschema" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "pydantic" }, + { name = "pydantic-extra-types", extra = ["pycountry"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "requests" }, + { name = "tiktoken" }, + { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/db/ce/685b8127a326478e05501cb4c9ca23d1cd9f37e16c465a1e832c75aea709/mistral_common-1.9.1.tar.gz", hash = "sha256:550583d70a395c3586cfb748ffab53bd1d7c3409507f0efc0118bff30ffb26e9", size = 6338922, upload-time = "2026-02-12T10:53:41.639Z" } wheels = [ @@ -4023,7 +4033,7 @@ wheels = [ [package.optional-dependencies] image = [ - { name = "opencv-python-headless", marker = "python_full_version < '3.12'" }, + { name = "opencv-python-headless" }, ] [[package]] @@ -4050,7 +4060,7 @@ version = "1.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "ghp-import" }, { name = "jinja2" }, { name = "markdown" }, @@ -4197,7 +4207,7 @@ dependencies = [ { name = "griffe" }, { name = "mkdocs-autorefs" }, { name = "mkdocstrings" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/24/75/d30af27a2906f00eb90143470272376d728521997800f5dce5b340ba35bc/mkdocstrings_python-2.0.1.tar.gz", hash = "sha256:843a562221e6a471fefdd4b45cc6c22d2607ccbad632879234fa9692e9cf7732", size = 199345, upload-time = "2025-12-03T14:26:11.755Z" } wheels = [ @@ -4209,24 +4219,34 @@ name = "mlx" version = 
"0.30.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "mlx-metal", marker = "sys_platform == 'darwin'" }, + { name = "mlx-metal", marker = "sys_platform == 'darwin' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/cd/8d/16a34feb957ac33525b9b787b5132053a44bc94d1bf40c18639f6e05cd2a/mlx-0.30.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:391c650f0578ce359c8cffddb204b42798b622f9ee2b57b865d87716c00db536", size = 592926, upload-time = "2025-12-18T01:55:28.757Z" }, { url = "https://files.pythonhosted.org/packages/34/e6/0661455f5f4bd9de257874b28a96a33699d36a1e17ccde821341c0ac1c0e/mlx-0.30.1-cp310-cp310-macosx_15_0_arm64.whl", hash = "sha256:42fefcad72d7488c65649e152a1b28f00c2033d38121afa45ce65ae16ec6b988", size = 592926, upload-time = "2025-12-18T01:55:30.141Z" }, { url = "https://files.pythonhosted.org/packages/d8/37/a322af7dba9101064b5e858d1208e0e66cd83be7d060d14fa03ace37d52e/mlx-0.30.1-cp310-cp310-macosx_26_0_arm64.whl", hash = "sha256:a9db94e7e080672cc0dda9a5f121aebe0d49f7a8cb46706ecfd8b8ce7d99d541", size = 566952, upload-time = "2025-12-18T00:15:50.075Z" }, + { url = "https://files.pythonhosted.org/packages/c9/46/f0005d07fe5687bbf4efc15b468d27f2923f486b07a625d35c7d3cbb4962/mlx-0.30.1-cp310-cp310-manylinux_2_35_aarch64.whl", hash = "sha256:44b2142896c8dd8ab057dd785faf92fa83f3697b4b6bb01ff7515df12b6de666", size = 658049, upload-time = "2025-12-18T01:55:31.748Z" }, + { url = "https://files.pythonhosted.org/packages/cb/95/cc47c4607cc78f55ce3081ade9161961795c15c049bf219f27a393f85767/mlx-0.30.1-cp310-cp310-manylinux_2_35_x86_64.whl", hash = "sha256:37ea97b3c4bd71b19d87c6ef2c9e681e11f37908d8381fc2b785d2509b0681df", size = 692336, upload-time = "2025-12-18T01:55:33.224Z" }, { url = 
"https://files.pythonhosted.org/packages/07/14/74acbd677ececd17a44dafda1b472aebacef54f60ff9a41a801f711de9a7/mlx-0.30.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:acfd7d1b8e5b9fa1b7e9fab4cc5ba6a492c559fbb1c5aeab16c1d7a148ab4f1b", size = 593048, upload-time = "2025-12-18T01:55:34.9Z" }, { url = "https://files.pythonhosted.org/packages/58/8c/5309848afb9c53d363f59b88ae5811de248e2817e91aeadf007e2ac8d22b/mlx-0.30.1-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:b62030471272d1835b8137164bd43d863cc93ff1d67ec4f1f87bb4c8613dd5a6", size = 593043, upload-time = "2025-12-18T01:55:36.839Z" }, { url = "https://files.pythonhosted.org/packages/e8/5a/0039815a930f0193e2cffb27c57dc6971004bce0086c2bbbdb10395c272c/mlx-0.30.1-cp311-cp311-macosx_26_0_arm64.whl", hash = "sha256:0489cd340f2d262cb3aaad4368e40e84b152e182e4cea37ba018e56c72e1d020", size = 567014, upload-time = "2025-12-18T00:15:51.731Z" }, + { url = "https://files.pythonhosted.org/packages/de/c7/6bdb5497c1f5ed3e33afa7785761ad87fd3436c071805d9a93c905943f04/mlx-0.30.1-cp311-cp311-manylinux_2_35_aarch64.whl", hash = "sha256:fbdcfc3ed556a7e701a8eb67da299e2a25f52615193833ca6374decca3be5bf4", size = 658930, upload-time = "2025-12-18T01:55:38.441Z" }, + { url = "https://files.pythonhosted.org/packages/91/02/2d86a1c116e951eb4d88fe313c321e23628ce7404712e1258cacf925a8b8/mlx-0.30.1-cp311-cp311-manylinux_2_35_x86_64.whl", hash = "sha256:68ec854e7b5f89454e67d6c2fa7bb416b8afb148003ccd775904ec6ec4744818", size = 692484, upload-time = "2025-12-18T01:55:40.254Z" }, { url = "https://files.pythonhosted.org/packages/3a/4b/ad57b2f0ede3f0d009c0e3e1270c219bd18f9025388855ee149680cffa20/mlx-0.30.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:deaef3ecd2f99930867a29de748e3bffa9cc7e4dfa834f2501c37ed29aece1cc", size = 593397, upload-time = "2025-12-18T01:55:41.814Z" }, { url = "https://files.pythonhosted.org/packages/ef/14/7fa03a0f66ac3cfb2fd6752178a1488f13c7233fff26eed0f832d961db35/mlx-0.30.1-cp312-cp312-macosx_15_0_arm64.whl", hash = 
"sha256:86ccdcda0b5ea4768b87da25beae5b83ac7cc802506116b6845cea6f450e2377", size = 593397, upload-time = "2025-12-18T01:55:43Z" }, { url = "https://files.pythonhosted.org/packages/9c/c8/9f1343dbe2381f9653df4e0a62dc8bf38f575a2553dc2aa6916de32d2a85/mlx-0.30.1-cp312-cp312-macosx_26_0_arm64.whl", hash = "sha256:a625cb434b2acc5674fe10683374641dab9671fb354ae7c2c67a1fb0405eeb37", size = 567576, upload-time = "2025-12-18T00:15:55.114Z" }, + { url = "https://files.pythonhosted.org/packages/15/ff/485ed9c99c18ef89ac987178c0a526cb4148ba38b14838d315311d9d76a8/mlx-0.30.1-cp312-cp312-manylinux_2_35_aarch64.whl", hash = "sha256:ccc1ff3aca8fb1073c7dcd1274cebe48ae75f852d14b16c7db8228fbbad594dd", size = 643654, upload-time = "2025-12-18T01:55:44.165Z" }, + { url = "https://files.pythonhosted.org/packages/8a/d3/54d3bf5e404c3b6424b49c505dc8b3c06c6bb498fe720195b1fafbd69b5e/mlx-0.30.1-cp312-cp312-manylinux_2_35_x86_64.whl", hash = "sha256:55ed7fc4b389d6e49dac6d34a97b41e61cbe3662ac29c3d29cf612e6b2ed9827", size = 687305, upload-time = "2025-12-18T01:55:45.526Z" }, { url = "https://files.pythonhosted.org/packages/f9/fd/c6f56cd87d48763ed63655ace627c06db9819eae7d43d132f40d4965947a/mlx-0.30.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:743520758bc8261b2ed8f3b3dc96e4e9236769dd8f61fb17877c5e44037e2058", size = 593366, upload-time = "2025-12-18T01:55:46.786Z" }, { url = "https://files.pythonhosted.org/packages/dc/53/96d8c48b21f91c4216b6d2ef6dfc10862e5fb0b811a2aaf02c96c78601de/mlx-0.30.1-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:fc9745bc1860ca60128e3a6d36157da06d936e2b4007a4dcba990b40202f598f", size = 593368, upload-time = "2025-12-18T01:55:48.363Z" }, { url = "https://files.pythonhosted.org/packages/70/ce/476c3b7d3a4153bd0e1c5af1f1b6c09a804b652bbed34072404b322c22e0/mlx-0.30.1-cp313-cp313-macosx_26_0_arm64.whl", hash = "sha256:a1480399c67bb327a66c5527b73915132e3fcaae3bce9634e5c81ccad9f43229", size = 567561, upload-time = "2025-12-18T00:15:56.153Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/41/7ad1e639fd7dd1cf01a62c1c5b051024a859888c27504996e9d8380e6754/mlx-0.30.1-cp313-cp313-manylinux_2_35_aarch64.whl", hash = "sha256:8e19850a4236a8e174f851f5789b8b62a8eb74f5a8fa49ad8ba286c5ddb5f9bf", size = 643122, upload-time = "2025-12-18T01:55:49.607Z" }, + { url = "https://files.pythonhosted.org/packages/d0/dc/72d3737c5b0662eb5e785d353dbc5e34d793d27b09b99e39993ee051bd19/mlx-0.30.1-cp313-cp313-manylinux_2_35_x86_64.whl", hash = "sha256:1c8ed5bcd9f1910fca209e95859ac737e60b3e1954181b820fa269158f81049a", size = 687254, upload-time = "2025-12-18T01:55:51.239Z" }, { url = "https://files.pythonhosted.org/packages/9b/cc/523448996247bb05d9d68e23bccf3dafdda660befb9330f6bd5fa13361e8/mlx-0.30.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:d34cc2c25b0ee41c1349f14650db760e282685339858e305453f62405c12bc1b", size = 596006, upload-time = "2025-12-18T01:55:52.463Z" }, { url = "https://files.pythonhosted.org/packages/23/0e/f9f2f9659c34c87be8f4167f6a1d6ed7e826f4889d20eecd4c0d8122f0e9/mlx-0.30.1-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:4e47d301e9095b87f0bda8827bfd6ffe744223aba5cee8f28e25894d647f5823", size = 596008, upload-time = "2025-12-18T01:55:54.02Z" }, { url = "https://files.pythonhosted.org/packages/56/a7/49e41fb141de95b6a376091a963c737839c9cda04e423c67f57460a50458/mlx-0.30.1-cp314-cp314-macosx_26_0_arm64.whl", hash = "sha256:cfba13e2a52255d663a1ad62f0f83eb3991e42147edf9a8d38cdd224e48ca49b", size = 570406, upload-time = "2025-12-18T00:15:57.177Z" }, + { url = "https://files.pythonhosted.org/packages/73/99/a43cb112167cf865c069f5e108ae42f5314663930ff3dd86c2d23d984191/mlx-0.30.1-cp314-cp314-manylinux_2_35_aarch64.whl", hash = "sha256:bebfec377208eb29cc88aa86c897c7446aa0984838669e138f273f9225d627ff", size = 646461, upload-time = "2025-12-18T01:55:55.285Z" }, + { url = 
"https://files.pythonhosted.org/packages/d4/ff/1e1968f107b4221a98dc26832586b1f646b27ddf3e55c95051c09d751f0a/mlx-0.30.1-cp314-cp314-manylinux_2_35_x86_64.whl", hash = "sha256:d18012d5cf0f013bc4a405cfd1e9d2d28e798f4d2dc4f15aa0fbffff73c02ba2", size = 687114, upload-time = "2025-12-18T01:55:56.506Z" }, ] [[package]] @@ -4235,12 +4255,13 @@ version = "0.29.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jinja2" }, - { name = "mlx", marker = "sys_platform == 'darwin'" }, + { name = "mlx", marker = "sys_platform == 'darwin' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "numpy" }, { name = "protobuf" }, { name = "pyyaml" }, { name = "sentencepiece" }, - { name = "transformers" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e3/62/f46e1355256a114808517947f8e83ad6be310c7288c551db0fa678f47923/mlx_lm-0.29.1.tar.gz", hash = "sha256:b99180d8f33d33a077b814e550bfb2d8a59ae003d668fd1f4b3fff62a381d34b", size = 232302, upload-time = "2025-12-16T16:58:27.959Z" } wheels = [ @@ -4286,13 +4307,13 @@ name = "model-hosting-container-standards" version = "0.1.13" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastapi", marker = "python_full_version < '3.12'" }, - { name = "httpx", marker = "python_full_version < '3.12'" }, - { name = "jmespath", marker = "python_full_version < '3.12'" }, - { name = "pydantic", marker = "python_full_version < '3.12'" }, - { name = "setuptools", marker = "python_full_version < '3.12'" }, - { name = "starlette", 
marker = "python_full_version < '3.12'" }, - { name = "supervisor", marker = "python_full_version < '3.12'" }, + { name = "fastapi" }, + { name = "httpx" }, + { name = "jmespath" }, + { name = "pydantic" }, + { name = "setuptools" }, + { name = "starlette" }, + { name = "supervisor" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d7/b7/a6a31b4dfd30d14b1019dc358f09c9d88ca38e555ba7c976e7d3e6b593fe/model_hosting_container_standards-0.1.13.tar.gz", hash = "sha256:27a1333410dde2719286a300a2803e24fdde407baa91894eb845c0f268aa194d", size = 79116, upload-time = "2026-01-09T21:45:20.683Z" } wheels = [ @@ -4439,7 +4460,7 @@ name = "multidict" version = "6.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } wheels = [ @@ -4600,10 +4621,10 @@ name = "mypy" version = "1.19.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "librt", marker = "platform_python_implementation != 'PyPy'" }, + { name = "librt", marker = "platform_python_implementation != 'PyPy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "mypy-extensions" }, { name = "pathspec" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { 
name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/f5/db/4efed9504bc01309ab9c2da7e352cc223569f05478012b5d9ece38fd44d2/mypy-1.19.1.tar.gz", hash = "sha256:19d88bb05303fe63f71dd2c6270daca27cb9401c4ca8255fe50d1d920e0eb9ba", size = 3582404, upload-time = "2025-12-15T05:03:48.42Z" } @@ -4646,7 +4667,7 @@ name = "mypy-boto3-bedrock-runtime" version = "1.42.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1b/95/cb46d84a7a1408e14cac8a8dbbb24a612e438dd10b5f284fb5e01deece3a/mypy_boto3_bedrock_runtime-1.42.3.tar.gz", hash = "sha256:15686cf925719f14bc0d6c85530808736005fb431f007e37d40e10daff4032cc", size = 29476, upload-time = "2025-12-04T20:56:45.423Z" } wheels = [ @@ -4739,13 +4760,9 @@ wheels = [ name = "numba" version = "0.61.2" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version == '3.11.*'", - "python_full_version < '3.11'", -] dependencies = [ - { name = "llvmlite", version = "0.44.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "llvmlite" }, + { name = "numpy" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1c/a0/e21f57604304aa03ebb8e098429222722ad99176a4f979d34af1d1ee80da/numba-0.61.2.tar.gz", hash = "sha256:8750ee147940a6637b80ecf7f95062185ad8726c8c28a2295b8ec1160a196f7d", size = 2820615, upload-time = "2025-04-09T02:58:07.659Z" } wheels = [ @@ -4771,42 +4788,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/af/a4/6d3a0f2d3989e62a18749e1e9913d5fa4910bbb3e3311a035baea6caf26d/numba-0.61.2-cp313-cp313-win_amd64.whl", hash = "sha256:59321215e2e0ac5fa928a8020ab00b8e57cda8a97384963ac0dfa4d4e6aa54e7", size = 2831846, upload-time = "2025-04-09T02:58:06.125Z" }, ] -[[package]] -name = "numba" -version = "0.63.1" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.13'", - "python_full_version == '3.12.*'", -] -dependencies = [ - { name = "llvmlite", version = "0.46.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, - { name = "numpy", marker = "python_full_version >= '3.12'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/dc/60/0145d479b2209bd8fdae5f44201eceb8ce5a23e0ed54c71f57db24618665/numba-0.63.1.tar.gz", hash = "sha256:b320aa675d0e3b17b40364935ea52a7b1c670c9037c39cf92c49502a75902f4b", size = 2761666, upload-time = "2025-12-10T02:57:39.002Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/ce/5283d4ffa568f795bb0fd61ee1f0efc0c6094b94209259167fc8d4276bde/numba-0.63.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6d6bf5bf00f7db629305caaec82a2ffb8abe2bf45eaad0d0738dc7de4113779", size = 2680810, upload-time = "2025-12-10T02:56:55.269Z" }, - { url = "https://files.pythonhosted.org/packages/0f/72/a8bda517e26d912633b32626333339b7c769ea73a5c688365ea5f88fd07e/numba-0.63.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:08653d0dfc9cc9c4c9a8fba29ceb1f2d5340c3b86c4a7e5e07e42b643bc6a2f4", size = 3739735, upload-time = "2025-12-10T02:56:57.922Z" }, - { url = "https://files.pythonhosted.org/packages/ca/17/1913b7c1173b2db30fb7a9696892a7c4c59aeee777a9af6859e9e01bac51/numba-0.63.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f09eebf5650246ce2a4e9a8d38270e2d4b0b0ae978103bafb38ed7adc5ea906e", size = 3446707, upload-time = "2025-12-10T02:56:59.837Z" }, - { 
url = "https://files.pythonhosted.org/packages/b4/77/703db56c3061e9fdad5e79c91452947fdeb2ec0bdfe4affe9b144e7025e0/numba-0.63.1-cp310-cp310-win_amd64.whl", hash = "sha256:f8bba17421d865d8c0f7be2142754ebce53e009daba41c44cf6909207d1a8d7d", size = 2747374, upload-time = "2025-12-10T02:57:07.908Z" }, - { url = "https://files.pythonhosted.org/packages/70/90/5f8614c165d2e256fbc6c57028519db6f32e4982475a372bbe550ea0454c/numba-0.63.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b33db00f18ccc790ee9911ce03fcdfe9d5124637d1ecc266f5ae0df06e02fec3", size = 2680501, upload-time = "2025-12-10T02:57:09.797Z" }, - { url = "https://files.pythonhosted.org/packages/dc/9d/d0afc4cf915edd8eadd9b2ab5b696242886ee4f97720d9322650d66a88c6/numba-0.63.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7d31ea186a78a7c0f6b1b2a3fe68057fdb291b045c52d86232b5383b6cf4fc25", size = 3744945, upload-time = "2025-12-10T02:57:11.697Z" }, - { url = "https://files.pythonhosted.org/packages/05/a9/d82f38f2ab73f3be6f838a826b545b80339762ee8969c16a8bf1d39395a8/numba-0.63.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ed3bb2fbdb651d6aac394388130a7001aab6f4541837123a4b4ab8b02716530c", size = 3450827, upload-time = "2025-12-10T02:57:13.709Z" }, - { url = "https://files.pythonhosted.org/packages/18/3f/a9b106e93c5bd7434e65f044bae0d204e20aa7f7f85d72ceb872c7c04216/numba-0.63.1-cp311-cp311-win_amd64.whl", hash = "sha256:1ecbff7688f044b1601be70113e2fb1835367ee0b28ffa8f3adf3a05418c5c87", size = 2747262, upload-time = "2025-12-10T02:57:15.664Z" }, - { url = "https://files.pythonhosted.org/packages/14/9c/c0974cd3d00ff70d30e8ff90522ba5fbb2bcee168a867d2321d8d0457676/numba-0.63.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2819cd52afa5d8d04e057bdfd54367575105f8829350d8fb5e4066fb7591cc71", size = 2680981, upload-time = "2025-12-10T02:57:17.579Z" }, - { url = 
"https://files.pythonhosted.org/packages/cb/70/ea2bc45205f206b7a24ee68a159f5097c9ca7e6466806e7c213587e0c2b1/numba-0.63.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5cfd45dbd3d409e713b1ccfdc2ee72ca82006860254429f4ef01867fdba5845f", size = 3801656, upload-time = "2025-12-10T02:57:19.106Z" }, - { url = "https://files.pythonhosted.org/packages/0d/82/4f4ba4fd0f99825cbf3cdefd682ca3678be1702b63362011de6e5f71f831/numba-0.63.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69a599df6976c03b7ecf15d05302696f79f7e6d10d620367407517943355bcb0", size = 3501857, upload-time = "2025-12-10T02:57:20.721Z" }, - { url = "https://files.pythonhosted.org/packages/af/fd/6540456efa90b5f6604a86ff50dabefb187e43557e9081adcad3be44f048/numba-0.63.1-cp312-cp312-win_amd64.whl", hash = "sha256:bbad8c63e4fc7eb3cdb2c2da52178e180419f7969f9a685f283b313a70b92af3", size = 2750282, upload-time = "2025-12-10T02:57:22.474Z" }, - { url = "https://files.pythonhosted.org/packages/57/f7/e19e6eff445bec52dde5bed1ebb162925a8e6f988164f1ae4b3475a73680/numba-0.63.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:0bd4fd820ef7442dcc07da184c3f54bb41d2bdb7b35bacf3448e73d081f730dc", size = 2680954, upload-time = "2025-12-10T02:57:24.145Z" }, - { url = "https://files.pythonhosted.org/packages/e9/6c/1e222edba1e20e6b113912caa9b1665b5809433cbcb042dfd133c6f1fd38/numba-0.63.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:53de693abe4be3bd4dee38e1c55f01c55ff644a6a3696a3670589e6e4c39cde2", size = 3809736, upload-time = "2025-12-10T02:57:25.836Z" }, - { url = "https://files.pythonhosted.org/packages/76/0a/590bad11a8b3feeac30a24d01198d46bdb76ad15c70d3a530691ce3cae58/numba-0.63.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:81227821a72a763c3d4ac290abbb4371d855b59fdf85d5af22a47c0e86bf8c7e", size = 3508854, upload-time = "2025-12-10T02:57:27.438Z" }, - { url = 
"https://files.pythonhosted.org/packages/4e/f5/3800384a24eed1e4d524669cdbc0b9b8a628800bb1e90d7bd676e5f22581/numba-0.63.1-cp313-cp313-win_amd64.whl", hash = "sha256:eb227b07c2ac37b09432a9bda5142047a2d1055646e089d4a240a2643e508102", size = 2750228, upload-time = "2025-12-10T02:57:30.36Z" }, - { url = "https://files.pythonhosted.org/packages/36/2f/53be2aa8a55ee2608ebe1231789cbb217f6ece7f5e1c685d2f0752e95a5b/numba-0.63.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:f180883e5508940cc83de8a8bea37fc6dd20fbe4e5558d4659b8b9bef5ff4731", size = 2681153, upload-time = "2025-12-10T02:57:32.016Z" }, - { url = "https://files.pythonhosted.org/packages/13/91/53e59c86759a0648282368d42ba732c29524a745fd555ed1fb1df83febbe/numba-0.63.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0938764afa82a47c0e895637a6c55547a42c9e1d35cac42285b1fa60a8b02bb", size = 3778718, upload-time = "2025-12-10T02:57:33.764Z" }, - { url = "https://files.pythonhosted.org/packages/6c/0c/2be19eba50b0b7636f6d1f69dfb2825530537708a234ba1ff34afc640138/numba-0.63.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f90a929fa5094e062d4e0368ede1f4497d5e40f800e80aa5222c4734236a2894", size = 3478712, upload-time = "2025-12-10T02:57:35.518Z" }, - { url = "https://files.pythonhosted.org/packages/0d/5f/4d0c9e756732577a52211f31da13a3d943d185f7fb90723f56d79c696caa/numba-0.63.1-cp314-cp314-win_amd64.whl", hash = "sha256:8d6d5ce85f572ed4e1a135dbb8c0114538f9dd0e3657eeb0bb64ab204cbe2a8f", size = 2752161, upload-time = "2025-12-10T02:57:37.12Z" }, -] - [[package]] name = "numpy" version = "2.2.6" @@ -4874,7 +4855,9 @@ name = "nvidia-cublas-cu12" version = "12.8.4.1" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/29/99/db44d685f0e257ff0e213ade1964fc459b4a690a73293220e98feb3307cf/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = 
"sha256:b86f6dd8935884615a0683b663891d43781b819ac4f2ba2b0c9604676af346d0", size = 590537124, upload-time = "2025-03-07T01:43:53.556Z" }, { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" }, + { url = "https://files.pythonhosted.org/packages/70/61/7d7b3c70186fb651d0fbd35b01dbfc8e755f69fd58f817f3d0f642df20c3/nvidia_cublas_cu12-12.8.4.1-py3-none-win_amd64.whl", hash = "sha256:47e9b82132fa8d2b4944e708049229601448aaad7e6f296f630f2d1a32de35af", size = 567544208, upload-time = "2025-03-07T01:53:30.535Z" }, ] [[package]] @@ -4882,7 +4865,9 @@ name = "nvidia-cuda-cupti-cu12" version = "12.8.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/1f/b3bd73445e5cb342727fd24fe1f7b748f690b460acadc27ea22f904502c8/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4412396548808ddfed3f17a467b104ba7751e6b58678a4b840675c56d21cf7ed", size = 9533318, upload-time = "2025-03-07T01:40:10.421Z" }, { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" }, + { url = "https://files.pythonhosted.org/packages/41/bc/83f5426095d93694ae39fe1311431b5d5a9bb82e48bf0dd8e19be2765942/nvidia_cuda_cupti_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:bb479dcdf7e6d4f8b0b01b115260399bf34154a1a2e9fe11c85c517d87efd98e", size = 7015759, upload-time = "2025-03-07T01:51:11.355Z" }, ] [[package]] @@ -4891,6 +4876,8 @@ version = "12.8.93" 
source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/05/6b/32f747947df2da6994e999492ab306a903659555dddc0fbdeb9d71f75e52/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994", size = 88040029, upload-time = "2025-03-07T01:42:13.562Z" }, + { url = "https://files.pythonhosted.org/packages/eb/d1/e50d0acaab360482034b84b6e27ee83c6738f7d32182b987f9c7a4e32962/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc1fec1e1637854b4c0a65fb9a8346b51dd9ee69e61ebaccc82058441f15bce8", size = 43106076, upload-time = "2025-03-07T01:41:59.817Z" }, + { url = "https://files.pythonhosted.org/packages/45/51/52a3d84baa2136cc8df15500ad731d74d3a1114d4c123e043cb608d4a32b/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-win_amd64.whl", hash = "sha256:7a4b6b2904850fe78e0bd179c4b655c404d4bb799ef03ddc60804247099ae909", size = 73586838, upload-time = "2025-03-07T01:52:13.483Z" }, ] [[package]] @@ -4898,7 +4885,9 @@ name = "nvidia-cuda-runtime-cu12" version = "12.8.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/75/f865a3b236e4647605ea34cc450900854ba123834a5f1598e160b9530c3a/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d", size = 965265, upload-time = "2025-03-07T01:39:43.533Z" }, { url = "https://files.pythonhosted.org/packages/0d/9b/a997b638fcd068ad6e4d53b8551a7d30fe8b404d6f1804abf1df69838932/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90", size = 954765, upload-time = "2025-03-07T01:40:01.615Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/a5/a515b7600ad361ea14bfa13fb4d6687abf500adc270f19e89849c0590492/nvidia_cuda_runtime_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:c0c6027f01505bfed6c3b21ec546f69c687689aad5f1a377554bc6ca4aa993a8", size = 944318, upload-time = "2025-03-07T01:51:01.794Z" }, ] [[package]] @@ -4909,7 +4898,9 @@ dependencies = [ { name = "nvidia-cublas-cu12" }, ] wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/41/e79269ce215c857c935fd86bcfe91a451a584dfc27f1e068f568b9ad1ab7/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8", size = 705026878, upload-time = "2025-06-06T21:52:51.348Z" }, { url = "https://files.pythonhosted.org/packages/ba/51/e123d997aa098c61d029f76663dedbfb9bc8dcf8c60cbd6adbe42f76d049/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8", size = 706758467, upload-time = "2025-06-06T21:54:08.597Z" }, + { url = "https://files.pythonhosted.org/packages/3d/90/0bd6e586701b3a890fd38aa71c387dab4883d619d6e5ad912ccbd05bfd67/nvidia_cudnn_cu12-9.10.2.21-py3-none-win_amd64.whl", hash = "sha256:c6288de7d63e6cf62988f0923f96dc339cea362decb1bf5b3141883392a7d65e", size = 692992268, upload-time = "2025-06-06T21:55:18.114Z" }, ] [[package]] @@ -4942,7 +4933,9 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ + { url = "https://files.pythonhosted.org/packages/60/bc/7771846d3a0272026c416fbb7e5f4c1f146d6d80704534d0b187dd6f4800/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a", size = 193109211, upload-time = "2025-03-07T01:44:56.873Z" }, { url = 
"https://files.pythonhosted.org/packages/1f/13/ee4e00f30e676b66ae65b4f08cb5bcbb8392c03f54f2d5413ea99a5d1c80/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74", size = 193118695, upload-time = "2025-03-07T01:45:27.821Z" }, + { url = "https://files.pythonhosted.org/packages/7d/ec/ce1629f1e478bb5ccd208986b5f9e0316a78538dd6ab1d0484f012f8e2a1/nvidia_cufft_cu12-11.3.3.83-py3-none-win_amd64.whl", hash = "sha256:7a64a98ef2a7c47f905aaf8931b69a3a43f27c55530c698bb2ed7c75c0b42cb7", size = 192216559, upload-time = "2025-03-07T01:53:57.106Z" }, ] [[package]] @@ -4951,6 +4944,7 @@ version = "1.13.1.3" source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/bb/fe/1bcba1dfbfb8d01be8d93f07bfc502c93fa23afa6fd5ab3fc7c1df71038a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc", size = 1197834, upload-time = "2025-03-07T01:45:50.723Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f5/5607710447a6fe9fd9b3283956fceeee8a06cda1d2f56ce31371f595db2a/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:4beb6d4cce47c1a0f1013d72e02b0994730359e17801d395bdcbf20cfb3bb00a", size = 1120705, upload-time = "2025-03-07T01:45:41.434Z" }, ] [[package]] @@ -4958,7 +4952,9 @@ name = "nvidia-curand-cu12" version = "10.3.9.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/45/5e/92aa15eca622a388b80fbf8375d4760738df6285b1e92c43d37390a33a9a/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dfab99248034673b779bc6decafdc3404a8a6f502462201f2f31f11354204acd", size = 63625754, upload-time = "2025-03-07T01:46:10.735Z" }, { url = 
"https://files.pythonhosted.org/packages/fb/aa/6584b56dc84ebe9cf93226a5cde4d99080c8e90ab40f0c27bda7a0f29aa1/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9", size = 63619976, upload-time = "2025-03-07T01:46:23.323Z" }, + { url = "https://files.pythonhosted.org/packages/b9/75/70c05b2f3ed5be3bb30b7102b6eb78e100da4bbf6944fd6725c012831cab/nvidia_curand_cu12-10.3.9.90-py3-none-win_amd64.whl", hash = "sha256:f149a8ca457277da854f89cf282d6ef43176861926c7ac85b2a0fbd237c587ec", size = 62765309, upload-time = "2025-03-07T01:54:20.478Z" }, ] [[package]] @@ -4971,7 +4967,9 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/32/f7cd6ce8a7690544d084ea21c26e910a97e077c9b7f07bf5de623ee19981/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0", size = 267229841, upload-time = "2025-03-07T01:46:54.356Z" }, { url = "https://files.pythonhosted.org/packages/85/48/9a13d2975803e8cf2777d5ed57b87a0b6ca2cc795f9a4f59796a910bfb80/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450", size = 267506905, upload-time = "2025-03-07T01:47:16.273Z" }, + { url = "https://files.pythonhosted.org/packages/13/c0/76ca8551b8a84146ffa189fec81c26d04adba4bc0dbe09cd6e6fd9b7de04/nvidia_cusolver_cu12-11.7.3.90-py3-none-win_amd64.whl", hash = "sha256:4a550db115fcabc4d495eb7d39ac8b58d4ab5d8e63274d3754df1c0ad6a22d34", size = 256720438, upload-time = "2025-03-07T01:54:39.898Z" }, ] [[package]] @@ -4982,7 +4980,9 @@ dependencies = [ { name = "nvidia-nvjitlink-cu12" }, ] wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bc/f7/cd777c4109681367721b00a106f491e0d0d15cfa1fd59672ce580ce42a97/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc", size = 288117129, upload-time = "2025-03-07T01:47:40.407Z" }, { url = "https://files.pythonhosted.org/packages/c2/f5/e1854cb2f2bcd4280c44736c93550cc300ff4b8c95ebe370d0aa7d2b473d/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b", size = 288216466, upload-time = "2025-03-07T01:48:13.779Z" }, + { url = "https://files.pythonhosted.org/packages/62/07/f3b2ad63f8e3d257a599f422ae34eb565e70c41031aecefa3d18b62cabd1/nvidia_cusparse_cu12-12.5.8.93-py3-none-win_amd64.whl", hash = "sha256:9a33604331cb2cac199f2e7f5104dfbb8a5a898c367a53dfda9ff2acb6b6b4dd", size = 284937404, upload-time = "2025-03-07T01:55:07.742Z" }, ] [[package]] @@ -4990,7 +4990,9 @@ name = "nvidia-cusparselt-cu12" version = "0.7.1" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/73/b9/598f6ff36faaece4b3c50d26f50e38661499ff34346f00e057760b35cc9d/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5", size = 283835557, upload-time = "2025-02-26T00:16:54.265Z" }, { url = "https://files.pythonhosted.org/packages/56/79/12978b96bd44274fe38b5dde5cfb660b1d114f70a65ef962bcbbed99b549/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623", size = 287193691, upload-time = "2025-02-26T00:15:44.104Z" }, + { url = "https://files.pythonhosted.org/packages/2f/d8/a6b0d0d0c2435e9310f3e2bb0d9c9dd4c33daef86aa5f30b3681defd37ea/nvidia_cusparselt_cu12-0.7.1-py3-none-win_amd64.whl", hash = 
"sha256:f67fbb5831940ec829c9117b7f33807db9f9678dc2a617fbe781cac17b4e1075", size = 271020911, upload-time = "2025-02-26T00:14:47.204Z" }, ] [[package]] @@ -4998,7 +5000,7 @@ name = "nvidia-cutlass-dsl" version = "4.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cutlass-dsl-libs-base", marker = "python_full_version < '3.12'" }, + { name = "nvidia-cutlass-dsl-libs-base" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/55/09/42fca58af350265131b6f8665ad5b62526c95e6692788460bd5306d3efe2/nvidia_cutlass_dsl-4.4.0-py3-none-any.whl", hash = "sha256:2d1f34333e4d774002d44b53262d71aaf738700fcf3858290629f9a7b374c61c", size = 10168, upload-time = "2026-02-14T03:38:54.267Z" }, @@ -5009,9 +5011,9 @@ name = "nvidia-cutlass-dsl-libs-base" version = "4.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cuda-python", marker = "python_full_version < '3.12'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "cuda-python" }, + { name = "numpy" }, + { name = "typing-extensions" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/ad/af/cf64251bae66077769adbcd9a2e96b86aeb3c41490c5ee0a939a1a3b511e/nvidia_cutlass_dsl_libs_base-4.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:703169d0843ad7e310b397aa95128e3fa983571a9a488f826c2968f3e71df2b8", size = 75460001, upload-time = "2026-02-14T03:44:18.705Z" }, @@ -5038,6 +5040,7 @@ name = "nvidia-nccl-cu12" version = "2.27.5" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/1c/857979db0ef194ca5e21478a0612bcdbbe59458d7694361882279947b349/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:31432ad4d1fb1004eb0c56203dc9bc2178a1ba69d1d9e02d64a6938ab5e40e7a", size = 322400625, upload-time = "2025-06-26T04:11:04.496Z" }, { url = 
"https://files.pythonhosted.org/packages/6e/89/f7a07dc961b60645dbbf42e80f2bc85ade7feb9a491b11a1e973aa00071f/nvidia_nccl_cu12-2.27.5-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ad730cf15cb5d25fe849c6e6ca9eb5b76db16a80f13f425ac68d8e2e55624457", size = 322348229, upload-time = "2025-06-26T04:11:28.385Z" }, ] @@ -5047,6 +5050,8 @@ version = "12.8.93" source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/f6/74/86a07f1d0f42998ca31312f998bd3b9a7eff7f52378f4f270c8679c77fb9/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88", size = 39254836, upload-time = "2025-03-07T01:49:55.661Z" }, + { url = "https://files.pythonhosted.org/packages/2a/a2/8cee5da30d13430e87bf99bb33455d2724d0a4a9cb5d7926d80ccb96d008/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adccd7161ace7261e01bb91e44e88da350895c270d23f744f0820c818b7229e7", size = 38386204, upload-time = "2025-03-07T01:49:43.612Z" }, + { url = "https://files.pythonhosted.org/packages/ed/d7/34f02dad2e30c31b10a51f6b04e025e5dd60e5f936af9045a9b858a05383/nvidia_nvjitlink_cu12-12.8.93-py3-none-win_amd64.whl", hash = "sha256:bd93fbeeee850917903583587f4fc3a4eafa022e34572251368238ab5e6bd67f", size = 268553710, upload-time = "2025-03-07T01:56:24.13Z" }, ] [[package]] @@ -5054,6 +5059,7 @@ name = "nvidia-nvshmem-cu12" version = "3.3.20" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/92/9d/3dd98852568fb845ec1f7902c90a22b240fe1cbabda411ccedf2fd737b7b/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0b0b960da3842212758e4fa4696b94f129090b30e5122fea3c5345916545cff0", size = 124484616, upload-time = "2025-08-04T20:24:59.172Z" }, { url = 
"https://files.pythonhosted.org/packages/3b/6c/99acb2f9eb85c29fc6f3a7ac4dccfd992e22666dd08a642b303311326a97/nvidia_nvshmem_cu12-3.3.20-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d00f26d3f9b2e3c3065be895e3059d6479ea5c638a3f38c9fec49b1b9dd7c1e5", size = 124657145, upload-time = "2025-08-04T20:25:19.995Z" }, ] @@ -5062,7 +5068,9 @@ name = "nvidia-nvtx-cu12" version = "12.8.90" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/10/c0/1b303feea90d296f6176f32a2a70b5ef230f9bdeb3a72bddb0dc922dc137/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d7ad891da111ebafbf7e015d34879f7112832fc239ff0d7d776b6cb685274615", size = 91161, upload-time = "2025-03-07T01:42:23.922Z" }, { url = "https://files.pythonhosted.org/packages/a2/eb/86626c1bbc2edb86323022371c39aa48df6fd8b0a1647bc274577f72e90b/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f", size = 89954, upload-time = "2025-03-07T01:42:44.131Z" }, + { url = "https://files.pythonhosted.org/packages/9f/99/4c9c0c329bf9fc125008c3b54c7c94c0023518d06fc025ae36431375e1fe/nvidia_nvtx_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:619c8304aedc69f02ea82dd244541a83c3d9d40993381b3b590f1adaed3db41e", size = 56492, upload-time = "2025-03-07T01:52:24.69Z" }, ] [[package]] @@ -5098,7 +5106,7 @@ name = "openai-harmony" version = "0.0.8" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pydantic", marker = "python_full_version < '3.12'" }, + { name = "pydantic" }, ] sdist = { url = "https://files.pythonhosted.org/packages/3e/92/2d038d096f29179c7c9571b431f9e739f87a487121901725e23fe338dd9d/openai_harmony-0.0.8.tar.gz", hash = "sha256:6e43f98e6c242fa2de6f8ea12eab24af63fa2ed3e89c06341fb9d92632c5cbdf", size = 284777, upload-time = "2025-11-05T19:07:06.727Z" } wheels = [ 
@@ -5133,7 +5141,7 @@ name = "opencv-python-headless" version = "4.13.0.92" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "numpy" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/79/42/2310883be3b8826ac58c3f2787b9358a2d46923d61f88fedf930bc59c60c/opencv_python_headless-4.13.0.92-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:1a7d040ac656c11b8c38677cc8cccdc149f98535089dbe5b081e80a4e5903209", size = 46247192, upload-time = "2026-02-05T07:01:35.187Z" }, @@ -5461,10 +5469,10 @@ wheels = [ [package.optional-dependencies] llamacpp = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "llama-cpp-python" }, - { name = "numba", version = "0.61.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, - { name = "numba", version = "0.63.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, + { name = "numba" }, ] mlxlm = [ { name = "datasets" }, @@ -5478,7 +5486,8 @@ transformers = [ { name = "accelerate" }, { name = "datasets" }, { name = "setuptools" }, - { name = "transformers" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] [[package]] @@ -5641,8 
+5650,8 @@ name = "pendulum" version = "3.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "python-dateutil", marker = "python_full_version < '3.13'" }, - { name = "tzdata", marker = "python_full_version < '3.13'" }, + { name = "python-dateutil", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tzdata", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/23/7c/009c12b86c7cc6c403aec80f8a4308598dfc5995e5c523a5491faaa3952e/pendulum-3.1.0.tar.gz", hash = "sha256:66f96303560f41d097bee7d2dc98ffca716fbb3a832c4b3062034c2d45865015", size = 85930, upload-time = "2025-04-19T14:30:01.675Z" } wheels = [ @@ -5863,7 +5872,7 @@ dependencies = [ { name = "orjson" }, { name = "packaging" }, { name = "pathspec" }, - { name = "pendulum", marker = "python_full_version < '3.13'" }, + { name = "pendulum", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "pluggy" }, { name = "prometheus-client" }, { name = "pydantic" }, @@ -5879,7 +5888,7 @@ dependencies = [ { name = "rfc3339-validator" }, { name = "rich" }, { name = "ruamel-yaml" }, - { name = "ruamel-yaml-clib", marker = "platform_python_implementation == 'CPython'" }, + { name = "ruamel-yaml-clib", marker = "platform_python_implementation == 'CPython' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "semver" }, { name = "sniffio" }, { name = "sqlalchemy", extra = ["asyncio"] }, @@ -5888,7 +5897,7 @@ dependencies = [ { name = "typing-extensions" }, { name = "uvicorn" }, { name = "websockets" }, - { name = "whenever", 
marker = "python_full_version >= '3.13'" }, + { name = "whenever", marker = "python_full_version >= '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/5b/46/139cfabbc729f13d4b6df74b56c01ddcbb1118936b312b2ef82e9826d8bc/prefect-3.6.13.tar.gz", hash = "sha256:ee0b39fa390c204ccb3762be00a729edd45c5aa54e0245951f8682f92bfb016b", size = 10811400, upload-time = "2026-01-23T04:17:29.594Z" } wheels = [ @@ -5925,8 +5934,8 @@ name = "prometheus-fastapi-instrumentator" version = "7.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "prometheus-client", marker = "python_full_version < '3.12'" }, - { name = "starlette", marker = "python_full_version < '3.12'" }, + { name = "prometheus-client" }, + { name = "starlette" }, ] sdist = { url = "https://files.pythonhosted.org/packages/69/6d/24d53033cf93826aa7857699a4450c1c67e5b9c710e925b1ed2b320c04df/prometheus_fastapi_instrumentator-7.1.0.tar.gz", hash = "sha256:be7cd61eeea4e5912aeccb4261c6631b3f227d8924542d79eaf5af3f439cbe5e", size = 20220, upload-time = "2025-03-19T19:35:05.351Z" } wheels = [ @@ -6105,8 +6114,8 @@ name = "psycopg" version = "3.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, - { name = "tzdata", marker = "sys_platform == 'win32'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tzdata", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e0/1a/7d9ef4fdc13ef7f15b934c393edc97a35c281bb7d3c3329fbfcbe915a7c2/psycopg-3.3.2.tar.gz", hash = 
"sha256:707a67975ee214d200511177a6a80e56e654754c9afca06a7194ea6bbfde9ca7", size = 165630, upload-time = "2025-12-06T17:34:53.899Z" } wheels = [ @@ -6115,7 +6124,7 @@ wheels = [ [package.optional-dependencies] binary = [ - { name = "psycopg-binary", marker = "implementation_name != 'pypy'" }, + { name = "psycopg-binary", marker = "implementation_name != 'pypy' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -6523,7 +6532,8 @@ email = [ name = "pydantic-ai" source = { editable = "." } dependencies = [ - { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "fastmcp", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "ui", "vertexai", "xai"] }, + { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "fastmcp", "google", "groq", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "ui", "vertexai", "xai"] }, + { name = "pydantic-ai-slim", extra = ["huggingface"], marker = "extra == 'extra-16-pydantic-ai-slim-huggingface'" }, ] [package.optional-dependencies] @@ -6540,7 +6550,7 @@ outlines-llamacpp = [ { name = "pydantic-ai-slim", extra = ["outlines-llamacpp"] }, ] outlines-mlxlm = [ - { name = "pydantic-ai-slim", extra = ["outlines-mlxlm"], marker = "platform_machine == 'arm64' and sys_platform == 'darwin'" }, + { name = "pydantic-ai-slim", extra = ["outlines-mlxlm"], marker = "(platform_machine == 'arm64' and sys_platform == 'darwin') or (platform_machine != 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'darwin' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-sglang = [ { name = "pydantic-ai-slim", extra = ["outlines-sglang"] }, @@ -6549,7 +6559,7 @@ 
outlines-transformers = [ { name = "pydantic-ai-slim", extra = ["outlines-transformers"] }, ] outlines-vllm-offline = [ - { name = "pydantic-ai-slim", extra = ["outlines-vllm-offline"] }, + { name = "pydantic-ai-slim", extra = ["outlines-vllm-offline"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] prefect = [ { name = "pydantic-ai-slim", extra = ["prefect"] }, @@ -6712,7 +6722,7 @@ requires-dist = [ name = "pydantic-ai-slim" source = { editable = "pydantic_ai_slim" } dependencies = [ - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "genai-prices" }, { name = "griffelib" }, { name = "httpx" }, @@ -6743,11 +6753,14 @@ cli = [ { name = "rich" }, ] cohere = [ - { name = "cohere", marker = "sys_platform != 'emscripten'" }, + { name = "cohere", marker = "sys_platform != 'emscripten' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] dbos = [ { name = "dbos" }, ] +docker-sandbox = [ + { name = "docker" }, +] duckduckgo = [ { name = "ddgs" }, ] @@ -6767,7 +6780,7 @@ groq = [ { name = "groq" }, ] huggingface = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" } }, ] logfire = [ { name = "logfire", extra = ["httpx"] }, @@ -6786,20 +6799,21 @@ openrouter = [ { name = "openai" }, ] outlines-llamacpp = [ - { name = "outlines", extra = ["llamacpp"], marker = "python_full_version < '3.14'" }, + { name = "outlines", extra = ["llamacpp"], marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-mlxlm = [ - { name = "outlines", extra = ["mlxlm"], marker = 
"python_full_version < '3.14' and platform_machine == 'arm64' and sys_platform == 'darwin'" }, + { name = "outlines", extra = ["mlxlm"], marker = "(python_full_version < '3.14' and platform_machine == 'arm64' and sys_platform == 'darwin') or (python_full_version >= '3.14' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine != 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'darwin' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-sglang = [ - { name = "outlines", extra = ["sglang"], marker = "python_full_version < '3.14'" }, - { name = "pillow", marker = "python_full_version < '3.14'" }, + { name = "outlines", extra = ["sglang"], marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pillow", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-transformers = [ - { name = "outlines", extra = ["transformers"], marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin')" }, - { name = "pillow", marker = "python_full_version < '3.14'" }, - { name = "torch", marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin')" }, - { name = "transformers", marker = "python_full_version < '3.14'" }, + { name = "outlines", extra = ["transformers"], marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin') or (extra == 'extra-16-pydantic-ai-slim-huggingface' 
and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pillow", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "torch", marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.14' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.14' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (python_full_version < '3.14' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-vllm-offline = [ { name = "outlines", marker = "python_full_version < '3.14'" }, @@ -6813,7 +6827,7 @@ retries = [ { name = "tenacity" }, ] sentence-transformers = [ - { name = "sentence-transformers", marker = "python_full_version < '3.14'" }, + { name = "sentence-transformers", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] tavily = [ { name = "tavily-python" }, @@ -6829,7 +6843,7 @@ vertexai = [ { name = "requests" }, ] voyageai = [ - { name = "voyageai", marker = "python_full_version < '3.14'" }, + { name = "voyageai", marker = "python_full_version < '3.14' or (extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] web = [ { name = "httpx" }, @@ -6849,6 +6863,7 @@ requires-dist = [ { name = "cohere", marker = "sys_platform != 'emscripten' and extra == 'cohere'", specifier = ">=5.18.0" }, { name = "dbos", marker = "extra == 'dbos'", specifier = ">=2.10.0" }, { name = "ddgs", marker = "extra == 'duckduckgo'", specifier = ">=9.0.0" }, + { name = "docker", marker = "extra == 'docker-sandbox'", specifier = ">=7.0" }, { name = "exa-py", marker = "extra == 'exa'", specifier = ">=2.0.0" }, { name = "exceptiongroup", marker = "python_full_version < '3.11'", specifier = ">=1.2.2" }, { name = "fasta2a", marker = "extra == 'a2a'", specifier = ">=0.4.1" }, @@ -6899,7 +6914,7 @@ requires-dist = [ { name = "voyageai", marker = "python_full_version < '3.14' and extra == 'voyageai'", specifier = ">=0.3.7" }, { name = "xai-sdk", marker = "extra == 'xai'", specifier = ">=1.5.0" }, ] -provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "duckduckgo", "evals", "exa", "fastmcp", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "openrouter", "outlines-llamacpp", "outlines-mlxlm", "outlines-sglang", "outlines-transformers", "outlines-vllm-offline", "prefect", "retries", "sentence-transformers", "tavily", "temporal", "ui", "vertexai", "voyageai", "web", "xai"] +provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "docker-sandbox", "duckduckgo", "evals", "exa", "fastmcp", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "openrouter", "outlines-llamacpp", "outlines-mlxlm", "outlines-sglang", "outlines-transformers", "outlines-vllm-offline", "prefect", "retries", "sentence-transformers", "tavily", "temporal", "ui", "vertexai", "voyageai", "web", "xai"] [[package]] name = "pydantic-core" @@ -7076,7 +7091,7 @@ wheels = [ [package.optional-dependencies] pycountry = [ - { name = "pycountry", 
marker = "python_full_version < '3.12'" }, + { name = "pycountry" }, ] [[package]] @@ -7117,7 +7132,7 @@ version = "0.16.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cloudpickle" }, - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "fakeredis", extra = ["lua"] }, { name = "opentelemetry-api" }, { name = "opentelemetry-exporter-prometheus" }, @@ -7207,13 +7222,13 @@ name = "pytest" version = "9.0.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "iniconfig" }, { name = "packaging" }, { name = "pluggy" }, { name = "pygments" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } wheels = [ @@ -7466,7 +7481,7 @@ name = "pyzmq" version = "27.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = 
"cffi", marker = "python_full_version < '3.12' and implementation_name == 'pypy'" }, + { name = "cffi", marker = "implementation_name == 'pypy'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" } wheels = [ @@ -7539,14 +7554,14 @@ name = "ray" version = "2.53.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "click", marker = "python_full_version < '3.12'" }, - { name = "filelock", marker = "python_full_version < '3.12'" }, - { name = "jsonschema", marker = "python_full_version < '3.12'" }, - { name = "msgpack", marker = "python_full_version < '3.12'" }, - { name = "packaging", marker = "python_full_version < '3.12'" }, - { name = "protobuf", marker = "python_full_version < '3.12'" }, - { name = "pyyaml", marker = "python_full_version < '3.12'" }, - { name = "requests", marker = "python_full_version < '3.12'" }, + { name = "click" }, + { name = "filelock" }, + { name = "jsonschema" }, + { name = "msgpack" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "pyyaml" }, + { name = "requests" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/2f/99/21986c7f8135dafbf7c49229c52faaa9d2d365db7d86fffe978dde8ee967/ray-2.53.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4db914a0a6dd608fa49c066929a1282745a2dbd73caee67d7b80fe684ca65bdd", size = 69473649, upload-time = "2025-12-20T16:05:40.58Z" }, @@ -7568,7 +7583,7 @@ wheels = [ [package.optional-dependencies] cgraph = [ - { name = "cupy-cuda12x", marker = "python_full_version < '3.12' and sys_platform != 'darwin'" }, + { name = "cupy-cuda12x", marker = "sys_platform != 'darwin'" }, ] [[package]] @@ -7585,7 +7600,7 @@ name = "redis" version = "7.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = 
"async-timeout", marker = "python_full_version < '3.11.3'" }, + { name = "async-timeout", marker = "python_full_version < '3.11.3' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/43/c8/983d5c6579a411d8a99bc5823cc5712768859b5ce2c8afe1a65b37832c81/redis-7.1.0.tar.gz", hash = "sha256:b1cc3cfa5a2cb9c2ab3ba700864fb0ad75617b41f01352ce5779dabf6d5f9c3c", size = 4796669, upload-time = "2025-11-19T15:54:39.961Z" } wheels = [ @@ -7599,7 +7614,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, { name = "rpds-py" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } wheels = [ @@ -7796,9 +7811,9 @@ name = "rich-toolkit" version = "0.17.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "click", marker = "python_full_version < '3.12'" }, - { name = "rich", marker = "python_full_version < '3.12'" }, - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "click" }, + { name = "rich" }, + { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/97/09/3f9b8d9daaf235195c626f21e03604c05b987404ee3bcacee0c1f67f2a8e/rich_toolkit-0.17.1.tar.gz", hash = "sha256:5af54df8d1dd9c8530e462e1bdcaed625c9b49f5a55b035aa0ba1c17bdb87c9a", size = 187925, upload-time = "2025-12-17T10:49:22.583Z" } wheels = [ @@ -8065,7 +8080,7 @@ name = 
"ruamel-yaml" version = "0.18.17" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "ruamel-yaml-clib", marker = "python_full_version < '3.15' and platform_python_implementation == 'CPython'" }, + { name = "ruamel-yaml-clib", marker = "(python_full_version < '3.15' and platform_python_implementation == 'CPython') or (python_full_version >= '3.15' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_python_implementation != 'CPython' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/3a/2b/7a1f1ebcd6b3f14febdc003e658778d81e76b40df2267904ee6b13f0c5c6/ruamel_yaml-0.18.17.tar.gz", hash = "sha256:9091cd6e2d93a3a4b157ddb8fabf348c3de7f1fb1381346d985b6b247dcd8d3c", size = 149602, upload-time = "2025-12-17T20:02:55.757Z" } wheels = [ @@ -8214,10 +8229,10 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "joblib", marker = "python_full_version < '3.11'" }, - { name = "numpy", marker = "python_full_version < '3.11'" }, - { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "threadpoolctl", marker = "python_full_version < '3.11'" }, + { name = "joblib", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "threadpoolctl", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/98/c2/a7855e41c9d285dfe86dc50b250978105dce513d6e459ea66a6aeb0e1e0c/scikit_learn-1.7.2.tar.gz", hash = "sha256:20e9e49ecd130598f1ca38a1d85090e1a600147b9c02fa6f15d69cb53d968fda", size = 7193136, upload-time = "2025-09-09T08:21:29.075Z" } wheels = [ @@ -8263,10 +8278,10 @@ resolution-markers = [ "python_full_version == '3.11.*'", ] dependencies = [ - { name = "joblib", marker = "python_full_version >= '3.11'" }, - { name = "numpy", marker = "python_full_version >= '3.11'" }, - { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "threadpoolctl", marker = "python_full_version >= '3.11'" }, + { name = "joblib", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "threadpoolctl", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0e/d4/40988bf3b8e34feec1d0e6a051446b1f66225f8529b9309becaeef62b6c4/scikit_learn-1.8.0.tar.gz", hash = 
"sha256:9bccbb3b40e3de10351f8f5068e105d0f4083b1a65fa07b6634fbc401a6287fd", size = 7335585, upload-time = "2025-12-10T07:08:53.618Z" } wheels = [ @@ -8316,7 +8331,7 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "numpy", marker = "python_full_version < '3.11'" }, + { name = "numpy", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0f/37/6964b830433e654ec7485e45a00fc9a27cf868d622838f6b6d9c5ec0d532/scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf", size = 59419214, upload-time = "2025-05-08T16:13:05.955Z" } wheels = [ @@ -8377,7 +8392,7 @@ resolution-markers = [ "python_full_version == '3.11.*'", ] dependencies = [ - { name = "numpy", marker = "python_full_version >= '3.11'" }, + { name = "numpy", marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } wheels = [ @@ -8541,15 +8556,17 @@ name = "sentence-transformers" version = "5.2.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" 
}, { name = "numpy" }, - { name = "scikit-learn", version = "1.7.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "scikit-learn", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "scikit-learn", version = "1.7.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scikit-learn", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "torch" }, { name = "tqdm" }, - { name = "transformers" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a6/bc/0bc9c0ec1cf83ab2ec6e6f38667d167349b950fff6dd2086b79bd360eeca/sentence_transformers-5.2.2.tar.gz", hash = "sha256:7033ee0a24bc04c664fd490abf2ef194d387b3a58a97adcc528783ff505159fa", size = 381607, upload-time = "2026-01-27T11:11:02.658Z" } @@ -8626,8 +8643,8 @@ name = "sentry-sdk" version = "2.48.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "certifi", marker = "python_full_version < '3.12'" }, - { name = "urllib3", marker = "python_full_version < '3.12'" }, + { name = "certifi" }, + { name = "urllib3" }, ] sdist = { url = "https://files.pythonhosted.org/packages/40/f0/0e9dc590513d5e742d7799e2038df3a05167cba084c6ca4f3cdd75b55164/sentry_sdk-2.48.0.tar.gz", hash = "sha256:5213190977ff7fdff8a58b722fb807f8d5524a80488626ebeda1b5676c0c1473", size = 384828, upload-time = "2025-12-16T14:55:41.722Z" } wheels = [ @@ -8798,7 +8815,7 @@ name = "sqlalchemy" version = "2.0.45" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'" }, + { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/be/f9/5e4491e5ccf42f5d9cfc663741d261b3e6e1683ae7812114e7636409fcc6/sqlalchemy-2.0.45.tar.gz", hash = 
"sha256:1632a4bda8d2d25703fdad6363058d882541bdaaee0e5e3ddfa0cd3229efce88", size = 9869912, upload-time = "2025-12-09T21:05:16.737Z" } @@ -8866,7 +8883,7 @@ version = "0.50.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } wheels = [ @@ -8945,7 +8962,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nexus-rpc" }, { name = "protobuf" }, - { name = "python-dateutil", marker = "python_full_version < '3.11'" }, + { name = "python-dateutil", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "types-protobuf" }, { name = "typing-extensions" }, ] @@ -9063,7 +9080,8 @@ name = "tokenizers" version = "0.22.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } wheels = [ @@ -9158,26 +9176,26 @@ dependencies = [ { name = "filelock" }, { name = "fsspec" }, { name = "jinja2" }, - { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "networkx", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 
'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvshmem-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "networkx", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cublas-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cuda-cupti-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cuda-runtime-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cudnn-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cufft-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cufile-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-curand-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and 
extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cusolver-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cusparse-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-cusparselt-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nccl-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nvshmem-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "nvidia-nvtx-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "setuptools", marker = "python_full_version >= '3.12' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "sympy" }, - { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "triton", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux') or (platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'linux' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "typing-extensions" }, ] wheels = [ @@ -9216,7 +9234,7 @@ name = "torchaudio" version = "2.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "torch", marker = "python_full_version < '3.12'" }, + { name = "torch" }, ] wheels = [ { url = 
"https://files.pythonhosted.org/packages/1c/87/7de58c8f4c1946ec4d9070354eae73d1e4f3d2426e5cfa45febbd8451ce5/torchaudio-2.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd13541197e035338bd43225b2067532056486d357c661e12d49ace4fc37f8bb", size = 805912, upload-time = "2025-11-12T15:25:47.857Z" }, @@ -9254,9 +9272,9 @@ name = "torchvision" version = "0.24.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy", marker = "python_full_version < '3.12'" }, - { name = "pillow", marker = "python_full_version < '3.12'" }, - { name = "torch", marker = "python_full_version < '3.12'" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "torch" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/f7/09/d51aadf8591138e08b74c64a6eb783630c7a31ca2634416277115a9c3a2b/torchvision-0.24.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ded5e625788572e4e1c4d155d1bbc48805c113794100d70e19c76e39e4d53465", size = 1891441, upload-time = "2025-11-12T15:25:01.687Z" }, @@ -9294,7 +9312,7 @@ name = "tqdm" version = "4.67.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } wheels = [ @@ -9303,23 +9321,55 @@ wheels = [ [[package]] name = "transformers" -version = "5.0.0" +version = "4.57.6" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", 
+] dependencies = [ - { name = "filelock" }, - { name = "huggingface-hub" }, - { name = "numpy" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "regex" }, - { name = "safetensors" }, - { name = "tokenizers" }, - { name = "tqdm" }, - { name = "typer-slim" }, + { name = "filelock", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "numpy", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "pyyaml", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "regex", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "requests", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "safetensors", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tokenizers", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bc/79/845941711811789c85fb7e2599cea425a14a07eda40f50896b9d3fda7492/transformers-5.0.0.tar.gz", hash = "sha256:5f5634efed6cf76ad068cc5834c7adbc32db78bbd6211fb70df2325a9c37dec8", size = 8424830, upload-time = "2026-01-26T10:46:46.813Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/35/67252acc1b929dc88b6602e8c4a982e64f31e733b804c14bc24b47da35e6/transformers-4.57.6.tar.gz", hash = "sha256:55e44126ece9dc0a291521b7e5492b572e6ef2766338a610b9ab5afbb70689d3", size = 10134912, upload-time = "2026-01-16T10:38:39.284Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/52/f3/ac976fa8e305c9e49772527e09fbdc27cc6831b8a2f6b6063406626be5dd/transformers-5.0.0-py3-none-any.whl", hash = "sha256:587086f249ce64c817213cf36afdb318d087f790723e9b3d4500b97832afd52d", size = 10142091, upload-time = "2026-01-26T10:46:43.88Z" }, + { url = "https://files.pythonhosted.org/packages/03/b8/e484ef633af3887baeeb4b6ad12743363af7cce68ae51e938e00aaa0529d/transformers-4.57.6-py3-none-any.whl", hash = "sha256:4c9e9de11333ddfe5114bc872c9f370509198acf0b87a832a0ab9458e2bd0550", size = 11993498, upload-time = "2026-01-16T10:38:31.289Z" }, +] + +[[package]] +name = "transformers" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version == '3.12.*'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] +dependencies = [ + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "numpy", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "pyyaml", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "regex", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "safetensors", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tokenizers", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + 
{ name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typer-slim", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bd/7e/8a0c57d562015e5b16c97c1f0b8e0e92ead2c7c20513225dc12c2043ba9f/transformers-5.2.0.tar.gz", hash = "sha256:0088b8b46ccc9eff1a1dca72b5d618a5ee3b1befc3e418c9512b35dea9f9a650", size = 8618176, upload-time = "2026-02-16T18:54:02.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/93/79754b0ca486e556c2b95d4f5afc66aaf4b260694f3d6e1b51da2d036691/transformers-5.2.0-py3-none-any.whl", hash = "sha256:9ecaf243dc45bee11a7d93f8caf03746accc0cb069181bbf4ad8566c53e854b4", size = 10403304, upload-time = "2026-02-16T18:53:59.699Z" }, ] [[package]] @@ -9327,12 +9377,19 @@ name = "triton" version = "3.5.1" source = { registry = "https://pypi.org/simple" } wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/2e/f95e673222afa2c7f0c687d8913e98fcf2589ef0b1405de76894e37fe18f/triton-3.5.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f63e34dcb32d7bd3a1d0195f60f30d2aee8b08a69a0424189b71017e23dfc3d2", size = 159821655, upload-time = "2025-11-11T17:51:44.09Z" }, { url = "https://files.pythonhosted.org/packages/fd/6e/676ab5019b4dde8b9b7bab71245102fc02778ef3df48218b298686b9ffd6/triton-3.5.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5fc53d849f879911ea13f4a877243afc513187bc7ee92d1f2c0f1ba3169e3c94", size = 170320692, upload-time = "2025-11-11T17:40:46.074Z" }, + { url = "https://files.pythonhosted.org/packages/dc/dc/6ce44d055f2fc2403c4ec6b3cfd3a9b25f57b7d95efadccdea91497f8e81/triton-3.5.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da47169e30a779bade679ce78df4810fca6d78a955843d2ddb11f226adc517dc", size = 
159928005, upload-time = "2025-11-11T17:51:50.008Z" }, { url = "https://files.pythonhosted.org/packages/b0/72/ec90c3519eaf168f22cb1757ad412f3a2add4782ad3a92861c9ad135d886/triton-3.5.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:61413522a48add32302353fdbaaf92daaaab06f6b5e3229940d21b5207f47579", size = 170425802, upload-time = "2025-11-11T17:40:53.209Z" }, + { url = "https://files.pythonhosted.org/packages/db/53/2bcc46879910991f09c063eea07627baef2bc62fe725302ba8f46a2c1ae5/triton-3.5.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:275a045b6ed670dd1bd005c3e6c2d61846c74c66f4512d6f33cc027b11de8fd4", size = 159940689, upload-time = "2025-11-11T17:51:55.938Z" }, { url = "https://files.pythonhosted.org/packages/f2/50/9a8358d3ef58162c0a415d173cfb45b67de60176e1024f71fbc4d24c0b6d/triton-3.5.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d2c6b915a03888ab931a9fd3e55ba36785e1fe70cbea0b40c6ef93b20fc85232", size = 170470207, upload-time = "2025-11-11T17:41:00.253Z" }, + { url = "https://files.pythonhosted.org/packages/f1/ba/805684a992ee32d486b7948d36aed2f5e3c643fc63883bf8bdca1c3f3980/triton-3.5.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56765ffe12c554cd560698398b8a268db1f616c120007bfd8829d27139abd24a", size = 159955460, upload-time = "2025-11-11T17:52:01.861Z" }, { url = "https://files.pythonhosted.org/packages/27/46/8c3bbb5b0a19313f50edcaa363b599e5a1a5ac9683ead82b9b80fe497c8d/triton-3.5.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f3f4346b6ebbd4fad18773f5ba839114f4826037c9f2f34e0148894cd5dd3dba", size = 170470410, upload-time = "2025-11-11T17:41:06.319Z" }, + { url = "https://files.pythonhosted.org/packages/84/1e/7df59baef41931e21159371c481c31a517ff4c2517343b62503d0cd2be99/triton-3.5.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:02c770856f5e407d24d28ddc66e33cf026e6f4d360dcb8b2fabe6ea1fc758621", size = 160072799, upload-time = "2025-11-11T17:52:07.293Z" }, { url = "https://files.pythonhosted.org/packages/37/92/e97fcc6b2c27cdb87ce5ee063d77f8f26f19f06916aa680464c8104ef0f6/triton-3.5.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0b4d2c70127fca6a23e247f9348b8adde979d2e7a20391bfbabaac6aebc7e6a8", size = 170579924, upload-time = "2025-11-11T17:41:12.455Z" }, + { url = "https://files.pythonhosted.org/packages/14/f9/0430e879c1e63a1016cb843261528fd3187c872c3a9539132efc39514753/triton-3.5.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f617aa7925f9ea9968ec2e1adaf93e87864ff51549c8f04ce658f29bbdb71e2d", size = 159956163, upload-time = "2025-11-11T17:52:12.999Z" }, { url = "https://files.pythonhosted.org/packages/a4/e6/c595c35e5c50c4bc56a7bac96493dad321e9e29b953b526bbbe20f9911d0/triton-3.5.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0637b1efb1db599a8e9dc960d53ab6e4637db7d4ab6630a0974705d77b14b60", size = 170480488, upload-time = "2025-11-11T17:41:18.222Z" }, + { url = "https://files.pythonhosted.org/packages/41/1e/63d367c576c75919e268e4fbc33c1cb33b6dc12bb85e8bfe531c2a8bd5d3/triton-3.5.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8932391d7f93698dfe5bc9bead77c47a24f97329e9f20c10786bb230a9083f56", size = 160073620, upload-time = "2025-11-11T17:52:18.403Z" }, { url = "https://files.pythonhosted.org/packages/16/b5/b0d3d8b901b6a04ca38df5e24c27e53afb15b93624d7fd7d658c7cd9352a/triton-3.5.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bac7f7d959ad0f48c0e97d6643a1cc0fd5786fe61cb1f83b537c6b2d54776478", size = 170582192, upload-time = "2025-11-11T17:41:23.963Z" }, ] @@ -9456,7 +9513,7 @@ name = "tzlocal" version = "5.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "tzdata", marker = "sys_platform == 
'win32'" }, + { name = "tzdata", marker = "sys_platform == 'win32' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" } wheels = [ @@ -9508,7 +9565,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } wheels = [ @@ -9517,13 +9574,13 @@ wheels = [ [package.optional-dependencies] standard = [ - { name = "colorama", marker = "python_full_version < '3.12' and sys_platform == 'win32'" }, - { name = "httptools", marker = "python_full_version < '3.12'" }, - { name = "python-dotenv", marker = "python_full_version < '3.12'" }, - { name = "pyyaml", marker = "python_full_version < '3.12'" }, - { name = "uvloop", marker = "python_full_version < '3.12' and platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" }, - { name = "watchfiles", marker = "python_full_version < '3.12'" }, - { name = "websockets", marker = "python_full_version < '3.12'" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "httptools" }, + { name = "python-dotenv" }, + { name = "pyyaml" }, + 
{ name = "uvloop", marker = "platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" }, + { name = "watchfiles" }, + { name = "websockets" }, ] [[package]] @@ -9588,64 +9645,66 @@ name = "vllm" version = "0.15.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "aiohttp", marker = "python_full_version < '3.12'" }, - { name = "anthropic", marker = "python_full_version < '3.12'" }, - { name = "blake3", marker = "python_full_version < '3.12'" }, - { name = "cachetools", marker = "python_full_version < '3.12'" }, - { name = "cbor2", marker = "python_full_version < '3.12'" }, - { name = "cloudpickle", marker = "python_full_version < '3.12'" }, - { name = "compressed-tensors", marker = "python_full_version < '3.12'" }, - { name = "depyf", marker = "python_full_version < '3.12'" }, - { name = "diskcache", marker = "python_full_version < '3.12'" }, - { name = "einops", marker = "python_full_version < '3.12'" }, - { name = "fastapi", extra = ["standard"], marker = "python_full_version < '3.12'" }, - { name = "filelock", marker = "python_full_version < '3.12'" }, - { name = "flashinfer-python", marker = "python_full_version < '3.12'" }, - { name = "gguf", marker = "python_full_version < '3.12'" }, - { name = "grpcio", marker = "python_full_version < '3.12'" }, - { name = "grpcio-reflection", marker = "python_full_version < '3.12'" }, - { name = "ijson", marker = "python_full_version < '3.12'" }, - { name = "lark", marker = "python_full_version < '3.12'" }, - { name = "llguidance", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64') or (python_full_version < '3.12' and platform_machine == 'arm64') or (python_full_version < '3.12' and platform_machine == 'ppc64le') or (python_full_version < '3.12' and platform_machine == 's390x') or (python_full_version < '3.12' and platform_machine == 'x86_64')" }, - { name = "lm-format-enforcer", marker = "python_full_version < '3.12'" }, - { name 
= "mcp", marker = "python_full_version < '3.12'" }, - { name = "mistral-common", extra = ["image"], marker = "python_full_version < '3.12'" }, - { name = "model-hosting-container-standards", marker = "python_full_version < '3.12'" }, - { name = "msgspec", marker = "python_full_version < '3.12'" }, - { name = "ninja", marker = "python_full_version < '3.12'" }, - { name = "numba", version = "0.61.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, - { name = "openai", marker = "python_full_version < '3.12'" }, - { name = "openai-harmony", marker = "python_full_version < '3.12'" }, - { name = "opencv-python-headless", marker = "python_full_version < '3.12'" }, - { name = "outlines-core", marker = "python_full_version < '3.12'" }, - { name = "partial-json-parser", marker = "python_full_version < '3.12'" }, - { name = "pillow", marker = "python_full_version < '3.12'" }, - { name = "prometheus-client", marker = "python_full_version < '3.12'" }, - { name = "prometheus-fastapi-instrumentator", marker = "python_full_version < '3.12'" }, - { name = "protobuf", marker = "python_full_version < '3.12'" }, - { name = "psutil", marker = "python_full_version < '3.12'" }, - { name = "py-cpuinfo", marker = "python_full_version < '3.12'" }, - { name = "pybase64", marker = "python_full_version < '3.12'" }, - { name = "pydantic", marker = "python_full_version < '3.12'" }, - { name = "python-json-logger", marker = "python_full_version < '3.12'" }, - { name = "pyyaml", marker = "python_full_version < '3.12'" }, - { name = "pyzmq", marker = "python_full_version < '3.12'" }, - { name = "ray", extra = ["cgraph"], marker = "python_full_version < '3.12'" }, - { name = "regex", marker = "python_full_version < '3.12'" }, - { name = "requests", marker = "python_full_version < '3.12'" }, - { name = "sentencepiece", marker = "python_full_version < '3.12'" }, - { name = "setproctitle", 
marker = "python_full_version < '3.12'" }, - { name = "tiktoken", marker = "python_full_version < '3.12'" }, - { name = "tokenizers", marker = "python_full_version < '3.12'" }, - { name = "torch", marker = "python_full_version < '3.12'" }, - { name = "torchaudio", marker = "python_full_version < '3.12'" }, - { name = "torchvision", marker = "python_full_version < '3.12'" }, - { name = "tqdm", marker = "python_full_version < '3.12'" }, - { name = "transformers", marker = "python_full_version < '3.12'" }, - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, - { name = "watchfiles", marker = "python_full_version < '3.12'" }, - { name = "xgrammar", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64') or (python_full_version < '3.12' and platform_machine == 'arm64') or (python_full_version < '3.12' and platform_machine == 'ppc64le') or (python_full_version < '3.12' and platform_machine == 's390x') or (python_full_version < '3.12' and platform_machine == 'x86_64')" }, + { name = "aiohttp" }, + { name = "anthropic" }, + { name = "blake3" }, + { name = "cachetools" }, + { name = "cbor2" }, + { name = "cloudpickle" }, + { name = "compressed-tensors" }, + { name = "depyf" }, + { name = "diskcache" }, + { name = "einops" }, + { name = "fastapi", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "filelock" }, + { name = "flashinfer-python" }, + { name = "gguf" }, + { name = "grpcio" }, + { name = "grpcio-reflection" }, + { name = "ijson" }, + { name = "lark" }, + { name = "llguidance", marker = "platform_machine == 'aarch64' or platform_machine == 'arm64' or platform_machine == 'ppc64le' or platform_machine == 's390x' or platform_machine == 'x86_64'" }, + { name = "lm-format-enforcer" }, + { name = "mcp" }, + { name = "mistral-common", extra = ["image"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "model-hosting-container-standards" }, + { 
name = "msgspec" }, + { name = "ninja" }, + { name = "numba" }, + { name = "numpy" }, + { name = "openai" }, + { name = "openai-harmony" }, + { name = "opencv-python-headless" }, + { name = "outlines-core" }, + { name = "partial-json-parser" }, + { name = "pillow" }, + { name = "prometheus-client" }, + { name = "prometheus-fastapi-instrumentator" }, + { name = "protobuf" }, + { name = "psutil" }, + { name = "py-cpuinfo" }, + { name = "pybase64" }, + { name = "pydantic" }, + { name = "python-json-logger" }, + { name = "pyyaml" }, + { name = "pyzmq" }, + { name = "ray", extra = ["cgraph"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "regex" }, + { name = "requests" }, + { name = "sentencepiece" }, + { name = "setproctitle" }, + { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "six", marker = "python_full_version >= '3.12'" }, + { name = "tiktoken" }, + { name = "tokenizers" }, + { name = "torch" }, + { name = "torchaudio" }, + { name = "torchvision" }, + { name = "tqdm" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" } }, + { name = "typing-extensions" }, + { name = "watchfiles" }, + { name = "xgrammar", marker = "platform_machine == 'aarch64' or platform_machine == 'arm64' or platform_machine == 'ppc64le' or platform_machine == 's390x' or platform_machine == 'x86_64'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/e7/62/17dd4b80508b26c1a85db4fd9789d4726d3f36c95856a89419a178dda461/vllm-0.15.1-cp38-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:97bfc79b0c29d242c57b0d395e48d2949a868957587b853deb813a985a41ed6e", size = 461362624, upload-time = "2026-02-05T00:18:12.38Z" }, @@ -9661,7 +9720,7 @@ dependencies = [ { name = "aiolimiter" }, { name = "ffmpeg-python" }, { name = "langchain-text-splitters" }, - { name = "numpy", marker = "python_full_version < '3.14'" }, + { name = "numpy", marker = "python_full_version < '3.14' or (extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "pillow" }, { name = "pydantic" }, { name = "requests" }, @@ -9890,7 +9949,7 @@ name = "whenever" version = "0.8.10" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "tzdata", marker = "python_full_version >= '3.13' and sys_platform == 'win32'" }, + { name = "tzdata", marker = "(python_full_version >= '3.13' and sys_platform == 'win32') or (python_full_version < '3.13' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (sys_platform != 'win32' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/4d/67/cfc23dfe54ced1e4388826b29db9b9ab2c70a342b33b7e92cf15866f35a6/whenever-0.8.10.tar.gz", hash = "sha256:5e2a3da71527e299f98eec5bb38c4e79d9527a127107387456125005884fb235", size = 240223, upload-time = "2025-10-16T20:31:23.538Z" } wheels = [ @@ -10068,24 +10127,27 @@ name = "xgrammar" version = "0.1.29" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "mlx-lm", marker = "python_full_version < '3.12' and platform_machine == 'arm64' and sys_platform == 'darwin'" }, - { name = "numpy", marker = "python_full_version < '3.12'" }, - { name = "pydantic", marker = "python_full_version < '3.12'" }, - { name = "torch", marker = "python_full_version < '3.12'" }, - { name = "transformers", marker = "python_full_version < '3.12'" }, - { name = "triton", marker = "python_full_version < '3.12' and platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "mlx-lm", marker = "platform_machine == 'arm64' and sys_platform == 'darwin'" }, + { name = "numpy" }, + { name = "pydantic" }, + { name = "torch" }, + { name = "transformers", version 
= "4.57.6", source = { registry = "https://pypi.org/simple" } }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "typing-extensions" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/a3/70dbe3ffd331a1e7e1ad5a95690a4086e6c7cdb8089f5c7eda712219ccec/xgrammar-0.1.29.tar.gz", hash = "sha256:cf195afa81b489eebf35d4c6f37f27136d05420739ab4a6f7f065c938d7e4baa", size = 2321317, upload-time = "2025-12-19T08:23:54.53Z" } wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/6d/6384619408da47411c71b2baed3d4bc509a4a9aa0a63d738709b516869b5/xgrammar-0.1.29-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:fdc66e834b915cf956168ac086bd577f138261644b944e73d73f07085682a4d8", size = 16008147, upload-time = "2025-12-19T08:22:59.54Z" }, { url = "https://files.pythonhosted.org/packages/a8/2d/6ead6206bda4582620b176f02840254183c61682e20041a2d950d6f1ee7a/xgrammar-0.1.29-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:48c5a5c60c5ca5ab09ff5ef9f6b382384a04b153bae5908006cd4f7d80d71e07", size = 17914539, upload-time = "2025-12-19T08:23:02.011Z" }, { url = "https://files.pythonhosted.org/packages/04/75/5305fe75823489c160dec8ee2a95a631e44a690eacec765469e513aca738/xgrammar-0.1.29-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cea3e65d60f8e55568dbb1457e6c4da6d381262a9b1211fe023630630b733d8", size = 34702454, upload-time = "2025-12-19T08:23:05.143Z" }, { url = "https://files.pythonhosted.org/packages/af/3c/7426aadf64a4ecfc1a1966babc57e4694235bf50392e96c506f930a4cdbe/xgrammar-0.1.29-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:866882b58ac654a1d1cd5e0c1ac67824b730aff8a40f9f19f0e8938a107dcd8a", size = 34903300, upload-time = "2025-12-19T08:23:08.098Z" }, { url = "https://files.pythonhosted.org/packages/05/f5/17ebcb575bd105cbcb5fee3c69906cee2423dbfdd73a18a60e205a619244/xgrammar-0.1.29-cp310-cp310-win_amd64.whl", hash = 
"sha256:8551dae4d38bd20c36a12c90a2954c3832bb6397211fc3aeba0b0d7920a1ea4b", size = 5928622, upload-time = "2025-12-19T08:23:10.485Z" }, + { url = "https://files.pythonhosted.org/packages/c6/de/88832fac40962fd0d4703bd4ba84598b06b8408bdc4a6722744f363f68a6/xgrammar-0.1.29-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:d2a7eef1b75b8d31b868d5c79855622aad203275ff267fc0e0ef77dd91906cfe", size = 16008004, upload-time = "2025-12-19T08:23:11.998Z" }, { url = "https://files.pythonhosted.org/packages/76/f6/4d22eec5305657430955442077306bc6ed85becc564116165d4b3a7049ad/xgrammar-0.1.29-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4af7f6ce2b2c6295b936b7cbda09f78e33f2c492a139cd64560f5d8d0fe967ed", size = 17914326, upload-time = "2025-12-19T08:23:14.43Z" }, { url = "https://files.pythonhosted.org/packages/87/0b/b5e5c99ce13a9d378a940cda07c5a08b50cc7efb66936c6ac8fa8232a0d5/xgrammar-0.1.29-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51bcfd63bd48a0b26209ffd2143a42067518559355ec9e4e574cef2ae74fac7c", size = 34699408, upload-time = "2025-12-19T08:23:16.906Z" }, { url = "https://files.pythonhosted.org/packages/a3/a0/4ebc1b3f5af79a3f73d0566034758f3fbcd9c64174646314a9a6f7cc1d27/xgrammar-0.1.29-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e27b50cf8c565845295a8263a4a0790c00a7c1fd783e76222fc0f575654d6f56", size = 34903461, upload-time = "2025-12-19T08:23:19.556Z" }, { url = "https://files.pythonhosted.org/packages/77/21/f6b3978dc9761bbfbbb153d33441206ce2253efa271d8e2d8b6b210d2bd7/xgrammar-0.1.29-cp311-cp311-win_amd64.whl", hash = "sha256:c9f8ea76bcf41b48168974b509b1546d2bee289ff1b20c68bc97434c1ea6e49a", size = 5928633, upload-time = "2025-12-19T08:23:21.67Z" }, + { url = "https://files.pythonhosted.org/packages/c1/d8/fb282fc78be6e9bbefb5cb389f66b22e4efd6ae14f06234f599651620da5/xgrammar-0.1.29-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:d992a3cee7594bbdaa64ae59f90da5ce21c5fe654719df3816014289ada6f04d", size = 
16007376, upload-time = "2025-12-19T08:23:23.634Z" }, { url = "https://files.pythonhosted.org/packages/82/a7/2c9767620ee50f2f40f1eb95e55a3a29e1a0670f087ee6dc1bc1c887b906/xgrammar-0.1.29-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1bbdf02e45cfa8614218ba01ca7952d375f8bc1c13884e3d04daa4b54180cbc2", size = 17913535, upload-time = "2025-12-19T08:23:26.02Z" }, { url = "https://files.pythonhosted.org/packages/57/94/18793c64bf0368075a34c06e196bf002f1e6ab0aee332268f44e8d356d5a/xgrammar-0.1.29-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eb370a16b27a683e5f2b9e429ab41440c69977d4a504849ed61831b94cc704c", size = 34705239, upload-time = "2025-12-19T08:23:28.369Z" }, { url = "https://files.pythonhosted.org/packages/3e/da/4c14e3e00be698009b52700f15326a23272b4b00475939b6acc86b151188/xgrammar-0.1.29-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79e6e4f5cd33be77418cf91efc482f2b3d773d309891224383bc8a4948ad7b07", size = 34906135, upload-time = "2025-12-19T08:23:30.838Z" }, From 01a94c3016f38134b2468d0c844e80a45b5eb808 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 03:09:28 -0700 Subject: [PATCH 06/49] Address PR review feedback - Fix _recv_stream EOF check to distinguish zero-size frames from actual EOF - Make MemoryEnvironment.capabilities dynamic: include 'shell' when command_handler is set - Fix LocalEnvironment.grep to use rglob for recursive file search with glob_pattern - Fix glob_match to use regex for all patterns (fnmatch incorrectly matches '/' with '*') - Fix build_glob_cmd: add parentheses for correct find operator precedence, fix ./ prefix for -path - Add double-enter guard in DockerEnvironment._setup to prevent container leak - Add DockerEnvironment.hardened() convenience constructor for security best practices - Rename docker-sandbox optional dependency to docker-environment - Rename 'env' variable to 'environment' in docs to avoid 
confusion with env vars - Add lifecycle tip about pre-starting the toolset in docs --- docs/environments.md | 38 +- docs/install.md | 2 +- .../pydantic_ai/environments/_base.py | 18 +- .../pydantic_ai/environments/docker.py | 52 +- .../pydantic_ai/environments/local.py | 2 +- .../pydantic_ai/environments/memory.py | 5 +- pydantic_ai_slim/pyproject.toml | 4 +- uv.lock | 476 +++++++++--------- 8 files changed, 322 insertions(+), 275 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index 5e83a3a3dc..0d5474df94 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -68,7 +68,7 @@ File operations (read, write, edit, ls, glob, grep) are confined to the root dir [`DockerEnvironment`][pydantic_ai.environments.docker.DockerEnvironment] runs commands inside a Docker container with configurable resource limits, security options, and network access. -Requires the `docker` package: `pip install pydantic-ai-slim[docker-sandbox]` +Requires the `docker` package: `pip install pydantic-ai-slim[docker-environment]` ```python {title="environments_docker.py" test="skip"} from pydantic_ai.environments.docker import DockerEnvironment @@ -136,22 +136,21 @@ For running untrusted code, you can harden the container with Linux security opt ```python {title="environments_docker_hardened.py" test="skip"} from pydantic_ai.environments.docker import DockerEnvironment -env = DockerEnvironment( - image='python:3.12-slim', - network_disabled=True, - read_only=True, - cap_drop=['ALL'], - security_opt=['no-new-privileges'], - user='nobody', - pids_limit=256, - tmpfs={'/tmp': 'noexec,nosuid,size=64m', '/workspace': 'size=128m'}, - init=True, - memory_limit='512m', - cpu_limit=1.0, -) +env = DockerEnvironment.hardened(image='python:3.12-slim') ``` -This drops all Linux capabilities, prevents privilege escalation, runs as an unprivileged user, limits the number of processes, and makes the root filesystem read-only (with writable tmpfs mounts for scratch space and the working 
directory). +This uses the [`hardened()`][pydantic_ai.environments.docker.DockerEnvironment.hardened] convenience constructor, which sets sensible security defaults: network disabled, read-only root filesystem, all capabilities dropped, no privilege escalation, runs as `nobody`, uses an init process, and limits PIDs, memory, and CPU. You can customize the resource limits: + +```python {title="environments_docker_hardened_custom.py" test="skip"} +from pydantic_ai.environments.docker import DockerEnvironment + +env = DockerEnvironment.hardened( + image='my-sandbox:latest', + memory_limit='1g', + cpu_limit=2.0, + pids_limit=512, +) +``` ## ExecutionEnvironmentToolset @@ -201,6 +200,9 @@ async def main(): # container cleaned up automatically ``` +!!! tip "Pre-starting the environment" + Using `async with toolset:` starts the environment once and keeps it alive across all agent runs. Without it, the environment is started and stopped on each `agent.run()` call — for Docker, that means creating and destroying a container every time. Pre-start the toolset for better performance when running the agent multiple times. + !!! note "Shared environment" When you pass an environment directly, all concurrent `agent.run()` calls share the same environment instance (same container, filesystem, and processes). For isolated concurrent runs, use `environment_factory` — see [Concurrent Runs](#concurrent-runs) below. 
@@ -265,12 +267,12 @@ All environments support per-call environment variables via the `env` parameter ```python {title="environments_env_vars.py" test="skip"} from pydantic_ai.environments.local import LocalEnvironment -env = LocalEnvironment(env_vars={'BASE_URL': 'https://api.example.com'}) +environment = LocalEnvironment(env_vars={'BASE_URL': 'https://api.example.com'}) async def main(): - async with env: + async with environment: # Uses BASE_URL from baseline + API_KEY from per-call - result = await env.shell( + result = await environment.shell( 'curl -H "Authorization: Bearer $API_KEY" $BASE_URL/data', env={'API_KEY': 'sk-test-123'}, ) diff --git a/docs/install.md b/docs/install.md index 1d4e624850..e2ba5ec30f 100644 --- a/docs/install.md +++ b/docs/install.md @@ -67,7 +67,7 @@ pip/uv-add "pydantic-ai-slim[openai]" * `ag-ui` - installs [AG-UI Event Stream Protocol](ui/ag-ui.md) dependencies `ag-ui-protocol` [PyPI ↗](https://pypi.org/project/ag-ui-protocol){:target="_blank"} and `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"} * `dbos` - installs [DBOS Durable Execution](durable_execution/dbos.md) dependency `dbos` [PyPI ↗](https://pypi.org/project/dbos){:target="_blank"} * `prefect` - installs [Prefect Durable Execution](durable_execution/prefect.md) dependency `prefect` [PyPI ↗](https://pypi.org/project/prefect){:target="_blank"} -* `docker-sandbox` - installs [Docker Sandbox](environments.md#dockerenvironment) dependency `docker` [PyPI ↗](https://pypi.org/project/docker){:target="_blank"} +* `docker-environment` - installs [Docker Environment](environments.md#dockerenvironment) dependency `docker` [PyPI ↗](https://pypi.org/project/docker){:target="_blank"} You can also install dependencies for multiple models and use cases, for example: diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 548d39f2bd..9385f5beba 100644 --- 
a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -6,7 +6,6 @@ from __future__ import annotations -import fnmatch import re from abc import ABC, abstractmethod from dataclasses import dataclass @@ -449,13 +448,10 @@ def collect_grep_matches( def glob_match(path: str, pattern: str) -> bool: """Match a path against a glob pattern with `**` support. - `fnmatch` does not support `**` for recursive matching. - This helper converts glob patterns to regex so that `**` - matches zero or more path segments (including `/`). + This helper converts glob patterns to regex where `*` matches + within a single path segment and `**` matches zero or more + path segments (including `/`). """ - if '**' not in pattern: - return fnmatch.fnmatch(path, pattern) - regex = '' i = 0 while i < len(pattern): @@ -526,7 +522,13 @@ def filter_grep_count_output(text: str) -> str: def build_glob_cmd(pattern: str, *, path: str = '.') -> str: """Build a shell `find` command to match files by pattern.""" - return f'find {shell_escape(path)} -path {shell_escape(pattern)} -o -name {shell_escape(pattern)} 2>/dev/null | head -100' + # For -path, prepend the search path since find outputs full paths relative to the starting point + path_pattern = f'{path}/{pattern}' if '/' in pattern else pattern + return ( + f'find {shell_escape(path)}' + f' \\( -path {shell_escape(path_pattern)} -o -name {shell_escape(pattern)} \\)' + f' 2>/dev/null | head -100' + ) def parse_glob_output(text: str) -> list[str]: diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 38cfc7a275..6345321f21 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -1,6 +1,6 @@ """Docker container-based environment for isolated code execution. 
-Requires the `docker` package: `pip install pydantic-ai-slim[docker-sandbox]` +Requires the `docker` package: `pip install pydantic-ai-slim[docker-environment]` """ from __future__ import annotations @@ -41,7 +41,7 @@ except ImportError as _import_error: raise ImportError( 'The `docker` package is required for DockerEnvironment. ' - 'Install it with: pip install pydantic-ai-slim[docker-sandbox]' + 'Install it with: pip install pydantic-ai-slim[docker-environment]' ) from _import_error @@ -137,7 +137,7 @@ async def _recv_stream(self, wanted: int) -> bytes: """Read frames until one for the wanted stream type arrives.""" while True: stream_type, data = await anyio.to_thread.run_sync(self._read_frame) - if not data: + if not data and self._eof: return b'' if stream_type == wanted: return data @@ -296,6 +296,50 @@ def __init__( self._client: docker.DockerClient | None = None self._container: Container | None = None + @classmethod + def hardened( + cls, + *, + image: str = 'python:3.12-slim', + env_vars: dict[str, str] | None = None, + work_dir: str = '/workspace', + memory_limit: str = '512m', + cpu_limit: float = 1.0, + pids_limit: int = 256, + ) -> DockerEnvironment: + """Create a hardened Docker environment with security best practices. + + This is a convenience constructor that sets sensible security defaults: + network disabled, read-only root filesystem, all capabilities dropped, + no privilege escalation, runs as `nobody`, and uses an init process. + + The root filesystem is read-only; writable tmpfs mounts are provided at + `/tmp` and the working directory. + + Args: + image: Docker image to use. + env_vars: Baseline environment variables to set in the container. + work_dir: Working directory inside the container. + memory_limit: Memory limit (e.g. '512m', '1g'). + cpu_limit: CPU limit (e.g. 1.0 for one CPU). + pids_limit: Maximum number of PIDs in the container. 
+ """ + return cls( + image=image, + env_vars=env_vars, + work_dir=work_dir, + network_disabled=True, + read_only=True, + cap_drop=['ALL'], + security_opt=['no-new-privileges'], + user='nobody', + pids_limit=pids_limit, + tmpfs={'/tmp': 'noexec,nosuid,size=64m', work_dir: 'size=128m'}, + init=True, + memory_limit=memory_limit, + cpu_limit=cpu_limit, + ) + @property def capabilities(self) -> frozenset[Capability]: # pragma: lax no cover return frozenset( @@ -325,6 +369,8 @@ async def __aenter__(self) -> Self: def _setup(self) -> None: """Start container (sync, runs in executor).""" + if self._container is not None: + return self._client = docker.from_env() # Create and start container diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index c06ba38a41..3d62598bbe 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -314,7 +314,7 @@ async def grep( if search_dir.is_file(): files = [search_dir] elif glob_pattern: - files = sorted(search_dir.glob(glob_pattern)) + files = sorted(search_dir.rglob(glob_pattern)) else: files = sorted(search_dir.rglob('*')) diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index 5e406b7fc9..da7e9f2078 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -71,7 +71,10 @@ def __init__( @property def capabilities(self) -> frozenset[Capability]: - return frozenset({'ls', 'read_file', 'write_file', 'replace_str', 'glob', 'grep'}) + caps: set[Capability] = {'ls', 'read_file', 'write_file', 'replace_str', 'glob', 'grep'} + if self._command_handler is not None: + caps.add('shell') + return frozenset(caps) @staticmethod def _normalize(path: str) -> str: diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index d9feb2c7c4..7ff583d26c 100644 --- 
a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -129,8 +129,8 @@ temporal = ["temporalio==1.20.0"] dbos = ["dbos>=2.10.0"] # Prefect prefect = ["prefect>=3.4.21"] -# Sandboxes -docker-sandbox = ["docker>=7.0"] +# Execution environments +docker-environment = ["docker>=7.0"] [tool.hatch.metadata] allow-direct-references = true diff --git a/uv.lock b/uv.lock index 3433629050..42fe62a17f 100644 --- a/uv.lock +++ b/uv.lock @@ -27,8 +27,8 @@ name = "accelerate" version = "1.12.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "numpy" }, { name = "packaging" }, { name = "psutil" }, @@ -314,7 +314,7 @@ name = "apache-tvm-ffi" version = "0.1.8.post2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/e3/e9/a13952726228fa6282154ecf927092396bc759739e5e045019f6ab92f3ca/apache_tvm_ffi-0.1.8.post2.tar.gz", hash = "sha256:4513e38852894f290172ecfefcbc18d34e817fd29c16a0f1770e130c82b4067e", size = 2441111, upload-time = "2026-01-13T18:11:27.864Z" } wheels = [ @@ -1225,10 +1225,10 @@ name = "compressed-tensors" version = "0.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "loguru" }, - { name = "pydantic" }, - { name = "torch" }, - { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" } }, + { name = "loguru", marker = "python_full_version < '3.12'" }, + { name = "pydantic", marker = "python_full_version < '3.12'" }, + { name = "torch", marker = "python_full_version < '3.12'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/fc/65/88dd1c58fb9d0ded51b5c86471b937a1525f91fad2211a6f051dc1ea822d/compressed_tensors-0.13.0.tar.gz", hash = "sha256:23893824d3498ea3f1a829f14a8fa85f9a5e76a34c711a038b8d7c619ca9a67c", size = 200995, upload-time = "2025-12-16T16:03:55.397Z" } wheels = [ @@ -1426,7 +1426,7 @@ name = "cuda-bindings" version = "13.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cuda-pathfinder" }, + { name = "cuda-pathfinder", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/60/63/579402b642f5b9b8ceb79e456b39b5771f27e132a8af3b140e54d69790fc/cuda_bindings-13.1.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4400370a83f1538e25ed4c18c34a0e9d5fad39741e282e69ce24d1479a11017d", size = 15777291, upload-time = "2025-12-09T22:05:41.109Z" }, @@ -1462,8 +1462,8 @@ name = "cuda-python" version = "13.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cuda-bindings" 
}, - { name = "cuda-pathfinder" }, + { name = "cuda-bindings", marker = "python_full_version < '3.12'" }, + { name = "cuda-pathfinder", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/cd/08/b5e3b9822662d72d540d830531e3ab6a7cabbda3dd56175696aabccfeb76/cuda_python-13.1.1-py3-none-any.whl", hash = "sha256:944cc4fe6482673d28dd545797a28840945a1668739328fa2ad1e9be4f7050d9", size = 8038, upload-time = "2025-12-09T22:13:10.719Z" }, @@ -1474,8 +1474,8 @@ name = "cupy-cuda12x" version = "13.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastrlock" }, - { name = "numpy" }, + { name = "fastrlock", marker = "python_full_version < '3.12'" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/f7/2e/db22c5148884e4e384f6ebbc7971fa3710f3ba67ca492798890a0fdebc45/cupy_cuda12x-13.6.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:9e37f60f27ff9625dfdccc4688a09852707ec613e32ea9404f425dd22a386d14", size = 126341714, upload-time = "2025-08-18T08:24:08.335Z" }, @@ -1518,8 +1518,8 @@ dependencies = [ { name = "filelock" }, { name = "fsspec", extra = ["http"] }, { name = "httpx" }, - { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = 
"huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "multiprocess" }, { name = "numpy" }, { name = "packaging" }, @@ -1597,8 +1597,8 @@ name = "depyf" version = "0.20.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "astor" }, - { name = "dill" }, + { name = "astor", marker = "python_full_version < '3.12'" }, + { name = "dill", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/88/35/83fb0178212279aa0af031031905804c6de5618435d229f41ed21bb9ad2c/depyf-0.20.0.tar.gz", hash = "sha256:fb7683bd72c44f67b56029df2c47721e9a02ffa4d7b19095f1c54c4ebf797a98", size = 6168761, upload-time = "2025-10-13T12:33:38.589Z" } wheels = [ @@ -1901,14 +1901,14 @@ wheels = [ [package.optional-dependencies] standard = [ - { name = "email-validator" }, - { name = "fastapi-cli", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "httpx" }, - { name = "jinja2" }, - { name = "pydantic-extra-types" }, - { name = "pydantic-settings" }, - { name = "python-multipart" }, - { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "email-validator", marker = "python_full_version < '3.12'" }, + { name = "fastapi-cli", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "httpx", marker = "python_full_version < '3.12'" }, + { name = "jinja2", marker = "python_full_version < '3.12'" }, + { name = "pydantic-extra-types", marker = "python_full_version < '3.12'" }, + { name = "pydantic-settings", marker = 
"python_full_version < '3.12'" }, + { name = "python-multipart", marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -1916,10 +1916,10 @@ name = "fastapi-cli" version = "0.0.16" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "rich-toolkit" }, + { name = "rich-toolkit", marker = "python_full_version < '3.12'" }, { name = "tomli", marker = "python_full_version < '3.11'" }, - { name = "typer" }, - { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typer", marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/99/75/9407a6b452be4c988feacec9c9d2f58d8f315162a6c7258d5a649d933ebe/fastapi_cli-0.0.16.tar.gz", hash = "sha256:e8a2a1ecf7a4e062e3b2eec63ae34387d1e142d4849181d936b23c4bdfe29073", size = 19447, upload-time = "2025-11-10T19:01:07.856Z" } wheels = [ @@ -1928,8 +1928,8 @@ wheels = [ [package.optional-dependencies] standard = [ - { name = "fastapi-cloud-cli" }, - { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "fastapi-cloud-cli", marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] [[package]] @@ -1937,14 +1937,14 @@ name = "fastapi-cloud-cli" version = "0.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastar" }, - { name = "httpx" }, - { name = "pydantic", extra = ["email"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "rich-toolkit" }, - { name = "rignore" }, - { name = "sentry-sdk" }, - { name = "typer" }, - { name = "uvicorn", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "fastar", marker = "python_full_version < '3.12'" }, + { name = "httpx", marker = "python_full_version < '3.12'" }, + { name = "pydantic", extra = ["email"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "rich-toolkit", marker = "python_full_version < '3.12'" }, + { name = "rignore", marker = "python_full_version < '3.12'" }, + { name = "sentry-sdk", marker = "python_full_version < '3.12'" }, + { name = "typer", marker = "python_full_version < '3.12'" }, + { name = "uvicorn", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/cf/0d/3b0d2991f481c122c552b4ae38a8b400a75ab0edbc85536f2a6224f72da2/fastapi_cloud_cli-0.7.0.tar.gz", hash = "sha256:8b025944475c3d53262105886dfe051f46383e4f287787a46892b524922ac0b6", size = 30906, upload-time = "2025-12-16T12:51:49.082Z" } wheels = [ @@ -2217,19 +2217,19 @@ name = "flashinfer-python" version = "0.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "apache-tvm-ffi" }, - { name = 
"click" }, - { name = "einops" }, - { name = "ninja" }, - { name = "numpy" }, - { name = "nvidia-cudnn-frontend" }, - { name = "nvidia-cutlass-dsl" }, - { name = "nvidia-ml-py" }, - { name = "packaging" }, - { name = "requests" }, - { name = "tabulate" }, - { name = "torch" }, - { name = "tqdm" }, + { name = "apache-tvm-ffi", marker = "python_full_version < '3.12'" }, + { name = "click", marker = "python_full_version < '3.12'" }, + { name = "einops", marker = "python_full_version < '3.12'" }, + { name = "ninja", marker = "python_full_version < '3.12'" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "nvidia-cudnn-frontend", marker = "python_full_version < '3.12'" }, + { name = "nvidia-cutlass-dsl", marker = "python_full_version < '3.12'" }, + { name = "nvidia-ml-py", marker = "python_full_version < '3.12'" }, + { name = "packaging", marker = "python_full_version < '3.12'" }, + { name = "requests", marker = "python_full_version < '3.12'" }, + { name = "tabulate", marker = "python_full_version < '3.12'" }, + { name = "torch", marker = "python_full_version < '3.12'" }, + { name = "tqdm", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/68/81/5a84e14df7358d2c2903b18c6f2779bd4b4a6739076d01a847d4c18fb102/flashinfer_python-0.6.1.tar.gz", hash = "sha256:8dc2fc5dc187fc70151d5f39ef560fde8a38117a4f6cf40dce0ddb09cbd4f0bf", size = 5141191, upload-time = "2026-01-14T05:40:27.825Z" } wheels = [ @@ -2407,9 +2407,9 @@ name = "gguf" version = "0.17.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy" }, - { name = "pyyaml" }, - { name = "tqdm" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "pyyaml", marker = "python_full_version < '3.12'" }, + { name = "tqdm", marker = "python_full_version < '3.12'" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/08/08/7de1ca4b71e7bf33b547f82bb22505e221b5fa42f67d635e200e0ad22ad6/gguf-0.17.1.tar.gz", hash = "sha256:36ad71aad900a3e75fc94ebe96ea6029f03a4e44be7627ef7ad3d03e8c7bcb53", size = 89338, upload-time = "2025-06-19T14:00:33.705Z" } wheels = [ @@ -2494,8 +2494,8 @@ dependencies = [ { name = "gradio-client" }, { name = "groovy" }, { name = "httpx" }, - { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "jinja2" }, { name = "markupsafe" }, { name = "numpy" }, @@ -2527,8 +2527,8 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fsspec" }, { name = "httpx" }, - { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", 
version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "packaging" }, { name = "typing-extensions" }, ] @@ -2725,8 +2725,8 @@ name = "grpcio-reflection" version = "1.76.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "grpcio" }, - { name = "protobuf" }, + { name = "grpcio", marker = "python_full_version < '3.12'" }, + { name = "protobuf", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/bd/10/767f9c2719c435616141efb3371f6e158f95cdde36a34876ae1d08ba7440/grpcio_reflection-1.76.0.tar.gz", hash = "sha256:e0e7e49921c2ee951e5ddff0bdbacbd1ac1a70888beb61d567f3d01b799decb1", size = 18845, upload-time = "2025-10-21T16:28:57.776Z" } wheels = [ @@ -2900,20 +2900,18 @@ name = "huggingface-hub" version = "0.36.2" source = { registry = "https://pypi.org/simple" } resolution-markers = [ - "python_full_version >= '3.13'", - "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", "python_full_version < '3.11'", ] dependencies = [ - { name = "filelock", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "fsspec", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "hf-xet", marker = "(platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, - { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "pyyaml", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "requests", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "typing-extensions", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "filelock", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "fsspec", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "hf-xet", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version < '3.12' and platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version < '3.12' and platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version < '3.12' and platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'arm64' and platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "packaging", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "pyyaml", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "requests", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tqdm", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/7c/b7/8cb61d2eece5fb05a83271da168186721c450eb74e3c31f7ef3169fa475b/huggingface_hub-0.36.2.tar.gz", hash = "sha256:1934304d2fb224f8afa3b87007d58501acfda9215b334eed53072dd5e815ff7a", size = 649782, upload-time = "2026-02-06T09:24:13.098Z" } wheels = [ @@ -2931,16 +2929,16 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "filelock", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "fsspec", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "hf-xet", marker = "(platform_machine == 'AMD64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'AMD64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'aarch64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'amd64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'arm64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'x86_64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, - { name = "httpx", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "pyyaml", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "shellingham", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "typer-slim", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "typing-extensions", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "filelock", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "fsspec", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "hf-xet", marker = "(python_full_version >= '3.12' and platform_machine == 'AMD64') or (python_full_version >= '3.12' and platform_machine == 'aarch64') or (python_full_version >= '3.12' and platform_machine == 'amd64') or (python_full_version >= '3.12' and platform_machine == 'arm64') or (python_full_version >= '3.12' and platform_machine == 'x86_64') or (platform_machine != 'AMD64' and platform_machine != 'aarch64' and platform_machine != 'amd64' and platform_machine != 'arm64' and platform_machine != 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'AMD64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'AMD64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or 
(platform_machine == 'aarch64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'aarch64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'amd64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'amd64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'arm64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'arm64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (platform_machine == 'x86_64' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (platform_machine == 'x86_64' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "httpx", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "packaging", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "pyyaml", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "shellingham", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tqdm", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typer-slim", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typing-extensions", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c4/fc/eb9bc06130e8bbda6a616e1b80a7aa127681c448d6b49806f61db2670b61/huggingface_hub-1.4.1.tar.gz", hash = "sha256:b41131ec35e631e7383ab26d6146b8d8972abc8b6309b963b306fbcca87f5ed5", size = 642156, upload-time = "2026-02-06T09:20:03.013Z" } wheels = [ @@ -3590,10 +3588,10 @@ name = "lm-format-enforcer" version = "0.11.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "interegular" }, - { name = "packaging" }, - { name = "pydantic" }, - { name = "pyyaml" }, + { name = "interegular", marker = "python_full_version < '3.12'" }, + { name = "packaging", marker = "python_full_version < '3.12'" }, + { name = "pydantic", marker = "python_full_version < '3.12'" }, + { name = "pyyaml", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/84/d5/41cd417ba7dfdbbcfe46cebf81fb3dfd7c591b89897560ad05bb410a465d/lm_format_enforcer-0.11.3.tar.gz", hash = "sha256:e68081c108719cce284a9bcc889709b26ffb085a1945b5eba3a12cfa96d528da", size = 40258, upload-time = "2025-08-24T19:37:47.527Z" } wheels = [ @@ -3647,8 +3645,8 @@ name = "loguru" version = "0.7.3" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "win32-setctime", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "python_full_version < '3.12' and sys_platform == 'win32'" }, + { name = "win32-setctime", marker = "python_full_version < '3.12' and sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/3a/05/a1dae3dffd1116099471c643b8924f5aa6524411dc6c63fdae648c4f1aca/loguru-0.7.3.tar.gz", hash = "sha256:19480589e77d47b8d85b2c827ad95d49bf31b0dcde16593892eb51dd18706eb6", size = 63559, upload-time = "2024-12-06T11:20:56.608Z" } wheels = [ @@ -4017,14 +4015,14 @@ name = "mistral-common" version = "1.9.1" source = { 
registry = "https://pypi.org/simple" } dependencies = [ - { name = "jsonschema" }, - { name = "numpy" }, - { name = "pillow" }, - { name = "pydantic" }, - { name = "pydantic-extra-types", extra = ["pycountry"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "requests" }, - { name = "tiktoken" }, - { name = "typing-extensions" }, + { name = "jsonschema", marker = "python_full_version < '3.12'" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "pillow", marker = "python_full_version < '3.12'" }, + { name = "pydantic", marker = "python_full_version < '3.12'" }, + { name = "pydantic-extra-types", extra = ["pycountry"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "requests", marker = "python_full_version < '3.12'" }, + { name = "tiktoken", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/db/ce/685b8127a326478e05501cb4c9ca23d1cd9f37e16c465a1e832c75aea709/mistral_common-1.9.1.tar.gz", hash = "sha256:550583d70a395c3586cfb748ffab53bd1d7c3409507f0efc0118bff30ffb26e9", size = 6338922, upload-time = "2026-02-12T10:53:41.639Z" } wheels = [ @@ -4033,7 +4031,7 @@ wheels = [ [package.optional-dependencies] image = [ - { name = "opencv-python-headless" }, + { name = "opencv-python-headless", marker = "python_full_version < '3.12'" }, ] [[package]] @@ -4260,8 +4258,8 @@ dependencies = [ { name = "protobuf" }, { name = "pyyaml" }, { name = "sentencepiece" }, - { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "transformers", version = "5.2.0", source = { registry = 
"https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/e3/62/f46e1355256a114808517947f8e83ad6be310c7288c551db0fa678f47923/mlx_lm-0.29.1.tar.gz", hash = "sha256:b99180d8f33d33a077b814e550bfb2d8a59ae003d668fd1f4b3fff62a381d34b", size = 232302, upload-time = "2025-12-16T16:58:27.959Z" } wheels = [ @@ -4307,13 +4305,13 @@ name = "model-hosting-container-standards" version = "0.1.13" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "fastapi" }, - { name = "httpx" }, - { name = "jmespath" }, - { name = "pydantic" }, - { name = "setuptools" }, - { name = "starlette" }, - { name = "supervisor" }, + { name = "fastapi", marker = "python_full_version < '3.12'" }, + { name = "httpx", marker = "python_full_version < '3.12'" }, + { name = "jmespath", marker = "python_full_version < '3.12'" }, + { name = "pydantic", marker = "python_full_version < '3.12'" }, + { name = "setuptools", marker = "python_full_version < '3.12'" }, + { name = "starlette", marker = "python_full_version < '3.12'" }, + { name = "supervisor", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d7/b7/a6a31b4dfd30d14b1019dc358f09c9d88ca38e555ba7c976e7d3e6b593fe/model_hosting_container_standards-0.1.13.tar.gz", 
hash = "sha256:27a1333410dde2719286a300a2803e24fdde407baa91894eb845c0f268aa194d", size = 79116, upload-time = "2026-01-09T21:45:20.683Z" } wheels = [ @@ -5000,7 +4998,7 @@ name = "nvidia-cutlass-dsl" version = "4.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "nvidia-cutlass-dsl-libs-base" }, + { name = "nvidia-cutlass-dsl-libs-base", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/55/09/42fca58af350265131b6f8665ad5b62526c95e6692788460bd5306d3efe2/nvidia_cutlass_dsl-4.4.0-py3-none-any.whl", hash = "sha256:2d1f34333e4d774002d44b53262d71aaf738700fcf3858290629f9a7b374c61c", size = 10168, upload-time = "2026-02-14T03:38:54.267Z" }, @@ -5011,9 +5009,9 @@ name = "nvidia-cutlass-dsl-libs-base" version = "4.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cuda-python" }, - { name = "numpy" }, - { name = "typing-extensions" }, + { name = "cuda-python", marker = "python_full_version < '3.12'" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/ad/af/cf64251bae66077769adbcd9a2e96b86aeb3c41490c5ee0a939a1a3b511e/nvidia_cutlass_dsl_libs_base-4.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:703169d0843ad7e310b397aa95128e3fa983571a9a488f826c2968f3e71df2b8", size = 75460001, upload-time = "2026-02-14T03:44:18.705Z" }, @@ -5106,7 +5104,7 @@ name = "openai-harmony" version = "0.0.8" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pydantic" }, + { name = "pydantic", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/3e/92/2d038d096f29179c7c9571b431f9e739f87a487121901725e23fe338dd9d/openai_harmony-0.0.8.tar.gz", hash = "sha256:6e43f98e6c242fa2de6f8ea12eab24af63fa2ed3e89c06341fb9d92632c5cbdf", size = 284777, upload-time 
= "2025-11-05T19:07:06.727Z" } wheels = [ @@ -5141,7 +5139,7 @@ name = "opencv-python-headless" version = "4.13.0.92" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/79/42/2310883be3b8826ac58c3f2787b9358a2d46923d61f88fedf930bc59c60c/opencv_python_headless-4.13.0.92-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:1a7d040ac656c11b8c38677cc8cccdc149f98535089dbe5b081e80a4e5903209", size = 46247192, upload-time = "2026-02-05T07:01:35.187Z" }, @@ -5469,8 +5467,8 @@ wheels = [ [package.optional-dependencies] llamacpp = [ - { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "llama-cpp-python" }, { name = "numba" }, ] @@ -5486,8 +5484,8 @@ transformers = [ { name = "accelerate" }, { name = "datasets" }, { name = "setuptools" }, - { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] [[package]] @@ -5934,8 +5932,8 @@ name = "prometheus-fastapi-instrumentator" version = "7.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "prometheus-client" }, - { name = "starlette" }, + { name = "prometheus-client", marker = "python_full_version < '3.12'" }, + { name = "starlette", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/69/6d/24d53033cf93826aa7857699a4450c1c67e5b9c710e925b1ed2b320c04df/prometheus_fastapi_instrumentator-7.1.0.tar.gz", hash = "sha256:be7cd61eeea4e5912aeccb4261c6631b3f227d8924542d79eaf5af3f439cbe5e", size = 20220, upload-time = "2025-03-19T19:35:05.351Z" } wheels = [ @@ -6758,7 +6756,7 @@ cohere = [ dbos = [ { name = "dbos" }, ] -docker-sandbox = [ +docker-environment = [ { name = "docker" }, ] duckduckgo = [ @@ -6812,8 +6810,8 @@ outlines-transformers = [ { name = "outlines", extra = ["transformers"], marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 
'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "pillow", marker = "python_full_version < '3.14' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "torch", marker = "(python_full_version < '3.14' and platform_machine != 'x86_64') or (python_full_version < '3.14' and sys_platform != 'darwin') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, - { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.14' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, - { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.14' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (python_full_version < '3.14' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.12' and python_full_version < '3.14') or (python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-huggingface') or (python_full_version < '3.12' and extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (python_full_version >= '3.14' and extra == 
'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] outlines-vllm-offline = [ { name = "outlines", marker = "python_full_version < '3.14'" }, @@ -6863,7 +6861,7 @@ requires-dist = [ { name = "cohere", marker = "sys_platform != 'emscripten' and extra == 'cohere'", specifier = ">=5.18.0" }, { name = "dbos", marker = "extra == 'dbos'", specifier = ">=2.10.0" }, { name = "ddgs", marker = "extra == 'duckduckgo'", specifier = ">=9.0.0" }, - { name = "docker", marker = "extra == 'docker-sandbox'", specifier = ">=7.0" }, + { name = "docker", marker = "extra == 'docker-environment'", specifier = ">=7.0" }, { name = "exa-py", marker = "extra == 'exa'", specifier = ">=2.0.0" }, { name = "exceptiongroup", marker = "python_full_version < '3.11'", specifier = ">=1.2.2" }, { name = "fasta2a", marker = "extra == 'a2a'", specifier = ">=0.4.1" }, @@ -6914,7 +6912,7 @@ requires-dist = [ { name = "voyageai", marker = "python_full_version < '3.14' and extra == 'voyageai'", specifier = ">=0.3.7" }, { name = "xai-sdk", marker = "extra == 'xai'", specifier = ">=1.5.0" }, ] -provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "docker-sandbox", "duckduckgo", "evals", "exa", "fastmcp", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "openrouter", "outlines-llamacpp", "outlines-mlxlm", "outlines-sglang", "outlines-transformers", "outlines-vllm-offline", "prefect", "retries", "sentence-transformers", "tavily", "temporal", "ui", "vertexai", "voyageai", "web", "xai"] +provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "docker-environment", "duckduckgo", "evals", "exa", "fastmcp", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "openrouter", "outlines-llamacpp", "outlines-mlxlm", "outlines-sglang", "outlines-transformers", "outlines-vllm-offline", "prefect", "retries", "sentence-transformers", "tavily", "temporal", "ui", 
"vertexai", "voyageai", "web", "xai"] [[package]] name = "pydantic-core" @@ -7091,7 +7089,7 @@ wheels = [ [package.optional-dependencies] pycountry = [ - { name = "pycountry" }, + { name = "pycountry", marker = "python_full_version < '3.12'" }, ] [[package]] @@ -7481,7 +7479,7 @@ name = "pyzmq" version = "27.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "implementation_name == 'pypy'" }, + { name = "cffi", marker = "python_full_version < '3.12' and implementation_name == 'pypy'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" } wheels = [ @@ -7554,14 +7552,14 @@ name = "ray" version = "2.53.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "click" }, - { name = "filelock" }, - { name = "jsonschema" }, - { name = "msgpack" }, - { name = "packaging" }, - { name = "protobuf" }, - { name = "pyyaml" }, - { name = "requests" }, + { name = "click", marker = "python_full_version < '3.12'" }, + { name = "filelock", marker = "python_full_version < '3.12'" }, + { name = "jsonschema", marker = "python_full_version < '3.12'" }, + { name = "msgpack", marker = "python_full_version < '3.12'" }, + { name = "packaging", marker = "python_full_version < '3.12'" }, + { name = "protobuf", marker = "python_full_version < '3.12'" }, + { name = "pyyaml", marker = "python_full_version < '3.12'" }, + { name = "requests", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/2f/99/21986c7f8135dafbf7c49229c52faaa9d2d365db7d86fffe978dde8ee967/ray-2.53.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4db914a0a6dd608fa49c066929a1282745a2dbd73caee67d7b80fe684ca65bdd", size = 69473649, upload-time = "2025-12-20T16:05:40.58Z" 
}, @@ -7583,7 +7581,7 @@ wheels = [ [package.optional-dependencies] cgraph = [ - { name = "cupy-cuda12x", marker = "sys_platform != 'darwin'" }, + { name = "cupy-cuda12x", marker = "python_full_version < '3.12' and sys_platform != 'darwin'" }, ] [[package]] @@ -7811,9 +7809,9 @@ name = "rich-toolkit" version = "0.17.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "click" }, - { name = "rich" }, - { name = "typing-extensions" }, + { name = "click", marker = "python_full_version < '3.12'" }, + { name = "rich", marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/97/09/3f9b8d9daaf235195c626f21e03604c05b987404ee3bcacee0c1f67f2a8e/rich_toolkit-0.17.1.tar.gz", hash = "sha256:5af54df8d1dd9c8530e462e1bdcaed625c9b49f5a55b035aa0ba1c17bdb87c9a", size = 187925, upload-time = "2025-12-17T10:49:22.583Z" } wheels = [ @@ -8556,8 +8554,8 @@ name = "sentence-transformers" version = "5.2.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 
'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "numpy" }, { name = "scikit-learn", version = "1.7.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "scikit-learn", version = "1.8.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, @@ -8565,8 +8563,8 @@ dependencies = [ { name = "scipy", version = "1.16.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, { name = "torch" }, { name = "tqdm" }, - { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "transformers", version = "5.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, { name = "typing-extensions" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/a6/bc/0bc9c0ec1cf83ab2ec6e6f38667d167349b950fff6dd2086b79bd360eeca/sentence_transformers-5.2.2.tar.gz", hash = "sha256:7033ee0a24bc04c664fd490abf2ef194d387b3a58a97adcc528783ff505159fa", size = 381607, upload-time = "2026-01-27T11:11:02.658Z" } @@ -8643,8 +8641,8 @@ name = "sentry-sdk" version = "2.48.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "certifi" }, - { name = "urllib3" }, + { name = "certifi", marker = "python_full_version < '3.12'" }, + { name = "urllib3", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/40/f0/0e9dc590513d5e742d7799e2038df3a05167cba084c6ca4f3cdd75b55164/sentry_sdk-2.48.0.tar.gz", hash = "sha256:5213190977ff7fdff8a58b722fb807f8d5524a80488626ebeda1b5676c0c1473", size = 384828, upload-time = "2025-12-16T14:55:41.722Z" } wheels = [ @@ -9080,8 +9078,8 @@ name = "tokenizers" version = "0.22.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 
'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } wheels = [ @@ -9234,7 +9232,7 @@ name = "torchaudio" version = "2.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "torch" }, + { name = "torch", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/1c/87/7de58c8f4c1946ec4d9070354eae73d1e4f3d2426e5cfa45febbd8451ce5/torchaudio-2.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd13541197e035338bd43225b2067532056486d357c661e12d49ace4fc37f8bb", size = 805912, upload-time = "2025-11-12T15:25:47.857Z" }, @@ -9272,9 +9270,9 @@ name = "torchvision" version = "0.24.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "numpy" }, - { name = "pillow" }, - { name = "torch" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "pillow", marker = "python_full_version < '3.12'" }, + { name = "torch", marker = "python_full_version < '3.12'" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/f7/09/d51aadf8591138e08b74c64a6eb783630c7a31ca2634416277115a9c3a2b/torchvision-0.24.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ded5e625788572e4e1c4d155d1bbc48805c113794100d70e19c76e39e4d53465", size = 1891441, upload-time = "2025-11-12T15:25:01.687Z" }, @@ -9324,22 +9322,20 @@ name = "transformers" version = "4.57.6" source = { registry = "https://pypi.org/simple" } resolution-markers = [ - "python_full_version >= '3.13'", - "python_full_version == '3.12.*'", "python_full_version == '3.11.*'", "python_full_version < '3.11'", ] dependencies = [ - { name = "filelock", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - 
{ name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "numpy", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "pyyaml", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "regex", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "requests", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "safetensors", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "tokenizers", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "filelock", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "huggingface-hub", version = "0.36.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "numpy", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "packaging", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" 
}, + { name = "pyyaml", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "regex", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "requests", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "safetensors", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tokenizers", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "tqdm", marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c4/35/67252acc1b929dc88b6602e8c4a982e64f31e733b804c14bc24b47da35e6/transformers-4.57.6.tar.gz", hash = "sha256:55e44126ece9dc0a291521b7e5492b572e6ef2766338a610b9ab5afbb70689d3", size = 10134912, upload-time = "2026-01-16T10:38:39.284Z" } wheels = [ @@ -9357,15 +9353,15 @@ resolution-markers = [ "python_full_version < '3.11'", ] dependencies = [ - { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker 
= "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "numpy", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "packaging", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "pyyaml", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "regex", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "safetensors", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "tokenizers", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "tqdm", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "typer-slim", marker = "extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "huggingface-hub", version = "1.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "numpy", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "packaging", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "pyyaml", marker = "python_full_version >= '3.12' or extra == 
'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "regex", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "safetensors", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tokenizers", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "tqdm", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, + { name = "typer-slim", marker = "python_full_version >= '3.12' or extra == 'extra-16-pydantic-ai-slim-huggingface' or extra != 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/bd/7e/8a0c57d562015e5b16c97c1f0b8e0e92ead2c7c20513225dc12c2043ba9f/transformers-5.2.0.tar.gz", hash = "sha256:0088b8b46ccc9eff1a1dca72b5d618a5ee3b1befc3e418c9512b35dea9f9a650", size = 8618176, upload-time = "2026-02-16T18:54:02.867Z" } wheels = [ @@ -9574,13 +9570,13 @@ wheels = [ [package.optional-dependencies] standard = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "httptools" }, - { name = "python-dotenv" }, - { name = "pyyaml" }, - { name = "uvloop", marker = "platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" }, - { name = "watchfiles" }, - { name = "websockets" }, + { name = "colorama", marker = "python_full_version < '3.12' and sys_platform == 'win32'" }, + { name = "httptools", marker = "python_full_version < '3.12'" }, + { name = "python-dotenv", marker = "python_full_version < '3.12'" }, + { name = "pyyaml", marker = 
"python_full_version < '3.12'" }, + { name = "uvloop", marker = "python_full_version < '3.12' and platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" }, + { name = "watchfiles", marker = "python_full_version < '3.12'" }, + { name = "websockets", marker = "python_full_version < '3.12'" }, ] [[package]] @@ -9645,66 +9641,64 @@ name = "vllm" version = "0.15.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "aiohttp" }, - { name = "anthropic" }, - { name = "blake3" }, - { name = "cachetools" }, - { name = "cbor2" }, - { name = "cloudpickle" }, - { name = "compressed-tensors" }, - { name = "depyf" }, - { name = "diskcache" }, - { name = "einops" }, - { name = "fastapi", extra = ["standard"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "filelock" }, - { name = "flashinfer-python" }, - { name = "gguf" }, - { name = "grpcio" }, - { name = "grpcio-reflection" }, - { name = "ijson" }, - { name = "lark" }, - { name = "llguidance", marker = "platform_machine == 'aarch64' or platform_machine == 'arm64' or platform_machine == 'ppc64le' or platform_machine == 's390x' or platform_machine == 'x86_64'" }, - { name = "lm-format-enforcer" }, - { name = "mcp" }, - { name = "mistral-common", extra = ["image"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "model-hosting-container-standards" }, - { name = "msgspec" }, - { name = "ninja" }, - { name = "numba" }, - { name = "numpy" }, - { name = "openai" }, - { name = "openai-harmony" }, - { name = "opencv-python-headless" }, - { name = "outlines-core" }, - { name = "partial-json-parser" }, - { name = "pillow" }, - { name = "prometheus-client" }, - { name = "prometheus-fastapi-instrumentator" }, - { name = "protobuf" }, - { name = "psutil" }, - { name = "py-cpuinfo" }, - { name = "pybase64" }, - { name = "pydantic" }, - { name = "python-json-logger" }, - { name = "pyyaml" }, - { name = 
"pyzmq" }, - { name = "ray", extra = ["cgraph"], marker = "extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline'" }, - { name = "regex" }, - { name = "requests" }, - { name = "sentencepiece" }, - { name = "setproctitle" }, - { name = "setuptools", marker = "python_full_version >= '3.12'" }, - { name = "six", marker = "python_full_version >= '3.12'" }, - { name = "tiktoken" }, - { name = "tokenizers" }, - { name = "torch" }, - { name = "torchaudio" }, - { name = "torchvision" }, - { name = "tqdm" }, - { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" } }, - { name = "typing-extensions" }, - { name = "watchfiles" }, - { name = "xgrammar", marker = "platform_machine == 'aarch64' or platform_machine == 'arm64' or platform_machine == 'ppc64le' or platform_machine == 's390x' or platform_machine == 'x86_64'" }, + { name = "aiohttp", marker = "python_full_version < '3.12'" }, + { name = "anthropic", marker = "python_full_version < '3.12'" }, + { name = "blake3", marker = "python_full_version < '3.12'" }, + { name = "cachetools", marker = "python_full_version < '3.12'" }, + { name = "cbor2", marker = "python_full_version < '3.12'" }, + { name = "cloudpickle", marker = "python_full_version < '3.12'" }, + { name = "compressed-tensors", marker = "python_full_version < '3.12'" }, + { name = "depyf", marker = "python_full_version < '3.12'" }, + { name = "diskcache", marker = "python_full_version < '3.12'" }, + { name = "einops", marker = "python_full_version < '3.12'" }, + { name = "fastapi", extra = ["standard"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "filelock", marker = "python_full_version < '3.12'" }, + { name = "flashinfer-python", marker = "python_full_version < '3.12'" }, + { name = "gguf", marker = "python_full_version < '3.12'" 
}, + { name = "grpcio", marker = "python_full_version < '3.12'" }, + { name = "grpcio-reflection", marker = "python_full_version < '3.12'" }, + { name = "ijson", marker = "python_full_version < '3.12'" }, + { name = "lark", marker = "python_full_version < '3.12'" }, + { name = "llguidance", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64') or (python_full_version < '3.12' and platform_machine == 'arm64') or (python_full_version < '3.12' and platform_machine == 'ppc64le') or (python_full_version < '3.12' and platform_machine == 's390x') or (python_full_version < '3.12' and platform_machine == 'x86_64')" }, + { name = "lm-format-enforcer", marker = "python_full_version < '3.12'" }, + { name = "mcp", marker = "python_full_version < '3.12'" }, + { name = "mistral-common", extra = ["image"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "model-hosting-container-standards", marker = "python_full_version < '3.12'" }, + { name = "msgspec", marker = "python_full_version < '3.12'" }, + { name = "ninja", marker = "python_full_version < '3.12'" }, + { name = "numba", marker = "python_full_version < '3.12'" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "openai", marker = "python_full_version < '3.12'" }, + { name = "openai-harmony", marker = "python_full_version < '3.12'" }, + { name = "opencv-python-headless", marker = "python_full_version < '3.12'" }, + { name = "outlines-core", marker = "python_full_version < '3.12'" }, + { name = "partial-json-parser", marker = "python_full_version < '3.12'" }, + { name = "pillow", marker = "python_full_version < '3.12'" }, + { name = "prometheus-client", marker = "python_full_version < '3.12'" }, + { name = "prometheus-fastapi-instrumentator", marker = "python_full_version < '3.12'" }, + { name = 
"protobuf", marker = "python_full_version < '3.12'" }, + { name = "psutil", marker = "python_full_version < '3.12'" }, + { name = "py-cpuinfo", marker = "python_full_version < '3.12'" }, + { name = "pybase64", marker = "python_full_version < '3.12'" }, + { name = "pydantic", marker = "python_full_version < '3.12'" }, + { name = "python-json-logger", marker = "python_full_version < '3.12'" }, + { name = "pyyaml", marker = "python_full_version < '3.12'" }, + { name = "pyzmq", marker = "python_full_version < '3.12'" }, + { name = "ray", extra = ["cgraph"], marker = "(python_full_version < '3.12' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline') or (extra == 'extra-16-pydantic-ai-slim-huggingface' and extra == 'extra-16-pydantic-ai-slim-outlines-vllm-offline')" }, + { name = "regex", marker = "python_full_version < '3.12'" }, + { name = "requests", marker = "python_full_version < '3.12'" }, + { name = "sentencepiece", marker = "python_full_version < '3.12'" }, + { name = "setproctitle", marker = "python_full_version < '3.12'" }, + { name = "tiktoken", marker = "python_full_version < '3.12'" }, + { name = "tokenizers", marker = "python_full_version < '3.12'" }, + { name = "torch", marker = "python_full_version < '3.12'" }, + { name = "torchaudio", marker = "python_full_version < '3.12'" }, + { name = "torchvision", marker = "python_full_version < '3.12'" }, + { name = "tqdm", marker = "python_full_version < '3.12'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, + { name = "watchfiles", marker = "python_full_version < '3.12'" }, + { name = "xgrammar", marker = "(python_full_version < '3.12' and platform_machine == 'aarch64') or (python_full_version < '3.12' and platform_machine == 'arm64') or (python_full_version < '3.12' and platform_machine == 'ppc64le') or (python_full_version < '3.12' 
and platform_machine == 's390x') or (python_full_version < '3.12' and platform_machine == 'x86_64')" }, ] wheels = [ { url = "https://files.pythonhosted.org/packages/e7/62/17dd4b80508b26c1a85db4fd9789d4726d3f36c95856a89419a178dda461/vllm-0.15.1-cp38-abi3-manylinux_2_31_aarch64.whl", hash = "sha256:97bfc79b0c29d242c57b0d395e48d2949a868957587b853deb813a985a41ed6e", size = 461362624, upload-time = "2026-02-05T00:18:12.38Z" }, @@ -10127,13 +10121,13 @@ name = "xgrammar" version = "0.1.29" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "mlx-lm", marker = "platform_machine == 'arm64' and sys_platform == 'darwin'" }, - { name = "numpy" }, - { name = "pydantic" }, - { name = "torch" }, - { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" } }, - { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "typing-extensions" }, + { name = "mlx-lm", marker = "python_full_version < '3.12' and platform_machine == 'arm64' and sys_platform == 'darwin'" }, + { name = "numpy", marker = "python_full_version < '3.12'" }, + { name = "pydantic", marker = "python_full_version < '3.12'" }, + { name = "torch", marker = "python_full_version < '3.12'" }, + { name = "transformers", version = "4.57.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.12'" }, + { name = "triton", marker = "python_full_version < '3.12' and platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/02/a3/70dbe3ffd331a1e7e1ad5a95690a4086e6c7cdb8089f5c7eda712219ccec/xgrammar-0.1.29.tar.gz", hash = "sha256:cf195afa81b489eebf35d4c6f37f27136d05420739ab4a6f7f065c938d7e4baa", size = 2321317, upload-time = "2025-12-19T08:23:54.53Z" } wheels = [ From a735b7322144c1b2cf86dc689a3be3575c7115bb Mon Sep 17 00:00:00 2001 From: David Montague 
<35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 13:04:47 -0700 Subject: [PATCH 07/49] Filter tools at runtime based on environment capabilities MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tools are now registered unconditionally at init time and filtered in get_tools() based on the current environment's capabilities. This fixes the issue where environment_factory or use_environment() could expose tools unsupported by the runtime environment. Also unifies the Capability type — removes the toolset-level Capability (with edit_file) and EditStrategy types, using the environment-level Capability (with replace_str/apply_patch) everywhere. --- docs/environments.md | 2 +- .../pydantic_ai/environments/_base.py | 4 +- .../toolsets/execution_environment.py | 104 +++++++----------- tests/test_environments.py | 95 +++++++++++++++- 4 files changed, 133 insertions(+), 72 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index 0d5474df94..2c8c028414 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -175,7 +175,7 @@ from pydantic_ai.environments.memory import MemoryEnvironment # Only file tools — no shell or search toolset = ExecutionEnvironmentToolset( MemoryEnvironment(), - include=['read_file', 'write_file', 'edit_file'], + include=['read_file', 'write_file', 'replace_str'], ) ``` diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 9385f5beba..93b05412ad 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -25,10 +25,10 @@ 'glob', 'grep', ] -"""Fine-grained capability identifier listing actual method names. +"""Capability identifier corresponding to environment method names. Used in `capabilities` to declare which methods an environment implements. -Toolsets are responsible for mapping these to LLM-facing tool names. 
+These are also used as tool names when exposed via `ExecutionEnvironmentToolset`. """ diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index ef40d282d8..e79be783be 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -8,28 +8,23 @@ from collections.abc import Callable, Iterator, Sequence from contextlib import AsyncExitStack, contextmanager from contextvars import ContextVar, Token -from typing import Any, Literal +from typing import TYPE_CHECKING, Any, Literal from typing_extensions import Self from ..environments._base import ( IMAGE_EXTENSIONS, IMAGE_MEDIA_TYPES, + Capability, ExecutionEnvironment, ) from ..exceptions import ModelRetry from ..messages import BinaryContent from ..toolsets.function import FunctionToolset -Capability = Literal['ls', 'shell', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'] -"""Toolset-level capability used in `include`/`exclude`. - -These are higher-level than the environment's fine-grained capabilities. -The toolset maps these to the appropriate environment capabilities. -""" - -EditStrategy = Literal['replace_str', 'apply_patch'] -"""Specific edit tool strategy. 
Expanded from the `edit_file` capability.""" +if TYPE_CHECKING: + from .._run_context import AgentDepsT, RunContext + from ..toolsets.abstract import ToolsetTool class ExecutionEnvironmentToolset(FunctionToolset[Any]): @@ -69,7 +64,7 @@ def __init__( environment_factory: Callable[[], ExecutionEnvironment] | None = None, include: Sequence[Capability] | None = None, exclude: Sequence[Capability] | None = None, - edit_strategy: EditStrategy | None = None, + edit_strategy: Literal['replace_str', 'apply_patch'] | None = None, require_shell_approval: bool = False, require_write_approval: bool = False, image_support: bool = True, @@ -119,7 +114,7 @@ def __init__( ) self._include: frozenset[Capability] | None = frozenset(include) if include is not None else None self._exclude: frozenset[Capability] = frozenset(exclude) if exclude else frozenset() - self._edit_strategy: EditStrategy | None = edit_strategy + self._edit_strategy: Literal['replace_str', 'apply_patch'] | None = edit_strategy self._image_support = image_support self._max_image_bytes = max_image_bytes self._require_shell_approval = require_shell_approval @@ -128,26 +123,13 @@ def __init__( self._running_count: int = 0 self._exit_stack: AsyncExitStack | None = None - # Register tools based on what we know at init time. - # When using environment_factory, no environment is available yet, so we - # register a full set of tools and let runtime errors catch unsupported capabilities. - self._register_tools(shared_environment) + # Register all tools unconditionally so schemas are built eagerly. + # get_tools() filters at runtime based on the current environment's capabilities. 
+ self._register_tools() - def _resolve_capabilities(self, env: ExecutionEnvironment | None) -> set[Capability]: - """Determine which toolset-level capabilities to register as tools.""" - if env is not None: - env_caps = env.capabilities - available: set[Capability] = set() - # Map env capabilities back to toolset capabilities - for cap in ('ls', 'shell', 'read_file', 'write_file', 'glob', 'grep'): - if cap in env_caps: - available.add(cap) - # Check for edit_file: env has replace_str or apply_patch - if 'replace_str' in env_caps or 'apply_patch' in env_caps: - available.add('edit_file') - else: - # No environment yet — register everything (runtime will error on unsupported) - available = {'ls', 'shell', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'} + def _resolve_capabilities(self, env: ExecutionEnvironment) -> frozenset[Capability]: + """Determine which capabilities are available, applying include/exclude filters.""" + available: frozenset[Capability] = env.capabilities if self._include is not None: available &= self._include @@ -155,40 +137,31 @@ def _resolve_capabilities(self, env: ExecutionEnvironment | None) -> set[Capabil available -= self._exclude return available - def _resolve_edit_tool(self, env: ExecutionEnvironment | None) -> EditStrategy | None: + def _resolve_edit_tool(self, env: ExecutionEnvironment) -> Literal['replace_str', 'apply_patch'] | None: """Determine which edit strategy to use.""" if self._edit_strategy is not None: return self._edit_strategy - if env is not None: - env_caps = env.capabilities - if 'replace_str' in env_caps: - return 'replace_str' - if 'apply_patch' in env_caps: - return 'apply_patch' - return None - # Default when no environment is available - return 'replace_str' - - def _register_tools(self, env: ExecutionEnvironment | None) -> None: - """Register tools dynamically based on capabilities.""" - caps = self._resolve_capabilities(env) - - if 'ls' in caps: - self._register_ls() - if 'shell' in caps: - 
self._register_shell() - if 'read_file' in caps: - self._register_read_file() - if 'write_file' in caps: - self._register_write_file() - if 'edit_file' in caps: - edit_strategy = self._resolve_edit_tool(env) - if edit_strategy == 'replace_str': - self._register_replace_str() - if 'glob' in caps: - self._register_glob() - if 'grep' in caps: - self._register_grep() + env_caps = env.capabilities + if 'replace_str' in env_caps: + return 'replace_str' + if 'apply_patch' in env_caps: + return 'apply_patch' + return None + + def _register_tools(self) -> None: + """Register all tools unconditionally. + + Filtering based on the environment's capabilities and include/exclude + is deferred to ``get_tools()``, which runs at request time when the + active environment is known. + """ + self._register_ls() + self._register_shell() + self._register_read_file() + self._register_write_file() + self._register_replace_str() + self._register_glob() + self._register_grep() def _register_ls(self) -> None: async def ls(path: str = '.') -> str: @@ -367,6 +340,11 @@ async def grep_tool( self.tool(name='grep')(grep_tool) + async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: + all_tools = await super().get_tools(ctx) + caps = self._resolve_capabilities(self.required_environment) + return {name: tool for name, tool in all_tools.items() if name in caps} + @property def tool_name_conflict_hint(self) -> str: return 'Wrap the ExecutionEnvironmentToolset in a PrefixedToolset to avoid name conflicts.' 
@@ -442,7 +420,7 @@ async def __aenter__(self) -> Self: async def __aexit__(self, *args: Any) -> bool | None: if self._environment_factory is not None: state = self._per_run_state.get() - if state is not None: + if state is not None: # pragma: no branch stack, token = state await stack.aclose() self._environment_override.reset(token) diff --git a/tests/test_environments.py b/tests/test_environments.py index 393d0e78d0..accbf0d21b 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -508,7 +508,9 @@ async def test_local_creates_root_dir(tmp_path: Path): async def test_toolset_tool_names(): toolset = ExecutionEnvironmentToolset(LocalEnvironment('.')) - tool_names = sorted(toolset.tools.keys()) + ctx = build_run_context() + tools = await toolset.get_tools(ctx) + tool_names = sorted(tools.keys()) assert tool_names == snapshot(['glob', 'grep', 'ls', 'read_file', 'replace_str', 'shell', 'write_file']) @@ -517,7 +519,9 @@ async def test_toolset_include_flags(): LocalEnvironment('.'), include=[], ) - assert toolset.tools == {} + ctx = build_run_context() + tools = await toolset.get_tools(ctx) + assert tools == {} async def test_toolset_include_shell_only(): @@ -525,7 +529,9 @@ async def test_toolset_include_shell_only(): LocalEnvironment('.'), include=['shell'], ) - assert sorted(toolset.tools.keys()) == ['shell'] + ctx = build_run_context() + tools = await toolset.get_tools(ctx) + assert sorted(tools.keys()) == ['shell'] async def test_toolset_bash_tool(tmp_path: Path): @@ -726,7 +732,8 @@ async def test_toolset_read_continuation_hint(tmp_path: Path): async def test_toolset_require_shell_approval(): """require_shell_approval sets requires_approval on the shell tool.""" - toolset = ExecutionEnvironmentToolset(require_shell_approval=True) + env = MemoryEnvironment(command_handler=lambda cmd: ExecutionResult(output='', exit_code=0)) + toolset = ExecutionEnvironmentToolset(env, require_shell_approval=True) ctx = build_run_context(None) tools = await 
toolset.get_tools(ctx) assert tools['shell'].tool_def.kind == 'unapproved' @@ -736,7 +743,7 @@ async def test_toolset_require_shell_approval(): async def test_toolset_require_write_approval(): """require_write_approval sets requires_approval on write_file and replace_str.""" - toolset = ExecutionEnvironmentToolset(require_write_approval=True) + toolset = ExecutionEnvironmentToolset(MemoryEnvironment(), require_write_approval=True) ctx = build_run_context(None) tools = await toolset.get_tools(ctx) assert tools['write_file'].tool_def.kind == 'unapproved' @@ -749,7 +756,7 @@ async def test_toolset_require_write_approval(): async def test_toolset_default_no_approval(): """By default, no tools require approval.""" - toolset = ExecutionEnvironmentToolset() + toolset = ExecutionEnvironmentToolset(MemoryEnvironment()) ctx = build_run_context(None) tools = await toolset.get_tools(ctx) for tool in tools.values(): @@ -1674,6 +1681,18 @@ async def test_toolset_image_read(tmp_path: Path): assert result.media_type == 'image/png' +async def test_toolset_read_binary_non_image(): + """read_file on a non-image binary file returns a placeholder message.""" + # Store invalid UTF-8 bytes under a non-image extension so MemoryEnvironment returns raw bytes + env = MemoryEnvironment(files={'data.bin': b'\x80\x81\x82'}) + toolset = ExecutionEnvironmentToolset(env) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + async with env: + result = await manager.handle_call(ToolCallPart(tool_name='read_file', args={'path': 'data.bin'})) + assert result == '[Binary file: data.bin — cannot display as text]' + + async def test_toolset_grep_no_matches(tmp_path: Path): """grep with no matches returns 'No matches found.'.""" env = LocalEnvironment(tmp_path) @@ -2827,6 +2846,14 @@ def test_resolve_edit_tool_explicit_strategy(): assert strategy == 'apply_patch' +def test_resolve_edit_tool_auto_replace_str(): + """Auto-detection picks replace_str when supported 
by the environment.""" + env = MemoryEnvironment() + toolset = ExecutionEnvironmentToolset(env) + strategy = toolset._resolve_edit_tool(env) + assert strategy == 'replace_str' + + def test_resolve_edit_tool_apply_patch_fallback(): """When env has apply_patch but not replace_str, resolves to apply_patch.""" from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv @@ -3071,3 +3098,59 @@ async def test_memory_read_image_stored_as_string(): result = await env.read_file('image.png') assert isinstance(result, bytes) assert result == b'fake png data' + + +# --- ExecutionEnvironmentToolset: get_tools filters by runtime capabilities --- + + +async def test_toolset_factory_filters_tools_by_capabilities(): + """When using environment_factory, get_tools() only returns tools supported by the runtime environment.""" + from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + + class _LsOnlyEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'ls'}) + + async def ls(self, path: str = '.') -> list[FileInfo]: + return [] + + toolset = ExecutionEnvironmentToolset(environment_factory=_LsOnlyEnv) + # Before entering, all tools are registered (no env to check) + ctx = build_run_context() + + async with toolset: + tools = await toolset.get_tools(ctx) + + # Only ls should be exposed — the runtime env only supports ls + assert set(tools.keys()) == {'ls'} + + +async def test_toolset_use_environment_filters_tools(): + """use_environment() with a limited env filters tools from get_tools().""" + from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + + class _LsOnlyEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'ls'}) + + # Full-capability shared env registers all tools + full_env = MemoryEnvironment() + toolset = ExecutionEnvironmentToolset(full_env) 
+ ctx = build_run_context() + + async with full_env: + all_tools = await toolset.get_tools(ctx) + assert 'ls' in all_tools + assert 'read_file' in all_tools + assert 'write_file' in all_tools + + # Override with a limited env — only ls should remain + with toolset.use_environment(_LsOnlyEnv()): + limited_tools = await toolset.get_tools(ctx) + assert set(limited_tools.keys()) == {'ls'} + + # After exiting use_environment, all tools are back + restored_tools = await toolset.get_tools(ctx) + assert set(restored_tools.keys()) == set(all_tools.keys()) From 9001b1aec2e114963293ae903569cab8e6c5679f Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 13:23:00 -0700 Subject: [PATCH 08/49] Add ToolName type, rename edit tool to edit_file, filter by tool names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add `ToolName` literal type for tool-level names exposed to the model (`edit_file` instead of `edit_file:replace_str`/`edit_file:apply_patch`) - `include`/`exclude` now accept `ToolName` values (e.g. 
`edit_file`) instead of env-level `Capability` values - Rename `_resolve_capabilities` → `_resolve_tool_names`, which maps env capabilities to tool names then applies include/exclude filtering - Rename `replace_str` tool → `edit_file` (the function exposed to models) - Update `Capability` values: `replace_str` → `edit_file:replace_str`, `apply_patch` → `edit_file:apply_patch` in all environments - Update docs and tests --- docs/environments.md | 6 +- .../pydantic_ai/environments/_base.py | 23 ++++-- .../pydantic_ai/environments/docker.py | 2 +- .../pydantic_ai/environments/local.py | 2 +- .../pydantic_ai/environments/memory.py | 2 +- .../toolsets/execution_environment.py | 72 +++++++++++-------- tests/test_environments.py | 20 +++--- 7 files changed, 77 insertions(+), 50 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index 2c8c028414..16e368de72 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -162,7 +162,7 @@ env = DockerEnvironment.hardened( | `shell` | Execute shell commands | | `read_file` | Read files with line numbers (renders images for multimodal models) | | `write_file` | Create or overwrite files | -| `replace_str` | Edit files by exact string replacement | +| `edit_file` | Edit files by exact string replacement | | `glob` | Find files by pattern | | `grep` | Search file contents with regex | @@ -175,7 +175,7 @@ from pydantic_ai.environments.memory import MemoryEnvironment # Only file tools — no shell or search toolset = ExecutionEnvironmentToolset( MemoryEnvironment(), - include=['read_file', 'write_file', 'replace_str'], + include=['read_file', 'write_file', 'edit_file'], ) ``` @@ -321,7 +321,7 @@ from pydantic_ai.environments._base import Capability class MyCloudEnvironment(ExecutionEnvironment): @property def capabilities(self) -> frozenset[Capability]: - return frozenset({'shell', 'read_file', 'write_file', 'replace_str', 'ls', 'glob', 'grep'}) + return frozenset({'shell', 'read_file', 'write_file', 
'edit_file:replace_str', 'ls', 'glob', 'grep'}) async def shell( self, command: str, *, timeout: float | None = 120, env: dict[str, str] | None = None diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 93b05412ad..fc818497ba 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -20,15 +20,30 @@ 'shell', 'read_file', 'write_file', - 'replace_str', - 'apply_patch', + 'edit_file:replace_str', + 'edit_file:apply_patch', 'glob', 'grep', ] -"""Capability identifier corresponding to environment method names. +"""Capability identifier for environment methods. Used in `capabilities` to declare which methods an environment implements. -These are also used as tool names when exposed via `ExecutionEnvironmentToolset`. +""" + +ToolName = Literal[ + 'ls', + 'shell', + 'read_file', + 'write_file', + 'edit_file', + 'glob', + 'grep', +] +"""Tool name exposed to the model by `ExecutionEnvironmentToolset`. + +Most match `Capability` 1:1, except `edit_file` which maps to either +`edit_file:replace_str` or `edit_file:apply_patch` depending on environment support. +Used for `include`/`exclude` filtering on the toolset. 
""" diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 6345321f21..4851edcdd9 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -348,7 +348,7 @@ def capabilities(self) -> frozenset[Capability]: # pragma: lax no cover 'shell', 'read_file', 'write_file', - 'replace_str', + 'edit_file:replace_str', 'glob', 'grep', } diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index 3d62598bbe..0b513b5aa1 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -136,7 +136,7 @@ def __init__( @property def capabilities(self) -> frozenset[Capability]: - return frozenset({'ls', 'shell', 'read_file', 'write_file', 'replace_str', 'glob', 'grep'}) + return frozenset({'ls', 'shell', 'read_file', 'write_file', 'edit_file:replace_str', 'glob', 'grep'}) async def __aenter__(self) -> Self: self._root_dir.mkdir(parents=True, exist_ok=True) diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index da7e9f2078..528d2b8d80 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -71,7 +71,7 @@ def __init__( @property def capabilities(self) -> frozenset[Capability]: - caps: set[Capability] = {'ls', 'read_file', 'write_file', 'replace_str', 'glob', 'grep'} + caps: set[Capability] = {'ls', 'read_file', 'write_file', 'edit_file:replace_str', 'glob', 'grep'} if self._command_handler is not None: caps.add('shell') return frozenset(caps) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index e79be783be..544baa269d 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ 
b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -15,8 +15,8 @@ from ..environments._base import ( IMAGE_EXTENSIONS, IMAGE_MEDIA_TYPES, - Capability, ExecutionEnvironment, + ToolName, ) from ..exceptions import ModelRetry from ..messages import BinaryContent @@ -62,9 +62,9 @@ def __init__( shared_environment: ExecutionEnvironment | None = None, *, environment_factory: Callable[[], ExecutionEnvironment] | None = None, - include: Sequence[Capability] | None = None, - exclude: Sequence[Capability] | None = None, - edit_strategy: Literal['replace_str', 'apply_patch'] | None = None, + include: Sequence[ToolName] | None = None, + exclude: Sequence[ToolName] | None = None, + edit_strategy: Literal['edit_file:replace_str', 'edit_file:apply_patch'] | None = None, require_shell_approval: bool = False, require_write_approval: bool = False, image_support: bool = True, @@ -82,13 +82,13 @@ def __init__( `async with toolset:` entry. Use this for concurrent runs that need isolation (e.g. separate Docker containers). Mutually exclusive with `shared_environment`. - include: Capabilities to include. `None` means all capabilities - from the environment. Pass an explicit set to restrict to - specific capabilities. - exclude: Capabilities to exclude. `None` defaults to no exclusions. - Pass an explicit set to exclude specific capabilities. + include: Tool names to include. `None` means all tools supported + by the environment. Pass an explicit sequence to restrict to + specific tools. + exclude: Tool names to exclude. `None` defaults to no exclusions. + Pass an explicit sequence to exclude specific tools. edit_strategy: Which edit strategy to use. `None` auto-selects - `'replace_str'` if supported by the environment. + `'edit_file:replace_str'` if supported by the environment. require_shell_approval: Whether the `shell` tool requires human-in-the-loop approval before execution. Recommended for `LocalEnvironment` where commands run directly on the host. 
@@ -112,9 +112,9 @@ def __init__( self._per_run_state: ContextVar[tuple[AsyncExitStack, Token[ExecutionEnvironment | None]] | None] = ContextVar( f'_per_run_state_{id or "environment"}', default=None ) - self._include: frozenset[Capability] | None = frozenset(include) if include is not None else None - self._exclude: frozenset[Capability] = frozenset(exclude) if exclude else frozenset() - self._edit_strategy: Literal['replace_str', 'apply_patch'] | None = edit_strategy + self._include: frozenset[ToolName] | None = frozenset(include) if include is not None else None + self._exclude: frozenset[ToolName] = frozenset(exclude) if exclude else frozenset() + self._edit_strategy: Literal['edit_file:replace_str', 'edit_file:apply_patch'] | None = edit_strategy self._image_support = image_support self._max_image_bytes = max_image_bytes self._require_shell_approval = require_shell_approval @@ -127,25 +127,37 @@ def __init__( # get_tools() filters at runtime based on the current environment's capabilities. 
self._register_tools() - def _resolve_capabilities(self, env: ExecutionEnvironment) -> frozenset[Capability]: - """Determine which capabilities are available, applying include/exclude filters.""" - available: frozenset[Capability] = env.capabilities + def _resolve_tool_names(self, env: ExecutionEnvironment) -> frozenset[str]: + """Determine which tool names to expose, based on the environment's capabilities and include/exclude.""" + # Map env capabilities → tool names (most 1:1, but edit_file:* → edit_file) + tool_names: set[str] = set() + for cap in env.capabilities: + if cap.startswith('edit_file:'): + continue # handled below + tool_names.add(cap) + # Add edit_file if the resolved strategy's capability is available + if self._resolve_edit_tool(env) is not None: + tool_names.add('edit_file') + + # Apply include/exclude at the tool-name level if self._include is not None: - available &= self._include + tool_names &= self._include + tool_names -= self._exclude - available -= self._exclude - return available + return frozenset(tool_names) - def _resolve_edit_tool(self, env: ExecutionEnvironment) -> Literal['replace_str', 'apply_patch'] | None: + def _resolve_edit_tool( + self, env: ExecutionEnvironment + ) -> Literal['edit_file:replace_str', 'edit_file:apply_patch'] | None: """Determine which edit strategy to use.""" if self._edit_strategy is not None: return self._edit_strategy env_caps = env.capabilities - if 'replace_str' in env_caps: - return 'replace_str' - if 'apply_patch' in env_caps: - return 'apply_patch' + if 'edit_file:replace_str' in env_caps: + return 'edit_file:replace_str' + if 'edit_file:apply_patch' in env_caps: + return 'edit_file:apply_patch' return None def _register_tools(self) -> None: @@ -159,7 +171,7 @@ def _register_tools(self) -> None: self._register_shell() self._register_read_file() self._register_write_file() - self._register_replace_str() + self._register_edit_file() self._register_glob() self._register_grep() @@ -262,8 +274,8 @@ async 
def write_file(path: str, content: str) -> str: self.tool(requires_approval=self._require_write_approval)(write_file) - def _register_replace_str(self) -> None: - async def replace_str(path: str, old: str, new: str, replace_all: bool = False) -> str: + def _register_edit_file(self) -> None: + async def edit_file(path: str, old: str, new: str, replace_all: bool = False) -> str: """Edit a file by exact string replacement. The old string must match exactly (including whitespace and indentation). @@ -282,7 +294,7 @@ async def replace_str(path: str, old: str, new: str, replace_all: bool = False) except (FileNotFoundError, ValueError) as e: raise ModelRetry(str(e)) - self.tool(requires_approval=self._require_write_approval)(replace_str) + self.tool(requires_approval=self._require_write_approval)(edit_file) def _register_glob(self) -> None: async def glob_tool(pattern: str, path: str = '.') -> str: @@ -342,8 +354,8 @@ async def grep_tool( async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: all_tools = await super().get_tools(ctx) - caps = self._resolve_capabilities(self.required_environment) - return {name: tool for name, tool in all_tools.items() if name in caps} + tool_names = self._resolve_tool_names(self.required_environment) + return {name: tool for name, tool in all_tools.items() if name in tool_names} @property def tool_name_conflict_hint(self) -> str: diff --git a/tests/test_environments.py b/tests/test_environments.py index accbf0d21b..2822652ca9 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -511,7 +511,7 @@ async def test_toolset_tool_names(): ctx = build_run_context() tools = await toolset.get_tools(ctx) tool_names = sorted(tools.keys()) - assert tool_names == snapshot(['glob', 'grep', 'ls', 'read_file', 'replace_str', 'shell', 'write_file']) + assert tool_names == snapshot(['edit_file', 'glob', 'grep', 'ls', 'read_file', 'shell', 'write_file']) async def test_toolset_include_flags(): @@ 
-581,7 +581,7 @@ async def test_toolset_edit_retry_on_error(tmp_path: Path): with pytest.raises(UnexpectedModelBehavior, match='exceeded max retries count of 0'): await manager.handle_call( ToolCallPart( - tool_name='replace_str', + tool_name='edit_file', args={'path': 'test.txt', 'old': 'nonexistent', 'new': 'replacement'}, ) ) @@ -742,12 +742,12 @@ async def test_toolset_require_shell_approval(): async def test_toolset_require_write_approval(): - """require_write_approval sets requires_approval on write_file and replace_str.""" + """require_write_approval sets requires_approval on write_file and edit_file.""" toolset = ExecutionEnvironmentToolset(MemoryEnvironment(), require_write_approval=True) ctx = build_run_context(None) tools = await toolset.get_tools(ctx) assert tools['write_file'].tool_def.kind == 'unapproved' - assert tools['replace_str'].tool_def.kind == 'unapproved' + assert tools['edit_file'].tool_def.kind == 'unapproved' # read_file and search tools should NOT require approval assert tools['read_file'].tool_def.kind == 'function' assert tools['glob'].tool_def.kind == 'function' @@ -1729,7 +1729,7 @@ async def test_toolset_edit_success(tmp_path: Path): await env.write_file('code.py', 'old_value = 1\n') result = await manager.handle_call( ToolCallPart( - tool_name='replace_str', + tool_name='edit_file', args={'path': 'code.py', 'old': 'old_value', 'new': 'new_value'}, ) ) @@ -2841,9 +2841,9 @@ async def test_memory_read_file_that_is_also_directory_prefix(): def test_resolve_edit_tool_explicit_strategy(): """Passing edit_strategy to constructor overrides auto-detection.""" env = MemoryEnvironment() - toolset = ExecutionEnvironmentToolset(env, edit_strategy='apply_patch') + toolset = ExecutionEnvironmentToolset(env, edit_strategy='edit_file:apply_patch') strategy = toolset._resolve_edit_tool(env) - assert strategy == 'apply_patch' + assert strategy == 'edit_file:apply_patch' def test_resolve_edit_tool_auto_replace_str(): @@ -2851,7 +2851,7 @@ def 
test_resolve_edit_tool_auto_replace_str(): env = MemoryEnvironment() toolset = ExecutionEnvironmentToolset(env) strategy = toolset._resolve_edit_tool(env) - assert strategy == 'replace_str' + assert strategy == 'edit_file:replace_str' def test_resolve_edit_tool_apply_patch_fallback(): @@ -2861,11 +2861,11 @@ def test_resolve_edit_tool_apply_patch_fallback(): class _ApplyPatchEnv(BaseEnv): @property def capabilities(self) -> frozenset[EnvCapability]: - return frozenset({'apply_patch'}) + return frozenset({'edit_file:apply_patch'}) toolset = ExecutionEnvironmentToolset(_ApplyPatchEnv()) strategy = toolset._resolve_edit_tool(_ApplyPatchEnv()) - assert strategy == 'apply_patch' + assert strategy == 'edit_file:apply_patch' def test_resolve_edit_tool_neither(): From c66ea20d610df4a529c9a58dd633493a2a273c21 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 16:10:21 -0700 Subject: [PATCH 09/49] Rename Capability to EnvCapability, fix edit strategy fallback, fix grep glob filtering - Rename `Capability` to `EnvCapability` for clarity - Remove unused `instructions()` method from base class - Fix `_resolve_edit_tool` to fall back to auto-detection when env doesn't support the explicit strategy - Fix `MemoryEnvironment.grep` to skip glob filtering for exact file paths, matching `LocalEnvironment` behavior --- docs/environments.md | 4 +- .../pydantic_ai/environments/_base.py | 14 ++--- .../pydantic_ai/environments/docker.py | 6 +-- .../pydantic_ai/environments/local.py | 4 +- .../pydantic_ai/environments/memory.py | 12 +++-- .../toolsets/execution_environment.py | 11 ++-- tests/test_environments.py | 52 ++++++++++--------- 7 files changed, 56 insertions(+), 47 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index 16e368de72..6438c36582 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -316,11 +316,11 @@ You can implement 
[`ExecutionEnvironment`][pydantic_ai.environments.ExecutionEnv from typing import Literal from pydantic_ai.environments import ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo -from pydantic_ai.environments._base import Capability +from pydantic_ai.environments._base import EnvCapability class MyCloudEnvironment(ExecutionEnvironment): @property - def capabilities(self) -> frozenset[Capability]: + def capabilities(self) -> frozenset[EnvCapability]: return frozenset({'shell', 'read_file', 'write_file', 'edit_file:replace_str', 'ls', 'glob', 'grep'}) async def shell( diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index fc818497ba..715f641d1d 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -13,9 +13,9 @@ from typing_extensions import Self -# --- Capability type alias --- +# --- Type aliases --- -Capability = Literal[ +EnvCapability = Literal[ 'ls', 'shell', 'read_file', @@ -25,7 +25,7 @@ 'glob', 'grep', ] -"""Capability identifier for environment methods. +"""Identifier for an environment method. Used in `capabilities` to declare which methods an environment implements. """ @@ -41,7 +41,7 @@ ] """Tool name exposed to the model by `ExecutionEnvironmentToolset`. -Most match `Capability` 1:1, except `edit_file` which maps to either +Most match `EnvCapability` 1:1, except `edit_file` which maps to either `edit_file:replace_str` or `edit_file:apply_patch` depending on environment support. Used for `include`/`exclude` filtering on the toolset. """ @@ -195,11 +195,11 @@ class ExecutionEnvironment(ABC): methods that match their declared capabilities. """ - # --- Capability introspection --- + # --- EnvCapability introspection --- @property @abstractmethod - def capabilities(self) -> frozenset[Capability]: + def capabilities(self) -> frozenset[EnvCapability]: """Capabilities this environment supports (high-level). 
Used by toolsets to decide which tools to register. Only methods @@ -207,7 +207,7 @@ def capabilities(self) -> frozenset[Capability]: """ ... - def instructions(self, capability: Capability) -> str | None: + def instructions(self, capability: EnvCapability) -> str | None: """Per-capability instructions for the LLM. Override to provide environment-specific hints that toolsets include diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 4851edcdd9..c4acfc8332 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -20,7 +20,7 @@ from ._base import ( IMAGE_EXTENSIONS, MAX_OUTPUT_CHARS, - Capability, + EnvCapability, ExecutionEnvironment, ExecutionProcess, ExecutionResult, @@ -341,7 +341,7 @@ def hardened( ) @property - def capabilities(self) -> frozenset[Capability]: # pragma: lax no cover + def capabilities(self) -> frozenset[EnvCapability]: # pragma: lax no cover return frozenset( { 'ls', @@ -354,7 +354,7 @@ def capabilities(self) -> frozenset[Capability]: # pragma: lax no cover } ) - def instructions(self, capability: Capability) -> str | None: + def instructions(self, capability: EnvCapability) -> str | None: if capability == 'grep': # pragma: lax no cover return 'Uses POSIX basic regex, not Python `re` syntax.' 
elif capability == 'glob': # pragma: lax no cover diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index 0b513b5aa1..9d9a661261 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -18,7 +18,7 @@ from ._base import ( IMAGE_EXTENSIONS, MAX_OUTPUT_CHARS, - Capability, + EnvCapability, ExecutionEnvironment, ExecutionProcess, ExecutionResult, @@ -135,7 +135,7 @@ def __init__( self._inherit_env = inherit_env @property - def capabilities(self) -> frozenset[Capability]: + def capabilities(self) -> frozenset[EnvCapability]: return frozenset({'ls', 'shell', 'read_file', 'write_file', 'edit_file:replace_str', 'glob', 'grep'}) async def __aenter__(self) -> Self: diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index 528d2b8d80..7fc829c291 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -24,7 +24,7 @@ ) if TYPE_CHECKING: - from ._base import Capability + from ._base import EnvCapability class MemoryEnvironment(ExecutionEnvironment): @@ -70,8 +70,8 @@ def __init__( self._command_handler = command_handler @property - def capabilities(self) -> frozenset[Capability]: - caps: set[Capability] = {'ls', 'read_file', 'write_file', 'edit_file:replace_str', 'glob', 'grep'} + def capabilities(self) -> frozenset[EnvCapability]: + caps: set[EnvCapability] = {'ls', 'read_file', 'write_file', 'edit_file:replace_str', 'glob', 'grep'} if self._command_handler is not None: caps.add('shell') return frozenset(caps) @@ -234,6 +234,8 @@ async def grep( normalized = self._normalize(path or '.') compiled = re.compile(pattern) + is_exact_file = normalized != '.' 
and normalized in self._files + results: list[str] = [] for file_path in sorted(self._files): # Path filtering @@ -243,8 +245,8 @@ async def grep( elif not file_path.startswith(normalized + '/'): continue - # Glob filtering - if glob_pattern and not fnmatch.fnmatch(posixpath.basename(file_path), glob_pattern): + # Glob filtering (skip for exact file matches, matching LocalEnvironment behavior) + if not is_exact_file and glob_pattern and not fnmatch.fnmatch(posixpath.basename(file_path), glob_pattern): continue # Skip hidden files diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index 544baa269d..d5315aac1a 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -150,10 +150,15 @@ def _resolve_tool_names(self, env: ExecutionEnvironment) -> frozenset[str]: def _resolve_edit_tool( self, env: ExecutionEnvironment ) -> Literal['edit_file:replace_str', 'edit_file:apply_patch'] | None: - """Determine which edit strategy to use.""" - if self._edit_strategy is not None: - return self._edit_strategy + """Determine which edit strategy to use. + + If ``edit_strategy`` was explicitly set and the environment supports it, + that strategy is used. Otherwise falls back to auto-detection + (preferring ``replace_str`` over ``apply_patch``). 
+ """ env_caps = env.capabilities + if self._edit_strategy is not None and self._edit_strategy in env_caps: + return self._edit_strategy if 'edit_file:replace_str' in env_caps: return 'edit_file:replace_str' if 'edit_file:apply_patch' in env_caps: diff --git a/tests/test_environments.py b/tests/test_environments.py index 2822652ca9..8ad9562c99 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -807,14 +807,6 @@ async def test_toolset_use_environment_no_default(): assert toolset.environment is None -async def test_toolset_instructions(): - """Environment instructions is accessible for each tool.""" - env = LocalEnvironment('.') - # LocalEnvironment returns None for all tool descriptions by default - assert env.instructions('shell') is None - assert env.instructions('read_file') is None - - async def test_toolset_tool_name_conflict_hint(): toolset = ExecutionEnvironmentToolset(LocalEnvironment('.')) assert 'PrefixedToolset' in toolset.tool_name_conflict_hint @@ -2839,24 +2831,36 @@ async def test_memory_read_file_that_is_also_directory_prefix(): def test_resolve_edit_tool_explicit_strategy(): - """Passing edit_strategy to constructor overrides auto-detection.""" - env = MemoryEnvironment() + """Explicit edit_strategy is used when the environment supports it.""" + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv + + class _BothEditEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvCapability]: + return frozenset({'edit_file:replace_str', 'edit_file:apply_patch'}) + + env = _BothEditEnv() + toolset = ExecutionEnvironmentToolset(env, edit_strategy='edit_file:apply_patch') + assert toolset._resolve_edit_tool(env) == 'edit_file:apply_patch' + + +def test_resolve_edit_tool_explicit_strategy_unsupported_falls_back(): + """Explicit edit_strategy falls back to auto-detection when the env doesn't support it.""" + env = MemoryEnvironment() # only has edit_file:replace_str toolset = 
ExecutionEnvironmentToolset(env, edit_strategy='edit_file:apply_patch') - strategy = toolset._resolve_edit_tool(env) - assert strategy == 'edit_file:apply_patch' + assert toolset._resolve_edit_tool(env) == 'edit_file:replace_str' def test_resolve_edit_tool_auto_replace_str(): """Auto-detection picks replace_str when supported by the environment.""" env = MemoryEnvironment() toolset = ExecutionEnvironmentToolset(env) - strategy = toolset._resolve_edit_tool(env) - assert strategy == 'edit_file:replace_str' + assert toolset._resolve_edit_tool(env) == 'edit_file:replace_str' def test_resolve_edit_tool_apply_patch_fallback(): """When env has apply_patch but not replace_str, resolves to apply_patch.""" - from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _ApplyPatchEnv(BaseEnv): @property @@ -2864,13 +2868,12 @@ def capabilities(self) -> frozenset[EnvCapability]: return frozenset({'edit_file:apply_patch'}) toolset = ExecutionEnvironmentToolset(_ApplyPatchEnv()) - strategy = toolset._resolve_edit_tool(_ApplyPatchEnv()) - assert strategy == 'edit_file:apply_patch' + assert toolset._resolve_edit_tool(_ApplyPatchEnv()) == 'edit_file:apply_patch' def test_resolve_edit_tool_neither(): """When env has neither replace_str nor apply_patch, returns None.""" - from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _NoEditEnv(BaseEnv): @property @@ -2878,8 +2881,7 @@ def capabilities(self) -> frozenset[EnvCapability]: return frozenset({'ls'}) toolset = ExecutionEnvironmentToolset(_NoEditEnv()) - strategy = toolset._resolve_edit_tool(_NoEditEnv()) - assert strategy is None + assert toolset._resolve_edit_tool(_NoEditEnv()) is None # --- ExecutionEnvironmentToolset: ls formatting through 
toolset --- @@ -2887,7 +2889,7 @@ def capabilities(self) -> frozenset[EnvCapability]: async def test_toolset_ls_error_handling(): """Toolset ls returns error string when environment raises.""" - from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _ErrorLsEnv(BaseEnv): @property @@ -2918,7 +2920,7 @@ async def test_toolset_ls_formats_dirs(): async def test_toolset_ls_formats_files_without_size(): """Toolset ls formats file entries without size (just the name).""" - from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _NoSizeEnv(BaseEnv): @property @@ -2938,7 +2940,7 @@ async def ls(self, path: str = '.') -> list[FileInfo]: async def test_toolset_ls_empty_directory(): """Toolset ls returns 'Empty directory.' 
for empty listings.""" - from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _EmptyLsEnv(BaseEnv): @property @@ -3105,7 +3107,7 @@ async def test_memory_read_image_stored_as_string(): async def test_toolset_factory_filters_tools_by_capabilities(): """When using environment_factory, get_tools() only returns tools supported by the runtime environment.""" - from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _LsOnlyEnv(BaseEnv): @property @@ -3128,7 +3130,7 @@ async def ls(self, path: str = '.') -> list[FileInfo]: async def test_toolset_use_environment_filters_tools(): """use_environment() with a limited env filters tools from get_tools().""" - from pydantic_ai.environments._base import Capability as EnvCapability, ExecutionEnvironment as BaseEnv + from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _LsOnlyEnv(BaseEnv): @property From a7c70d62d8148397f75ea844b2c62361f68b2569 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 16:17:04 -0700 Subject: [PATCH 10/49] Rename Capability to EnvCapability, improve _resolve_edit_tool fallback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename `Capability` → `EnvCapability` to free up the name for other use - `_resolve_edit_tool` now falls back to auto-detection when the explicit `edit_strategy` isn't supported by the environment - Remove `instructions` method from base class and DockerEnvironment, along with associated tests - Update all imports and type annotations across environments and tests --- .../pydantic_ai/environments/_base.py | 21 ------------------- 
.../pydantic_ai/environments/docker.py | 9 -------- tests/test_environments.py | 21 ------------------- 3 files changed, 51 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 715f641d1d..29a93f55eb 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -207,27 +207,6 @@ def capabilities(self) -> frozenset[EnvCapability]: """ ... - def instructions(self, capability: EnvCapability) -> str | None: - """Per-capability instructions for the LLM. - - Override to provide environment-specific hints that toolsets include - in the tool description shown to the model, e.g.:: - - def instructions(self, capability): - if capability == 'shell': - return 'Bash in Docker container, numpy/pandas installed' - if capability == 'grep': - return 'Uses POSIX basic regex, not Python re syntax' - return None - - Args: - capability: The capability name (e.g. `'shell'`). - - Returns: - Instruction text for the LLM, or None for no extra instructions. - """ - return None - # --- Tool methods --- # All raise NotImplementedError by default. Concrete subclasses override # the methods that match their declared capabilities. diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index c4acfc8332..51accde3d5 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -354,15 +354,6 @@ def capabilities(self) -> frozenset[EnvCapability]: # pragma: lax no cover } ) - def instructions(self, capability: EnvCapability) -> str | None: - if capability == 'grep': # pragma: lax no cover - return 'Uses POSIX basic regex, not Python `re` syntax.' - elif capability == 'glob': # pragma: lax no cover - return 'Uses `find` for pattern matching; `**` is not supported.' 
- elif capability == 'shell': # pragma: lax no cover - return 'Runs inside a Docker container.' - return None # pragma: lax no cover - async def __aenter__(self) -> Self: await anyio.to_thread.run_sync(self._setup) return self diff --git a/tests/test_environments.py b/tests/test_environments.py index 8ad9562c99..1698bfdd6d 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1728,20 +1728,6 @@ async def test_toolset_edit_success(tmp_path: Path): assert result == snapshot('Replaced 1 occurrence in code.py.') -async def test_toolset_with_custom_env_instructions(): - """Environment instructions is used per-tool.""" - - class CustomEnv(MemoryEnvironment): - def instructions(self, capability: str) -> str | None: - if capability == 'grep': - return 'Custom grep description.' - return None - - env = CustomEnv() - assert env.instructions('grep') == 'Custom grep description.' - assert env.instructions('read_file') is None - - async def test_toolset_lifecycle_ref_counting(tmp_path: Path): """Multiple context manager entries share the environment.""" env = LocalEnvironment(tmp_path) @@ -2007,13 +1993,6 @@ async def test_docker_create_process(mock_docker_sandbox: Any) -> None: assert proc is not None -async def test_docker_instructions(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.instructions provides per-tool descriptions.""" - grep_desc = mock_docker_sandbox.instructions('grep') - assert grep_desc is not None - assert 'POSIX' in grep_desc - - async def test_docker_is_alive(mock_docker_sandbox: Any) -> None: """DockerEnvironment.is_alive checks container status.""" result = await mock_docker_sandbox.is_alive() From 282819c73931e215597ae206839b500787cb4756 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sat, 21 Feb 2026 17:17:10 -0800 Subject: [PATCH 11/49] Unify EnvCapability + ToolName into single EnvToolName type Collapse the two separate Literal types (EnvCapability for what environments 
can do, ToolName for what's exposed to models) into a single EnvToolName, since they now map 1:1. Remove the premature apply_patch method, the edit_strategy parameter, and the _resolve_edit_tool() machinery. --- docs/environments.md | 7 +- .../pydantic_ai/environments/__init__.py | 3 +- .../pydantic_ai/environments/_base.py | 40 ++------ .../pydantic_ai/environments/docker.py | 6 +- .../pydantic_ai/environments/local.py | 6 +- .../pydantic_ai/environments/memory.py | 6 +- .../toolsets/execution_environment.py | 46 ++------- tests/test_environments.py | 95 +++---------------- 8 files changed, 42 insertions(+), 167 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index 6438c36582..a6c905b09d 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -315,13 +315,12 @@ You can implement [`ExecutionEnvironment`][pydantic_ai.environments.ExecutionEnv ```python {title="environments_custom.py" test="skip" lint="skip"} from typing import Literal -from pydantic_ai.environments import ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo -from pydantic_ai.environments._base import EnvCapability +from pydantic_ai.environments import EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo class MyCloudEnvironment(ExecutionEnvironment): @property - def capabilities(self) -> frozenset[EnvCapability]: - return frozenset({'shell', 'read_file', 'write_file', 'edit_file:replace_str', 'ls', 'glob', 'grep'}) + def capabilities(self) -> frozenset[EnvToolName]: + return frozenset({'shell', 'read_file', 'write_file', 'edit_file', 'ls', 'glob', 'grep'}) async def shell( self, command: str, *, timeout: float | None = 120, env: dict[str, str] | None = None diff --git a/pydantic_ai_slim/pydantic_ai/environments/__init__.py b/pydantic_ai_slim/pydantic_ai/environments/__init__.py index da2b4a950b..d0f7b4c294 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/environments/__init__.py 
@@ -16,9 +16,10 @@ from pydantic_ai.toolsets.execution_environment import ExecutionEnvironmentToolset -from ._base import ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo +from ._base import EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo __all__ = ( + 'EnvToolName', 'ExecutionResult', 'ExecutionEnvironment', 'ExecutionEnvironmentToolset', diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 29a93f55eb..829e7359aa 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -15,22 +15,7 @@ # --- Type aliases --- -EnvCapability = Literal[ - 'ls', - 'shell', - 'read_file', - 'write_file', - 'edit_file:replace_str', - 'edit_file:apply_patch', - 'glob', - 'grep', -] -"""Identifier for an environment method. - -Used in `capabilities` to declare which methods an environment implements. -""" - -ToolName = Literal[ +EnvToolName = Literal[ 'ls', 'shell', 'read_file', @@ -39,11 +24,10 @@ 'glob', 'grep', ] -"""Tool name exposed to the model by `ExecutionEnvironmentToolset`. +"""Tool name for an environment capability. -Most match `EnvCapability` 1:1, except `edit_file` which maps to either -`edit_file:replace_str` or `edit_file:apply_patch` depending on environment support. -Used for `include`/`exclude` filtering on the toolset. +Used in `capabilities` to declare which methods an environment implements, +and by `ExecutionEnvironmentToolset` for `include`/`exclude` filtering. """ @@ -195,11 +179,11 @@ class ExecutionEnvironment(ABC): methods that match their declared capabilities. """ - # --- EnvCapability introspection --- + # --- Capability introspection --- @property @abstractmethod - def capabilities(self) -> frozenset[EnvCapability]: + def capabilities(self) -> frozenset[EnvToolName]: """Capabilities this environment supports (high-level). Used by toolsets to decide which tools to register. 
Only methods @@ -303,18 +287,6 @@ async def replace_str( """ raise NotImplementedError(f'{type(self).__name__} does not support replace_str.') - async def apply_patch(self, path: str, patch: str) -> str: - """Apply a unified diff patch to a file. - - Args: - path: The file path within the environment. - patch: The unified diff patch content. - - Returns: - The resulting file content after applying the patch. - """ - raise NotImplementedError(f'{type(self).__name__} does not support apply_patch.') - async def glob(self, pattern: str, *, path: str = '.') -> list[str]: """Find files matching a glob pattern. diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 51accde3d5..582e233e67 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -20,7 +20,7 @@ from ._base import ( IMAGE_EXTENSIONS, MAX_OUTPUT_CHARS, - EnvCapability, + EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult, @@ -341,14 +341,14 @@ def hardened( ) @property - def capabilities(self) -> frozenset[EnvCapability]: # pragma: lax no cover + def capabilities(self) -> frozenset[EnvToolName]: # pragma: lax no cover return frozenset( { 'ls', 'shell', 'read_file', 'write_file', - 'edit_file:replace_str', + 'edit_file', 'glob', 'grep', } diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index 9d9a661261..155d622bfe 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -18,7 +18,7 @@ from ._base import ( IMAGE_EXTENSIONS, MAX_OUTPUT_CHARS, - EnvCapability, + EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult, @@ -135,8 +135,8 @@ def __init__( self._inherit_env = inherit_env @property - def capabilities(self) -> frozenset[EnvCapability]: - return frozenset({'ls', 'shell', 'read_file', 'write_file', 
'edit_file:replace_str', 'glob', 'grep'}) + def capabilities(self) -> frozenset[EnvToolName]: + return frozenset({'ls', 'shell', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'}) async def __aenter__(self) -> Self: self._root_dir.mkdir(parents=True, exist_ok=True) diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index 7fc829c291..159f2f2a27 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -24,7 +24,7 @@ ) if TYPE_CHECKING: - from ._base import EnvCapability + from ._base import EnvToolName class MemoryEnvironment(ExecutionEnvironment): @@ -70,8 +70,8 @@ def __init__( self._command_handler = command_handler @property - def capabilities(self) -> frozenset[EnvCapability]: - caps: set[EnvCapability] = {'ls', 'read_file', 'write_file', 'edit_file:replace_str', 'glob', 'grep'} + def capabilities(self) -> frozenset[EnvToolName]: + caps: set[EnvToolName] = {'ls', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'} if self._command_handler is not None: caps.add('shell') return frozenset(caps) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index d5315aac1a..ab8932b3b6 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -15,8 +15,8 @@ from ..environments._base import ( IMAGE_EXTENSIONS, IMAGE_MEDIA_TYPES, + EnvToolName, ExecutionEnvironment, - ToolName, ) from ..exceptions import ModelRetry from ..messages import BinaryContent @@ -62,9 +62,8 @@ def __init__( shared_environment: ExecutionEnvironment | None = None, *, environment_factory: Callable[[], ExecutionEnvironment] | None = None, - include: Sequence[ToolName] | None = None, - exclude: Sequence[ToolName] | None = None, - edit_strategy: Literal['edit_file:replace_str', 
'edit_file:apply_patch'] | None = None, + include: Sequence[EnvToolName] | None = None, + exclude: Sequence[EnvToolName] | None = None, require_shell_approval: bool = False, require_write_approval: bool = False, image_support: bool = True, @@ -87,8 +86,6 @@ def __init__( specific tools. exclude: Tool names to exclude. `None` defaults to no exclusions. Pass an explicit sequence to exclude specific tools. - edit_strategy: Which edit strategy to use. `None` auto-selects - `'edit_file:replace_str'` if supported by the environment. require_shell_approval: Whether the `shell` tool requires human-in-the-loop approval before execution. Recommended for `LocalEnvironment` where commands run directly on the host. @@ -112,9 +109,8 @@ def __init__( self._per_run_state: ContextVar[tuple[AsyncExitStack, Token[ExecutionEnvironment | None]] | None] = ContextVar( f'_per_run_state_{id or "environment"}', default=None ) - self._include: frozenset[ToolName] | None = frozenset(include) if include is not None else None - self._exclude: frozenset[ToolName] = frozenset(exclude) if exclude else frozenset() - self._edit_strategy: Literal['edit_file:replace_str', 'edit_file:apply_patch'] | None = edit_strategy + self._include: frozenset[EnvToolName] | None = frozenset(include) if include is not None else None + self._exclude: frozenset[EnvToolName] = frozenset(exclude) if exclude else frozenset() self._image_support = image_support self._max_image_bytes = max_image_bytes self._require_shell_approval = require_shell_approval @@ -129,42 +125,14 @@ def __init__( def _resolve_tool_names(self, env: ExecutionEnvironment) -> frozenset[str]: """Determine which tool names to expose, based on the environment's capabilities and include/exclude.""" - # Map env capabilities → tool names (most 1:1, but edit_file:* → edit_file) - tool_names: set[str] = set() - for cap in env.capabilities: - if cap.startswith('edit_file:'): - continue # handled below - tool_names.add(cap) - - # Add edit_file if the resolved 
strategy's capability is available - if self._resolve_edit_tool(env) is not None: - tool_names.add('edit_file') - - # Apply include/exclude at the tool-name level + tool_names: set[str] = set(env.capabilities) + if self._include is not None: tool_names &= self._include tool_names -= self._exclude return frozenset(tool_names) - def _resolve_edit_tool( - self, env: ExecutionEnvironment - ) -> Literal['edit_file:replace_str', 'edit_file:apply_patch'] | None: - """Determine which edit strategy to use. - - If ``edit_strategy`` was explicitly set and the environment supports it, - that strategy is used. Otherwise falls back to auto-detection - (preferring ``replace_str`` over ``apply_patch``). - """ - env_caps = env.capabilities - if self._edit_strategy is not None and self._edit_strategy in env_caps: - return self._edit_strategy - if 'edit_file:replace_str' in env_caps: - return 'edit_file:replace_str' - if 'edit_file:apply_patch' in env_caps: - return 'edit_file:apply_patch' - return None - def _register_tools(self) -> None: """Register all tools unconditionally. 
diff --git a/tests/test_environments.py b/tests/test_environments.py index 1698bfdd6d..ea1f21bacb 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -8,15 +8,21 @@ import tarfile from pathlib import Path from typing import Any -from unittest.mock import MagicMock +from unittest.mock import MagicMock, patch as mock_patch import pytest from inline_snapshot import snapshot -from pydantic_ai import ToolCallPart +from pydantic_ai import Agent, BinaryContent, ToolCallPart from pydantic_ai._run_context import RunContext from pydantic_ai._tool_manager import ToolManager -from pydantic_ai.environments import ExecutionEnvironmentToolset, ExecutionResult, FileInfo +from pydantic_ai.environments import ( + EnvToolName, + ExecutionEnvironment as BaseEnv, + ExecutionEnvironmentToolset, + ExecutionResult, + FileInfo, +) from pydantic_ai.environments._base import ( apply_edit, build_glob_cmd, @@ -28,7 +34,7 @@ parse_glob_output, shell_escape, ) -from pydantic_ai.environments.local import LocalEnvironment +from pydantic_ai.environments.local import LocalEnvironment, LocalEnvironmentProcess from pydantic_ai.environments.memory import MemoryEnvironment from pydantic_ai.exceptions import UnexpectedModelBehavior from pydantic_ai.models.test import TestModel @@ -1280,7 +1286,6 @@ def test_docker_sandbox_instantiation(): async def test_agent_with_execution_toolset(): """Agent with ExecutionEnvironmentToolset runs end-to-end using TestModel and MemoryEnvironment.""" - from pydantic_ai import Agent env = MemoryEnvironment( files={'data.txt': 'hello world\n'}, @@ -1653,7 +1658,6 @@ async def test_toolset_image_too_large(tmp_path: Path): async def test_toolset_image_read(tmp_path: Path): """read_file on an image returns BinaryContent.""" - from pydantic_ai.messages import BinaryContent env = LocalEnvironment(tmp_path) toolset = ExecutionEnvironmentToolset(env) @@ -2192,8 +2196,6 @@ async def __aenter__(self): async def test_local_process_stdin_not_available(): 
"""LocalEnvironmentProcess.send raises when stdin is None.""" - from pydantic_ai.environments.local import LocalEnvironmentProcess - mock_proc = MagicMock() mock_proc.stdin = None proc = LocalEnvironmentProcess(mock_proc) @@ -2203,8 +2205,6 @@ async def test_local_process_stdin_not_available(): async def test_local_process_stdout_not_available(): """LocalEnvironmentProcess.recv raises when stdout is None.""" - from pydantic_ai.environments.local import LocalEnvironmentProcess - mock_proc = MagicMock() mock_proc.stdout = None proc = LocalEnvironmentProcess(mock_proc) @@ -2214,8 +2214,6 @@ async def test_local_process_stdout_not_available(): async def test_local_process_stderr_not_available(): """LocalEnvironmentProcess.recv_stderr raises when stderr is None.""" - from pydantic_ai.environments.local import LocalEnvironmentProcess - mock_proc = MagicMock() mock_proc.stderr = None proc = LocalEnvironmentProcess(mock_proc) @@ -2427,8 +2425,6 @@ def timeout_result(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: @docker_skip async def test_docker_setup_teardown() -> None: """DockerEnvironment._setup and _teardown with mocked Docker client.""" - from unittest.mock import patch as mock_patch - sandbox = DockerEnvironment(image='python:3.12-slim') mock_client = MagicMock() @@ -2465,8 +2461,6 @@ async def test_docker_teardown_cleanup_errors() -> None: @docker_skip async def test_docker_setup_with_all_options() -> None: """DockerEnvironment._setup passes all container options.""" - from unittest.mock import patch as mock_patch - sandbox = DockerEnvironment( image='python:3.12-slim', env_vars={'KEY': 'val'}, @@ -2809,70 +2803,15 @@ async def test_memory_read_file_that_is_also_directory_prefix(): # --- ExecutionEnvironmentToolset: capability and edit strategy resolution --- -def test_resolve_edit_tool_explicit_strategy(): - """Explicit edit_strategy is used when the environment supports it.""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as 
BaseEnv - - class _BothEditEnv(BaseEnv): - @property - def capabilities(self) -> frozenset[EnvCapability]: - return frozenset({'edit_file:replace_str', 'edit_file:apply_patch'}) - - env = _BothEditEnv() - toolset = ExecutionEnvironmentToolset(env, edit_strategy='edit_file:apply_patch') - assert toolset._resolve_edit_tool(env) == 'edit_file:apply_patch' - - -def test_resolve_edit_tool_explicit_strategy_unsupported_falls_back(): - """Explicit edit_strategy falls back to auto-detection when the env doesn't support it.""" - env = MemoryEnvironment() # only has edit_file:replace_str - toolset = ExecutionEnvironmentToolset(env, edit_strategy='edit_file:apply_patch') - assert toolset._resolve_edit_tool(env) == 'edit_file:replace_str' - - -def test_resolve_edit_tool_auto_replace_str(): - """Auto-detection picks replace_str when supported by the environment.""" - env = MemoryEnvironment() - toolset = ExecutionEnvironmentToolset(env) - assert toolset._resolve_edit_tool(env) == 'edit_file:replace_str' - - -def test_resolve_edit_tool_apply_patch_fallback(): - """When env has apply_patch but not replace_str, resolves to apply_patch.""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv - - class _ApplyPatchEnv(BaseEnv): - @property - def capabilities(self) -> frozenset[EnvCapability]: - return frozenset({'edit_file:apply_patch'}) - - toolset = ExecutionEnvironmentToolset(_ApplyPatchEnv()) - assert toolset._resolve_edit_tool(_ApplyPatchEnv()) == 'edit_file:apply_patch' - - -def test_resolve_edit_tool_neither(): - """When env has neither replace_str nor apply_patch, returns None.""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv - - class _NoEditEnv(BaseEnv): - @property - def capabilities(self) -> frozenset[EnvCapability]: - return frozenset({'ls'}) - - toolset = ExecutionEnvironmentToolset(_NoEditEnv()) - assert toolset._resolve_edit_tool(_NoEditEnv()) is None - - # --- 
ExecutionEnvironmentToolset: ls formatting through toolset --- async def test_toolset_ls_error_handling(): """Toolset ls returns error string when environment raises.""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _ErrorLsEnv(BaseEnv): @property - def capabilities(self) -> frozenset[EnvCapability]: + def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'ls'}) async def ls(self, path: str = '.') -> list[FileInfo]: @@ -2899,11 +2838,10 @@ async def test_toolset_ls_formats_dirs(): async def test_toolset_ls_formats_files_without_size(): """Toolset ls formats file entries without size (just the name).""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _NoSizeEnv(BaseEnv): @property - def capabilities(self) -> frozenset[EnvCapability]: + def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'ls'}) async def ls(self, path: str = '.') -> list[FileInfo]: @@ -2919,11 +2857,10 @@ async def ls(self, path: str = '.') -> list[FileInfo]: async def test_toolset_ls_empty_directory(): """Toolset ls returns 'Empty directory.' 
for empty listings.""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _EmptyLsEnv(BaseEnv): @property - def capabilities(self) -> frozenset[EnvCapability]: + def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'ls'}) async def ls(self, path: str = '.') -> list[FileInfo]: @@ -3086,11 +3023,10 @@ async def test_memory_read_image_stored_as_string(): async def test_toolset_factory_filters_tools_by_capabilities(): """When using environment_factory, get_tools() only returns tools supported by the runtime environment.""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _LsOnlyEnv(BaseEnv): @property - def capabilities(self) -> frozenset[EnvCapability]: + def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'ls'}) async def ls(self, path: str = '.') -> list[FileInfo]: @@ -3109,11 +3045,10 @@ async def ls(self, path: str = '.') -> list[FileInfo]: async def test_toolset_use_environment_filters_tools(): """use_environment() with a limited env filters tools from get_tools().""" - from pydantic_ai.environments._base import EnvCapability, ExecutionEnvironment as BaseEnv class _LsOnlyEnv(BaseEnv): @property - def capabilities(self) -> frozenset[EnvCapability]: + def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'ls'}) # Full-capability shared env registers all tools From b635b4838127d0d346e0c54948a64fdd653fec1e Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 15:49:31 +0000 Subject: [PATCH 12/49] Move Docker shell builders to docker.py, fix grep on hidden files - Move shell_escape, build_read_file_cmd, build_grep_cmd, build_glob_cmd, filter_grep_count_output, parse_glob_output from _base.py to docker.py as private helpers (_shell_escape, etc.) - Fix grep skipping explicitly-specified hidden files in LocalEnvironment and MemoryEnvironment (e.g. 
grep(pattern, path='.env') now works) --- .../pydantic_ai/environments/_base.py | 71 -------------- .../pydantic_ai/environments/docker.py | 82 +++++++++++++--- .../pydantic_ai/environments/local.py | 9 +- .../pydantic_ai/environments/memory.py | 4 +- tests/test_environments.py | 96 +++++++++++++------ 5 files changed, 144 insertions(+), 118 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 829e7359aa..ed82b7f67c 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -355,11 +355,6 @@ async def __aexit__(self, *args: Any) -> None: # --- Helper functions --- -def shell_escape(s: str) -> str: - """Escape a string for safe use in shell commands.""" - return "'" + s.replace("'", "'\\''") + "'" - - def format_lines(text: str, offset: int, limit: int) -> str: """Format text with line numbers and continuation hints. @@ -439,72 +434,6 @@ def glob_match(path: str, pattern: str) -> bool: return bool(re.fullmatch(regex, path)) -# --- Shell command builders for Docker environments --- - - -def build_read_file_cmd(path: str, *, offset: int = 0, limit: int = 2000) -> str: - """Build a shell command that reads a file with line numbers. - - Uses `awk` for reliable line numbering that handles tabs correctly. - Includes a continuation hint when more lines remain, consistent - with the `format_lines` helper used by Local/Memory environments. - """ - escaped = shell_escape(path) - start = offset + 1 - end = offset + limit - return ( - f'awk \'NR>={start} && NR<={end} {{printf "%6d\\t%s\\n", NR, $0}}' - f' END {{if(NR>{end}) printf "... (%d more lines. 
Use offset={end} to continue reading.)\\n", NR-{end}}}\'' - f' {escaped}' - ) - - -def build_grep_cmd( - pattern: str, - *, - path: str | None = None, - glob_pattern: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', -) -> str: - """Build a shell `grep` command from structured arguments.""" - parts = ['grep', '-rI'] # -I skips binary files - if output_mode == 'files_with_matches': - parts.append('-l') - elif output_mode == 'count': - parts.append('-c') - else: - parts.append('-n') - if glob_pattern: - parts.extend(['--include', shell_escape(glob_pattern)]) - parts.append(shell_escape(pattern)) - parts.append(shell_escape(path or '.')) - return ' '.join(parts) - - -def filter_grep_count_output(text: str) -> str: - """Filter `grep -c` output to remove files with 0 matches.""" - return '\n'.join(line for line in text.splitlines() if not line.endswith(':0')) - - -def build_glob_cmd(pattern: str, *, path: str = '.') -> str: - """Build a shell `find` command to match files by pattern.""" - # For -path, prepend the search path since find outputs full paths relative to the starting point - path_pattern = f'{path}/{pattern}' if '/' in pattern else pattern - return ( - f'find {shell_escape(path)}' - f' \\( -path {shell_escape(path_pattern)} -o -name {shell_escape(pattern)} \\)' - f' 2>/dev/null | head -100' - ) - - -def parse_glob_output(text: str) -> list[str]: - """Parse output of a find/glob command into a list of paths.""" - text = text.strip() - if not text: - return [] - return [line for line in text.splitlines() if line] - - def apply_edit(text: str, old_string: str, new_string: str, path: str, *, replace_all: bool) -> tuple[str, int]: """Apply a string replacement edit, returning the new text and the number of replacements. 
diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 582e233e67..d2593c1040 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -26,12 +26,6 @@ ExecutionResult, FileInfo, apply_edit, - build_glob_cmd, - build_grep_cmd, - build_read_file_cmd, - filter_grep_count_output, - parse_glob_output, - shell_escape, ) try: @@ -45,6 +39,68 @@ ) from _import_error +def _shell_escape(s: str) -> str: + """Escape a string for safe use in shell commands.""" + return "'" + s.replace("'", "'\\''") + "'" + + +def _build_read_file_cmd(path: str, *, offset: int = 0, limit: int = 2000) -> str: + """Build a shell command that reads a file with line numbers.""" + escaped = _shell_escape(path) + start = offset + 1 + end = offset + limit + return ( + f'awk \'NR>={start} && NR<={end} {{printf "%6d\\t%s\\n", NR, $0}}' + f' END {{if(NR>{end}) printf "... (%d more lines. Use offset={end} to continue reading.)\\n", NR-{end}}}\'' + f' {escaped}' + ) + + +def _build_grep_cmd( + pattern: str, + *, + path: str | None = None, + glob_pattern: str | None = None, + output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', +) -> str: + """Build a shell `grep` command from structured arguments.""" + parts = ['grep', '-rI'] # -I skips binary files + if output_mode == 'files_with_matches': + parts.append('-l') + elif output_mode == 'count': + parts.append('-c') + else: + parts.append('-n') + if glob_pattern: + parts.extend(['--include', _shell_escape(glob_pattern)]) + parts.append(_shell_escape(pattern)) + parts.append(_shell_escape(path or '.')) + return ' '.join(parts) + + +def _filter_grep_count_output(text: str) -> str: + """Filter `grep -c` output to remove files with 0 matches.""" + return '\n'.join(line for line in text.splitlines() if not line.endswith(':0')) + + +def _build_glob_cmd(pattern: str, *, path: str = '.') -> str: + """Build a 
shell `find` command to match files by pattern.""" + path_pattern = f'{path}/{pattern}' if '/' in pattern else pattern + return ( + f'find {_shell_escape(path)}' + f' \\( -path {_shell_escape(path_pattern)} -o -name {_shell_escape(pattern)} \\)' + f' 2>/dev/null | head -100' + ) + + +def _parse_glob_output(text: str) -> list[str]: + """Parse output of a find/glob command into a list of paths.""" + text = text.strip() + if not text: + return [] + return [line for line in text.splitlines() if line] + + def _put_file(container: Container, path: str, data: bytes) -> None: """Write file data into a container via put_archive.""" parent = str(PurePosixPath(path).parent) @@ -456,7 +512,7 @@ async def shell( def _exec() -> tuple[int, bytes]: if timeout is not None: - wrapped = f'timeout {math.ceil(timeout)} sh -c {shell_escape(command)}' + wrapped = f'timeout {math.ceil(timeout)} sh -c {_shell_escape(command)}' else: wrapped = command exec_kwargs: dict[str, Any] = {'workdir': self._work_dir} @@ -484,7 +540,7 @@ async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> s return await anyio.to_thread.run_sync(self._read_file_bytes_sync, path) def _read() -> str | bytes: - cmd = build_read_file_cmd(path, offset=offset, limit=limit) + cmd = _build_read_file_cmd(path, offset=offset, limit=limit) exit_code, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) if exit_code != 0: raise FileNotFoundError(f'File not found or not readable: {path}') @@ -540,7 +596,7 @@ def _edit() -> int: async def ls(self, path: str = '.') -> list[FileInfo]: def _ls() -> list[FileInfo]: - cmd = f'ls -la {shell_escape(path)}' + cmd = f'ls -la {_shell_escape(path)}' exit_code, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) if exit_code != 0: raise NotADirectoryError(f'Not a directory or not found: {path}') @@ -567,9 +623,9 @@ def _ls() -> list[FileInfo]: async def glob(self, pattern: str, *, path: str = '.') -> list[str]: def 
_glob() -> list[str]: - cmd = build_glob_cmd(pattern, path=path) + cmd = _build_glob_cmd(pattern, path=path) _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) - return parse_glob_output(output.decode('utf-8', errors='replace')) + return _parse_glob_output(output.decode('utf-8', errors='replace')) return await anyio.to_thread.run_sync(_glob) @@ -582,11 +638,11 @@ async def grep( output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', ) -> str: def _grep() -> str: - cmd = build_grep_cmd(pattern, path=path, glob_pattern=glob_pattern, output_mode=output_mode) + cmd = _build_grep_cmd(pattern, path=path, glob_pattern=glob_pattern, output_mode=output_mode) _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) text = output.decode('utf-8', errors='replace').strip() if output_mode == 'count': - text = filter_grep_count_output(text) + text = _filter_grep_count_output(text) return text return await anyio.to_thread.run_sync(_grep) diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index 155d622bfe..64b08e2200 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -311,7 +311,8 @@ async def grep( search_dir = self._resolve_path(path or '.') compiled = re.compile(pattern) - if search_dir.is_file(): + is_explicit_file = search_dir.is_file() + if is_explicit_file: files = [search_dir] elif glob_pattern: files = sorted(search_dir.rglob(glob_pattern)) @@ -322,8 +323,10 @@ async def grep( for file_path in files: if not file_path.is_file(): continue - # Skip hidden files/directories (e.g. .git/, .venv/) - if any(part.startswith('.') for part in file_path.relative_to(self._root_dir).parts): + # Skip hidden files/directories (e.g. 
.git/, .venv/) unless explicitly specified + if not is_explicit_file and any( + part.startswith('.') for part in file_path.relative_to(self._root_dir).parts + ): continue try: raw = file_path.read_bytes() diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index 159f2f2a27..da60574d9a 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -249,8 +249,8 @@ async def grep( if not is_exact_file and glob_pattern and not fnmatch.fnmatch(posixpath.basename(file_path), glob_pattern): continue - # Skip hidden files - if any(part.startswith('.') for part in file_path.split('/')): + # Skip hidden files unless explicitly specified + if not is_exact_file and any(part.startswith('.') for part in file_path.split('/')): continue content = self._files[file_path] diff --git a/tests/test_environments.py b/tests/test_environments.py index ea1f21bacb..2775e6c0db 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -25,14 +25,8 @@ ) from pydantic_ai.environments._base import ( apply_edit, - build_glob_cmd, - build_grep_cmd, - build_read_file_cmd, - filter_grep_count_output, format_lines, glob_match, - parse_glob_output, - shell_escape, ) from pydantic_ai.environments.local import LocalEnvironment, LocalEnvironmentProcess from pydantic_ai.environments.memory import MemoryEnvironment @@ -46,7 +40,13 @@ from pydantic_ai.environments.docker import ( DockerEnvironment, DockerEnvironmentProcess, + _build_glob_cmd, + _build_grep_cmd, + _build_read_file_cmd, + _filter_grep_count_output, + _parse_glob_output, _put_file, + _shell_escape, ) except ImportError: # pragma: lax no cover docker_installed = False @@ -437,6 +437,15 @@ async def test_local_grep_skips_hidden_files(tmp_path: Path): assert '.dotfile' not in result +async def test_local_grep_explicit_hidden_file(tmp_path: Path): + """Explicitly-specified hidden files should be 
searchable.""" + async with LocalEnvironment(tmp_path) as env: + (tmp_path / '.env').write_text('SECRET=hunter2\n') + + result = await env.grep('SECRET', path='.env') + assert 'SECRET=hunter2' in result + + # --- LocalEnvironment: create_process --- @@ -1162,6 +1171,19 @@ async def test_memory_grep_skips_hidden(): assert '.hidden' not in result +async def test_memory_grep_explicit_hidden_file(): + """Explicitly-specified hidden files should be searchable.""" + env = MemoryEnvironment( + files={ + '.env': 'SECRET=hunter2\n', + 'visible.py': 'hello\n', + } + ) + async with env: + result = await env.grep('SECRET', path='.env') + assert 'SECRET=hunter2' in result + + async def test_memory_grep_with_glob_pattern(): env = MemoryEnvironment( files={ @@ -1307,11 +1329,12 @@ async def test_agent_with_execution_toolset(): # --- _base.py helper functions --- +@docker_skip def test_shell_escape(): - assert shell_escape('hello') == "'hello'" - assert shell_escape("it's") == "'it'\\''s'" - assert shell_escape('') == "''" - assert shell_escape('a b c') == "'a b c'" + assert _shell_escape('hello') == "'hello'" + assert _shell_escape("it's") == "'it'\\''s'" + assert _shell_escape('') == "''" + assert _shell_escape('a b c') == "'a b c'" def test_format_lines_empty_file(): @@ -1359,101 +1382,116 @@ def test_glob_match_question_mark(): assert glob_match('test.py', 't????.py') is False # needs 4 chars between t and .py +@docker_skip def test_build_read_file_cmd_default(): - cmd = build_read_file_cmd('test.txt') + cmd = _build_read_file_cmd('test.txt') assert 'awk' in cmd assert "'test.txt'" in cmd assert 'NR>=1' in cmd assert 'NR<=2000' in cmd +@docker_skip def test_build_read_file_cmd_with_offset(): - cmd = build_read_file_cmd('file.py', offset=10, limit=50) + cmd = _build_read_file_cmd('file.py', offset=10, limit=50) assert 'NR>=11' in cmd assert 'NR<=60' in cmd assert "'file.py'" in cmd +@docker_skip def test_build_read_file_cmd_continuation_hint(): - """build_read_file_cmd includes 
a continuation hint in the awk END block.""" - cmd = build_read_file_cmd('file.py', offset=0, limit=10) + """_build_read_file_cmd includes a continuation hint in the awk END block.""" + cmd = _build_read_file_cmd('file.py', offset=0, limit=10) assert 'more lines' in cmd assert 'offset=10' in cmd +@docker_skip def test_build_grep_cmd_content(): - cmd = build_grep_cmd('pattern') + cmd = _build_grep_cmd('pattern') assert 'grep -rI' in cmd assert '-n' in cmd assert "'pattern'" in cmd assert "'.'" in cmd +@docker_skip def test_build_grep_cmd_files_with_matches(): - cmd = build_grep_cmd('pat', output_mode='files_with_matches') + cmd = _build_grep_cmd('pat', output_mode='files_with_matches') assert '-l' in cmd assert '-n' not in cmd +@docker_skip def test_build_grep_cmd_count(): - cmd = build_grep_cmd('pat', output_mode='count') + cmd = _build_grep_cmd('pat', output_mode='count') assert '-c' in cmd +@docker_skip def test_build_grep_cmd_with_path(): - cmd = build_grep_cmd('pat', path='src') + cmd = _build_grep_cmd('pat', path='src') assert "'src'" in cmd +@docker_skip def test_build_grep_cmd_with_glob_pattern(): """glob_pattern is shell-escaped to prevent injection.""" - cmd = build_grep_cmd('pat', glob_pattern='*.py') + cmd = _build_grep_cmd('pat', glob_pattern='*.py') assert '--include' in cmd assert "'*.py'" in cmd +@docker_skip def test_build_grep_cmd_glob_pattern_escaping(): """Verify glob_pattern with special chars is properly shell-escaped.""" - cmd = build_grep_cmd('pat', glob_pattern='*.py') + cmd = _build_grep_cmd('pat', glob_pattern='*.py') # The glob pattern should be shell-escaped (wrapped in single quotes) assert "--include '*.py'" in cmd # Even a malicious glob_pattern gets safely escaped - cmd2 = build_grep_cmd('pat', glob_pattern='$(evil)') + cmd2 = _build_grep_cmd('pat', glob_pattern='$(evil)') assert '$(evil)' not in cmd2.replace("'$(evil)'", '') # Only appears inside quotes +@docker_skip def test_build_glob_cmd(): - cmd = build_glob_cmd('*.py') + cmd = 
_build_glob_cmd('*.py') assert 'find' in cmd assert "'*.py'" in cmd assert "'.'" in cmd +@docker_skip def test_build_glob_cmd_with_path(): - cmd = build_glob_cmd('*.py', path='src') + cmd = _build_glob_cmd('*.py', path='src') assert "'src'" in cmd +@docker_skip def test_parse_glob_output_empty(): - assert parse_glob_output('') == [] - assert parse_glob_output(' ') == [] - assert parse_glob_output('\n') == [] + assert _parse_glob_output('') == [] + assert _parse_glob_output(' ') == [] + assert _parse_glob_output('\n') == [] +@docker_skip def test_parse_glob_output_multiline(): - assert parse_glob_output('a.py\nb.py\nc.py\n') == ['a.py', 'b.py', 'c.py'] + assert _parse_glob_output('a.py\nb.py\nc.py\n') == ['a.py', 'b.py', 'c.py'] +@docker_skip def test_filter_grep_count_output(): text = 'a.py:3\nb.py:0\nc.py:1' - result = filter_grep_count_output(text) + result = _filter_grep_count_output(text) assert result == 'a.py:3\nc.py:1' +@docker_skip def test_filter_grep_count_output_all_zero(): text = 'a.py:0\nb.py:0' - result = filter_grep_count_output(text) + result = _filter_grep_count_output(text) assert result == '' From 9e28bc1d603ff4cf61c0003208fec12aa098ab4d Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 15:59:34 +0000 Subject: [PATCH 13/49] Use extended regex (-E) in Docker grep for consistency with Local/Memory Docker's grep defaults to BRE where |, +, ? are literal characters. Local/Memory environments use Python's re.compile() which is closer to ERE. Adding -E makes Docker grep behavior consistent. 
--- pydantic_ai_slim/pydantic_ai/environments/docker.py | 2 +- tests/test_environments.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index d2593c1040..d17b52bcdc 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -64,7 +64,7 @@ def _build_grep_cmd( output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', ) -> str: """Build a shell `grep` command from structured arguments.""" - parts = ['grep', '-rI'] # -I skips binary files + parts = ['grep', '-rIE'] # -I skips binary files, -E uses extended regex if output_mode == 'files_with_matches': parts.append('-l') elif output_mode == 'count': diff --git a/tests/test_environments.py b/tests/test_environments.py index 2775e6c0db..a7e8e696f3 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1410,7 +1410,7 @@ def test_build_read_file_cmd_continuation_hint(): @docker_skip def test_build_grep_cmd_content(): cmd = _build_grep_cmd('pattern') - assert 'grep -rI' in cmd + assert 'grep -rIE' in cmd assert '-n' in cmd assert "'pattern'" in cmd assert "'.'" in cmd From 94d032924d4790aed1b1e94252b45126e16372c6 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 16:11:08 +0000 Subject: [PATCH 14/49] Add tests for full coverage, add pragmas for defensive branches - Add tests for Docker process wait polling, recv_stderr, stream buffering, hardened constructor, setup early return, is_alive, read_file binary fallback, ls edge cases - Add tests for Local recv without timeout, EndOfStream, binary read_file, grep truncation - Add tests for Memory ls dedup, grep truncation - Mark defensive Docker branches with # pragma: no cover - Mark Docker __aenter__/__aexit__ with # pragma: lax no cover --- .../pydantic_ai/environments/docker.py | 8 
+- .../pydantic_ai/environments/local.py | 2 +- tests/test_environments.py | 244 ++++++++++++++++++ 3 files changed, 249 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index d17b52bcdc..fdf02bd583 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -410,7 +410,7 @@ def capabilities(self) -> frozenset[EnvToolName]: # pragma: lax no cover } ) - async def __aenter__(self) -> Self: + async def __aenter__(self) -> Self: # pragma: lax no cover await anyio.to_thread.run_sync(self._setup) return self @@ -457,7 +457,7 @@ def _setup(self) -> None: # Ensure work_dir exists self._container.exec_run(['mkdir', '-p', self._work_dir]) - async def __aexit__(self, *_args: Any) -> None: + async def __aexit__(self, *_args: Any) -> None: # pragma: lax no cover if self._container is not None: # pragma: no branch await anyio.to_thread.run_sync(self._teardown) @@ -558,10 +558,10 @@ def _read_file_bytes_sync(self, path: str) -> bytes: tar_bytes = b''.join(bits) with tarfile.open(fileobj=io.BytesIO(tar_bytes)) as tar: members = tar.getmembers() - if not members: + if not members: # pragma: no cover raise FileNotFoundError(f'File not found: {path}') extracted = tar.extractfile(members[0]) - if extracted is None: + if extracted is None: # pragma: no cover raise FileNotFoundError(f'Cannot read file: {path}') return extracted.read() diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index 64b08e2200..a6f698645e 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -330,7 +330,7 @@ async def grep( continue try: raw = file_path.read_bytes() - except OSError: + except OSError: # pragma: no cover continue # Skip binary files (null byte in first 8KB) diff --git a/tests/test_environments.py 
b/tests/test_environments.py index a7e8e696f3..f5779fb8eb 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -3108,3 +3108,247 @@ def capabilities(self) -> frozenset[EnvToolName]: # After exiting use_environment, all tools are back restored_tools = await toolset.get_tools(ctx) assert set(restored_tools.keys()) == set(all_tools.keys()) + + +# --- Coverage gap tests --- + + +@docker_skip +def test_docker_hardened_constructor(): + """DockerEnvironment.hardened() returns a properly configured instance.""" + env = DockerEnvironment.hardened(image='python:3.12-slim', memory_limit='1g') + assert env._network_disabled is True + assert env._read_only is True + assert env._cap_drop == ['ALL'] + assert env._memory_limit == '1g' + assert env._user == 'nobody' + assert env._init is True + + +@docker_skip +def test_docker_setup_early_return(): + """DockerEnvironment._setup returns early if container already exists.""" + env = DockerEnvironment(image='python:3.12-slim') + env._container = MagicMock() + env._setup() # should not create a new container + assert env._client is None # docker.from_env() was never called + + +@docker_skip +async def test_docker_process_recv_stderr_no_buffer() -> None: + """DockerEnvironmentProcess.recv_stderr without buffered data (no timeout).""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + stderr_data = b'error output' + header = struct.pack('>BxxxI', 2, len(stderr_data)) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stderr_data] + proc._socket = mock_socket + + result = await proc.recv_stderr() + assert result == stderr_data + + +@docker_skip +async def test_docker_process_recv_stream_buffers_stdout() -> None: + """DockerEnvironmentProcess._recv_stream buffers stdout when stderr is wanted.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] 
+ + # First frame is stdout (type 1), second is stderr (type 2) + stdout_data = b'stdout output' + stderr_data = b'stderr output' + stdout_header = struct.pack('>BxxxI', 1, len(stdout_data)) + stderr_header = struct.pack('>BxxxI', 2, len(stderr_data)) + + mock_socket = MagicMock() + mock_socket.recv.side_effect = [stdout_header, stdout_data, stderr_header, stderr_data] + proc._socket = mock_socket + + # Requesting stderr should buffer stdout and return stderr + result = await proc.recv_stderr() + assert result == stderr_data + assert proc._stdout_buffer == [stdout_data] + + +@docker_skip +async def test_docker_process_wait_no_timeout() -> None: + """DockerEnvironmentProcess.wait without timeout polls until returncode is set.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._exec_id = 'exec-123' + # Mock exec_inspect to return "still running" first, then "exited" + call_count = 0 + + def mock_inspect(exec_id: str) -> dict[str, Any]: + nonlocal call_count + call_count += 1 + if call_count <= 1: + return {'Running': True, 'ExitCode': None} + return {'Running': False, 'ExitCode': 0} + + container.client.api.exec_inspect = mock_inspect + result = await proc.wait() + assert result == 0 + assert call_count >= 2 + + +@docker_skip +async def test_docker_process_wait_with_timeout() -> None: + """DockerEnvironmentProcess.wait with timeout.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._returncode = 42 + result = await proc.wait(timeout=5.0) + assert result == 42 + + +@docker_skip +async def test_docker_read_file_unicode_error(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.read_file falls back to raw bytes on UnicodeDecodeError.""" + # Store a binary file (not an image extension) that will fail utf-8 decode + binary_data = b'\x80\x81\x82\xff' + 
mock_container._files['/workspace/data.bin'] = binary_data + + # Make the awk command return non-utf8 data to trigger UnicodeDecodeError + original = mock_container.exec_run + + def exec_with_binary(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd + if 'awk' in cmd_str and 'data.bin' in cmd_str: + return 0, b'\x80\x81\x82\xff' + return original(cmd, **kwargs) + + mock_container.exec_run = exec_with_binary # type: ignore[assignment] + result = await mock_docker_sandbox.read_file('data.bin') + assert isinstance(result, bytes) + + +@docker_skip +async def test_docker_ls_size_value_error(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls handles non-numeric size fields gracefully.""" + original = mock_container.exec_run + + def exec_with_bad_size(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd + if 'ls -la' in cmd_str: + return 0, b'total 0\n-rw-r--r-- 1 root root NaN Jan 1 00:00 file.txt' + return original(cmd, **kwargs) + + mock_container.exec_run = exec_with_bad_size # type: ignore[assignment] + entries = await mock_docker_sandbox.ls() + assert len(entries) == 1 + assert entries[0].name == 'file.txt' + assert entries[0].size is None + + +@docker_skip +async def test_docker_ls_short_line(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls skips lines with fewer than 9 fields.""" + original = mock_container.exec_run + + def exec_with_short_lines(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd + if 'ls -la' in cmd_str: + return 0, b'total 0\nshort line\n-rw-r--r-- 1 root root 42 Jan 1 00:00 real.txt' + return original(cmd, **kwargs) + + mock_container.exec_run = exec_with_short_lines # type: ignore[assignment] + entries = await mock_docker_sandbox.ls() + assert len(entries) == 1 + assert entries[0].name == 'real.txt' 
+ + +@docker_skip +async def test_docker_is_alive_exception(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.is_alive returns False when reload raises.""" + mock_container.reload = MagicMock(side_effect=Exception('connection error')) + result = await mock_docker_sandbox.is_alive() + assert result is False + + +@docker_skip +async def test_docker_is_alive_running(mock_docker_sandbox: Any) -> None: + """DockerEnvironment.is_alive returns True when running.""" + result = await mock_docker_sandbox.is_alive() + assert result is True + + +async def test_local_recv_no_timeout(tmp_path: Path): + """LocalEnvironmentProcess.recv without timeout returns data.""" + env = LocalEnvironment(tmp_path) + proc = await env.create_process('echo hello') + async with proc: + data = await proc.recv() # no timeout + assert b'hello' in data + + +async def test_local_recv_end_of_stream(tmp_path: Path): + """LocalEnvironmentProcess.recv returns empty bytes at EndOfStream.""" + env = LocalEnvironment(tmp_path) + proc = await env.create_process('true') + async with proc: + await proc.wait(timeout=5) + # After process exits, reading should return empty + data = await proc.recv() + assert data == b'' + + +async def test_local_read_file_binary_non_image(tmp_path: Path): + """LocalEnvironment.read_file returns raw bytes for non-image binary files.""" + async with LocalEnvironment(tmp_path) as env: + binary_path = tmp_path / 'data.bin' + binary_path.write_bytes(b'\x80\x81\x82\xff') + result = await env.read_file('data.bin') + assert isinstance(result, bytes) + assert result == b'\x80\x81\x82\xff' + + +async def test_local_grep_truncation(tmp_path: Path): + """LocalEnvironment.grep truncates at 1000+ matches.""" + async with LocalEnvironment(tmp_path) as env: + # Create a file with 1002 matching lines + lines = '\n'.join(f'match_{i}' for i in range(1002)) + await env.write_file('big.txt', lines) + result = await env.grep('match_') + assert '[... 
truncated at 1000 matches]' in result + + +async def test_memory_ls_duplicate_entry(): + """MemoryEnvironment.ls deduplicates entries at the same directory level.""" + # 'sub/a.txt' and 'sub/b.txt' both create a 'sub' directory entry + env = MemoryEnvironment(files={'sub/a.txt': 'a', 'sub/b.txt': 'b'}) + async with env: + entries = await env.ls() + names = [e.name for e in entries] + assert names.count('sub') == 1 + + +async def test_memory_grep_truncation(): + """MemoryEnvironment.grep truncates at 1000+ matches.""" + lines = '\n'.join(f'match_{i}' for i in range(1002)) + env = MemoryEnvironment(files={'big.txt': lines}) + async with env: + result = await env.grep('match_') + assert '[... truncated at 1000 matches]' in result + + +async def test_toolset_factory_ls_only_calls_ls(): + """Test that _LsOnlyEnv.ls is actually callable (covers line 3071).""" + + class _LsOnlyEnv(BaseEnv): + @property + def capabilities(self) -> frozenset[EnvToolName]: + return frozenset({'ls'}) + + async def ls(self, path: str = '.') -> list[FileInfo]: + return [FileInfo(name='test.txt', path='test.txt', is_dir=False, size=10)] + + toolset = ExecutionEnvironmentToolset(environment_factory=_LsOnlyEnv) + ctx = build_run_context() + + async with toolset: + tools = await toolset.get_tools(ctx) + result = await toolset.call_tool('ls', {}, ctx, tools['ls']) + assert 'test.txt' in str(result) From 2b30c3a9a7d573aa351999c0626eff949ed66db1 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 16:21:31 +0000 Subject: [PATCH 15/49] Catch PermissionError/OSError in edit_file tool for ModelRetry Aligns edit_file exception handling with read_file and write_file, which already catch these errors for path traversal and OS-level failures. 
--- .../toolsets/execution_environment.py | 2 +- tests/test_environments.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index ab8932b3b6..36e4cf77a1 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -264,7 +264,7 @@ async def edit_file(path: str, old: str, new: str, replace_all: bool = False) -> try: count = await self.required_environment.replace_str(path, old, new, replace_all=replace_all) return f'Replaced {count} occurrence{"s" if count != 1 else ""} in {path}.' - except (FileNotFoundError, ValueError) as e: + except (FileNotFoundError, PermissionError, ValueError, OSError) as e: raise ModelRetry(str(e)) self.tool(requires_approval=self._require_write_approval)(edit_file) diff --git a/tests/test_environments.py b/tests/test_environments.py index f5779fb8eb..ba69d3cdc6 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -602,6 +602,23 @@ async def test_toolset_edit_retry_on_error(tmp_path: Path): ) +async def test_toolset_edit_retry_on_permission_error(tmp_path: Path): + """edit_file raises ModelRetry on PermissionError (e.g. 
path traversal).""" + env = LocalEnvironment(tmp_path) + toolset = ExecutionEnvironmentToolset(env, max_retries=0) + ctx = build_run_context(None) + manager = await ToolManager[None](toolset).for_run_step(ctx) + + async with env: + with pytest.raises(UnexpectedModelBehavior, match='exceeded max retries count of 0'): + await manager.handle_call( + ToolCallPart( + tool_name='edit_file', + args={'path': '../../etc/passwd', 'old': 'root', 'new': 'hacked'}, + ) + ) + + async def test_toolset_glob_tool(tmp_path: Path): env = LocalEnvironment(tmp_path) toolset = ExecutionEnvironmentToolset(env) From 3d8b3a625c547d50626cd64aba8d346038f4ac37 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 16:27:44 +0000 Subject: [PATCH 16/49] Fix Docker read_file offset validation and image NotFound handling - Raise ValueError when offset exceeds file length, matching Local/Memory - Catch docker.errors.NotFound in _read_file_bytes_sync, convert to FileNotFoundError - Update MockContainer awk handler to simulate offset/limit behavior --- .../pydantic_ai/environments/docker.py | 18 ++++++-- tests/test_environments.py | 43 ++++++++++++++++--- 2 files changed, 50 insertions(+), 11 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index fdf02bd583..521af54339 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -30,7 +30,7 @@ try: import docker - from docker.errors import DockerException + from docker.errors import DockerException, NotFound from docker.models.containers import Container except ImportError as _import_error: raise ImportError( @@ -51,7 +51,10 @@ def _build_read_file_cmd(path: str, *, offset: int = 0, limit: int = 2000) -> st end = offset + limit return ( f'awk \'NR>={start} && NR<={end} {{printf "%6d\\t%s\\n", NR, $0}}' - f' END {{if(NR>{end}) printf "... 
(%d more lines. Use offset={end} to continue reading.)\\n", NR-{end}}}\'' + f' END {{' + f'if(NR>{end}) printf "... (%d more lines. Use offset={end} to continue reading.)\\n", NR-{end};' + f' else if(NR>0 && NR<{start}) printf "__OFFSET_ERROR__:%d\\n", NR' + f"}}'" f' {escaped}' ) @@ -545,15 +548,22 @@ def _read() -> str | bytes: if exit_code != 0: raise FileNotFoundError(f'File not found or not readable: {path}') try: - return output.decode('utf-8') + text = output.decode('utf-8') except UnicodeDecodeError: return self._read_file_bytes_sync(path) + if text.startswith('__OFFSET_ERROR__:'): + total_lines = int(text.split(':')[1].strip()) + raise ValueError(f'Offset {offset} exceeds file length ({total_lines} lines).') + return text return await anyio.to_thread.run_sync(_read) def _read_file_bytes_sync(self, path: str) -> bytes: """Read raw file bytes using Docker's get_archive API.""" - bits, _ = self.container.get_archive(self._resolve_path(path)) + try: + bits, _ = self.container.get_archive(self._resolve_path(path)) + except NotFound: + raise FileNotFoundError(f'File not found: {path}') # get_archive returns a tar stream tar_bytes = b''.join(bits) with tarfile.open(fileobj=io.BytesIO(tar_bytes)) as tar: diff --git a/tests/test_environments.py b/tests/test_environments.py index ba69d3cdc6..2316a744ac 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1844,17 +1844,29 @@ def exec_run( # Handle awk (read_file) if 'awk' in cmd_str: + # Parse start/end from the awk NR range: NR>=start && NR<=end + import re as _re + + nr_match = _re.search(r'NR>=(\d+) && NR<=(\d+)', cmd_str) + start = int(nr_match.group(1)) if nr_match else 1 + end = int(nr_match.group(2)) if nr_match else 2000 # Try to find the file by matching path in the awk command. - # The path is shell-escaped (e.g. 'test.txt'), so check both - # the full path and relative to workdir. 
for fpath, data in self._files.items(): - # Check if the filename or path appears in the command name = fpath.rsplit('/', 1)[-1] if '/' in fpath else fpath if name in cmd_str or fpath in cmd_str: # pragma: no branch text = data.decode('utf-8', errors='replace') lines = text.splitlines(keepends=True) - numbered = [f'{i:>6}\t{line}' for i, line in enumerate(lines, start=1)] - return 0, ''.join(numbered).encode('utf-8') + total = len(lines) + # Offset exceeds file length + if total > 0 and total < start: + return 0, f'__OFFSET_ERROR__:{total}\n'.encode() + selected = lines[start - 1 : end] + numbered = [f'{i:>6}\t{line}' for i, line in enumerate(selected, start=start)] + result = ''.join(numbered) + remaining = total - end + if remaining > 0: + result += f'... ({remaining} more lines. Use offset={end} to continue reading.)\n' + return 0, result.encode('utf-8') return 1, b'File not found' # Handle ls -la @@ -1979,6 +1991,23 @@ async def test_docker_read_file_not_found(mock_docker_sandbox: Any) -> None: await mock_docker_sandbox.read_file('nonexistent.txt') +async def test_docker_read_file_offset_out_of_range(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.read_file raises ValueError when offset exceeds file length.""" + mock_container._files['/workspace/small.txt'] = b'line1\nline2\nline3\n' + with pytest.raises(ValueError, match='Offset 10 exceeds file length \\(3 lines\\)'): + await mock_docker_sandbox.read_file('small.txt', offset=10) + + +async def test_docker_read_file_with_offset(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.read_file respects offset and limit.""" + mock_container._files['/workspace/lines.txt'] = b'a\nb\nc\nd\ne\n' + result = await mock_docker_sandbox.read_file('lines.txt', offset=2, limit=2) + assert isinstance(result, str) + assert ' 3\tc\n' in result + assert ' 4\td\n' in result + assert '... (1 more lines. 
Use offset=4 to continue reading.)' in result + + async def test_docker_read_file_image(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: """DockerEnvironment.read_file returns raw bytes for image files.""" png_data = b'\x89PNG\r\n\x1a\n' @@ -2818,13 +2847,13 @@ def fail_ls(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: @docker_skip async def test_docker_read_file_image_not_found(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.read_file raises DockerNotFound for missing image files.""" + """DockerEnvironment.read_file raises FileNotFoundError for missing image files.""" def fail_get_archive(path: str) -> Any: raise DockerNotFound('File not found') mock_container.get_archive = fail_get_archive - with pytest.raises(DockerNotFound): + with pytest.raises(FileNotFoundError, match='File not found: missing.png'): await mock_docker_sandbox.read_file('missing.png') From d8d3aba0d52ac24f5ee7d2cb81df5619ff02ee59 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 16:38:55 +0000 Subject: [PATCH 17/49] Fix coverage --- tests/test_environments.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_environments.py b/tests/test_environments.py index 2316a744ac..af67ea6055 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -3114,7 +3114,7 @@ def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'ls'}) async def ls(self, path: str = '.') -> list[FileInfo]: - return [] + return [] # pragma: no cover toolset = ExecutionEnvironmentToolset(environment_factory=_LsOnlyEnv) # Before entering, all tools are registered (no env to check) @@ -3264,7 +3264,7 @@ def exec_with_binary(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd if 'awk' in cmd_str and 'data.bin' in cmd_str: return 0, b'\x80\x81\x82\xff' - return original(cmd, **kwargs) + return 
original(cmd, **kwargs) # pragma: no cover mock_container.exec_run = exec_with_binary # type: ignore[assignment] result = await mock_docker_sandbox.read_file('data.bin') @@ -3280,7 +3280,7 @@ def exec_with_bad_size(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd if 'ls -la' in cmd_str: return 0, b'total 0\n-rw-r--r-- 1 root root NaN Jan 1 00:00 file.txt' - return original(cmd, **kwargs) + return original(cmd, **kwargs) # pragma: no cover mock_container.exec_run = exec_with_bad_size # type: ignore[assignment] entries = await mock_docker_sandbox.ls() @@ -3298,7 +3298,7 @@ def exec_with_short_lines(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd if 'ls -la' in cmd_str: return 0, b'total 0\nshort line\n-rw-r--r-- 1 root root 42 Jan 1 00:00 real.txt' - return original(cmd, **kwargs) + return original(cmd, **kwargs) # pragma: no cover mock_container.exec_run = exec_with_short_lines # type: ignore[assignment] entries = await mock_docker_sandbox.ls() From 598a8cfbf587722bd01b4d2395cd712dedaa562e Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 17:07:27 +0000 Subject: [PATCH 18/49] Address feedback --- .../pydantic_ai/environments/docker.py | 14 ++++++-- tests/test_environments.py | 35 +++++++++++++++++-- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 521af54339..b529a5dba9 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -87,10 +87,20 @@ def _filter_grep_count_output(text: str) -> str: def _build_glob_cmd(pattern: str, *, path: str = '.') -> str: - """Build a shell `find` command to match files by pattern.""" + """Build a shell `find` command to match files by pattern. 
+ + When the pattern does not contain ``**``, ``-maxdepth`` is added so that + ``*.py`` only matches in the target directory (matching Local/Memory + behaviour), while ``**/*.py`` recurses without limit. + """ path_pattern = f'{path}/{pattern}' if '/' in pattern else pattern + # Limit recursion depth for non-** patterns to match pathlib.glob semantics + if '**' in pattern: + depth_flag = '' + else: + depth_flag = f' -maxdepth {pattern.count("/") + 1}' return ( - f'find {_shell_escape(path)}' + f'find {_shell_escape(path)}{depth_flag}' f' \\( -path {_shell_escape(path_pattern)} -o -name {_shell_escape(pattern)} \\)' f' 2>/dev/null | head -100' ) diff --git a/tests/test_environments.py b/tests/test_environments.py index af67ea6055..ffce81d213 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -379,6 +379,16 @@ async def test_local_glob(tmp_path: Path): assert not any('data.json' in m for m in matches) +async def test_local_glob_non_recursive(tmp_path: Path): + async with LocalEnvironment(tmp_path) as env: + await env.write_file('top.py', '# top') + await env.write_file('sub/nested.py', '# nested') + + # Non-recursive pattern should only match in the target directory + matches = await env.glob('*.py') + assert matches == ['top.py'] + + async def test_local_glob_no_matches(tmp_path: Path): async with LocalEnvironment(tmp_path) as env: matches = await env.glob('**/*.rs') @@ -1116,6 +1126,14 @@ async def test_memory_glob(): assert sorted(matches) == ['src/main.py', 'src/utils.py'] +async def test_memory_glob_non_recursive(): + env = MemoryEnvironment(files={'top.py': '# top', 'sub/nested.py': '# nested'}) + async with env: + # Non-recursive pattern should only match in the target directory + matches = await env.glob('*.py') + assert matches == ['top.py'] + + async def test_memory_glob_no_matches(): env = MemoryEnvironment(files={'a.py': ''}) async with env: @@ -1478,12 +1496,26 @@ def test_build_glob_cmd(): assert 'find' in cmd assert "'*.py'" in 
cmd assert "'.'" in cmd + assert '-maxdepth 1' in cmd @docker_skip def test_build_glob_cmd_with_path(): cmd = _build_glob_cmd('*.py', path='src') assert "'src'" in cmd + assert '-maxdepth 1' in cmd + + +@docker_skip +def test_build_glob_cmd_nested_pattern(): + cmd = _build_glob_cmd('src/*.py') + assert '-maxdepth 2' in cmd + + +@docker_skip +def test_build_glob_cmd_recursive_no_maxdepth(): + cmd = _build_glob_cmd('**/*.py') + assert '-maxdepth' not in cmd @docker_skip @@ -1939,9 +1971,6 @@ def mock_container() -> MockContainer: @pytest.fixture def mock_docker_sandbox(mock_container: MockContainer) -> Any: """Create a DockerEnvironment with a mock container.""" - if not docker_installed: - pytest.skip('docker package not installed') - sandbox = DockerEnvironment(image='python:3.12-slim') sandbox._container = mock_container # type: ignore[assignment] sandbox._client = MagicMock() From 37e4d1f23c26bec9618bc45953f1724319825587 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 17:27:35 +0000 Subject: [PATCH 19/49] Simplify docker coverage stuff --- tests/test_environments.py | 1703 +++++++++++++++++------------------- 1 file changed, 813 insertions(+), 890 deletions(-) diff --git a/tests/test_environments.py b/tests/test_environments.py index ffce81d213..f19c90756e 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -55,8 +55,6 @@ pytestmark = pytest.mark.anyio -docker_skip = pytest.mark.skipif(not docker_installed, reason='docker package not installed') - def build_run_context(deps: Any = None, run_step: int = 0) -> RunContext[Any]: return RunContext( @@ -1304,40 +1302,6 @@ async def test_memory_toolset_integration(): assert result == snapshot('main.py:1:print("hello")') -# --- Docker instantiation tests --- - - -@docker_skip -def test_docker_sandbox_instantiation(): - """DockerEnvironment can be constructed without starting Docker.""" - - # Verify construction succeeds with default 
and custom settings - sandbox = DockerEnvironment(image='python:3.12-slim') - assert isinstance(sandbox, DockerEnvironment) - - sandbox_with_opts = DockerEnvironment( - image='node:20-slim', - memory_limit='512m', - cpu_limit=1.0, - network_disabled=True, - ) - assert isinstance(sandbox_with_opts, DockerEnvironment) - - # Verify security hardening parameters are accepted - sandbox_hardened = DockerEnvironment( - image='python:3.12-slim', - network_disabled=True, - read_only=True, - cap_drop=['ALL'], - security_opt=['no-new-privileges'], - user='nobody', - pids_limit=256, - tmpfs={'/tmp': 'noexec,nosuid,size=64m'}, - init=True, - ) - assert isinstance(sandbox_hardened, DockerEnvironment) - - # --- Agent-level integration test --- @@ -1364,14 +1328,6 @@ async def test_agent_with_execution_toolset(): # --- _base.py helper functions --- -@docker_skip -def test_shell_escape(): - assert _shell_escape('hello') == "'hello'" - assert _shell_escape("it's") == "'it'\\''s'" - assert _shell_escape('') == "''" - assert _shell_escape('a b c') == "'a b c'" - - def test_format_lines_empty_file(): """format_lines on empty string returns just a newline.""" result = format_lines('', 0, 2000) @@ -1417,133 +1373,6 @@ def test_glob_match_question_mark(): assert glob_match('test.py', 't????.py') is False # needs 4 chars between t and .py -@docker_skip -def test_build_read_file_cmd_default(): - cmd = _build_read_file_cmd('test.txt') - assert 'awk' in cmd - assert "'test.txt'" in cmd - assert 'NR>=1' in cmd - assert 'NR<=2000' in cmd - - -@docker_skip -def test_build_read_file_cmd_with_offset(): - cmd = _build_read_file_cmd('file.py', offset=10, limit=50) - assert 'NR>=11' in cmd - assert 'NR<=60' in cmd - assert "'file.py'" in cmd - - -@docker_skip -def test_build_read_file_cmd_continuation_hint(): - """_build_read_file_cmd includes a continuation hint in the awk END block.""" - cmd = _build_read_file_cmd('file.py', offset=0, limit=10) - assert 'more lines' in cmd - assert 'offset=10' in 
cmd - - -@docker_skip -def test_build_grep_cmd_content(): - cmd = _build_grep_cmd('pattern') - assert 'grep -rIE' in cmd - assert '-n' in cmd - assert "'pattern'" in cmd - assert "'.'" in cmd - - -@docker_skip -def test_build_grep_cmd_files_with_matches(): - cmd = _build_grep_cmd('pat', output_mode='files_with_matches') - assert '-l' in cmd - assert '-n' not in cmd - - -@docker_skip -def test_build_grep_cmd_count(): - cmd = _build_grep_cmd('pat', output_mode='count') - assert '-c' in cmd - - -@docker_skip -def test_build_grep_cmd_with_path(): - cmd = _build_grep_cmd('pat', path='src') - assert "'src'" in cmd - - -@docker_skip -def test_build_grep_cmd_with_glob_pattern(): - """glob_pattern is shell-escaped to prevent injection.""" - cmd = _build_grep_cmd('pat', glob_pattern='*.py') - assert '--include' in cmd - assert "'*.py'" in cmd - - -@docker_skip -def test_build_grep_cmd_glob_pattern_escaping(): - """Verify glob_pattern with special chars is properly shell-escaped.""" - cmd = _build_grep_cmd('pat', glob_pattern='*.py') - # The glob pattern should be shell-escaped (wrapped in single quotes) - assert "--include '*.py'" in cmd - - # Even a malicious glob_pattern gets safely escaped - cmd2 = _build_grep_cmd('pat', glob_pattern='$(evil)') - assert '$(evil)' not in cmd2.replace("'$(evil)'", '') # Only appears inside quotes - - -@docker_skip -def test_build_glob_cmd(): - cmd = _build_glob_cmd('*.py') - assert 'find' in cmd - assert "'*.py'" in cmd - assert "'.'" in cmd - assert '-maxdepth 1' in cmd - - -@docker_skip -def test_build_glob_cmd_with_path(): - cmd = _build_glob_cmd('*.py', path='src') - assert "'src'" in cmd - assert '-maxdepth 1' in cmd - - -@docker_skip -def test_build_glob_cmd_nested_pattern(): - cmd = _build_glob_cmd('src/*.py') - assert '-maxdepth 2' in cmd - - -@docker_skip -def test_build_glob_cmd_recursive_no_maxdepth(): - cmd = _build_glob_cmd('**/*.py') - assert '-maxdepth' not in cmd - - -@docker_skip -def test_parse_glob_output_empty(): - 
assert _parse_glob_output('') == [] - assert _parse_glob_output(' ') == [] - assert _parse_glob_output('\n') == [] - - -@docker_skip -def test_parse_glob_output_multiline(): - assert _parse_glob_output('a.py\nb.py\nc.py\n') == ['a.py', 'b.py', 'c.py'] - - -@docker_skip -def test_filter_grep_count_output(): - text = 'a.py:3\nb.py:0\nc.py:1' - result = _filter_grep_count_output(text) - assert result == 'a.py:3\nc.py:1' - - -@docker_skip -def test_filter_grep_count_output_all_zero(): - text = 'a.py:0\nb.py:0' - result = _filter_grep_count_output(text) - assert result == '' - - def test_apply_edit_basic(): new_text, count = apply_edit('hello world', 'world', 'earth', 'test.txt', replace_all=False) assert new_text == 'hello earth' @@ -1977,256 +1806,893 @@ def mock_docker_sandbox(mock_container: MockContainer) -> Any: return sandbox -async def test_docker_execute(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.execute runs commands in container.""" - result = await mock_docker_sandbox.shell('echo hello') - assert result.exit_code == 0 - assert isinstance(result.output, str) +@pytest.mark.skipif(not docker_installed, reason='docker package not installed') +class TestDocker: + async def test_docker_execute(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute runs commands in container.""" + result = await mock_docker_sandbox.shell('echo hello') + assert result.exit_code == 0 + assert isinstance(result.output, str) + async def test_docker_execute_timeout(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute wraps command with timeout.""" + result = await mock_docker_sandbox.shell('echo test', timeout=30) + assert result.exit_code == 0 -async def test_docker_execute_timeout(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.execute wraps command with timeout.""" - result = await mock_docker_sandbox.shell('echo test', timeout=30) - assert result.exit_code == 0 + async def test_docker_execute_no_timeout(self, 
mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute with timeout=None.""" + result = await mock_docker_sandbox.shell('echo test', timeout=None) + assert result.exit_code == 0 + async def test_docker_execute_with_env(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute passes env vars.""" + result = await mock_docker_sandbox.shell('echo test', env={'KEY': 'value'}) + assert result.exit_code == 0 -async def test_docker_execute_no_timeout(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.execute with timeout=None.""" - result = await mock_docker_sandbox.shell('echo test', timeout=None) - assert result.exit_code == 0 + async def test_docker_write_read_file(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment write and read files.""" + await mock_docker_sandbox.write_file('test.txt', 'hello world\n') + content = await mock_docker_sandbox.read_file('test.txt') + assert isinstance(content, str) + async def test_docker_write_file_binary(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment write binary file.""" + await mock_docker_sandbox.write_file('data.bin', b'\x00\x01\x02') -async def test_docker_execute_with_env(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.execute passes env vars.""" - result = await mock_docker_sandbox.shell('echo test', env={'KEY': 'value'}) - assert result.exit_code == 0 + async def test_docker_read_file_not_found(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.read_file on missing file raises FileNotFoundError.""" + with pytest.raises(FileNotFoundError): + await mock_docker_sandbox.read_file('nonexistent.txt') + + async def test_docker_read_file_offset_out_of_range( + self, mock_docker_sandbox: Any, mock_container: MockContainer + ) -> None: + """DockerEnvironment.read_file raises ValueError when offset exceeds file length.""" + mock_container._files['/workspace/small.txt'] = b'line1\nline2\nline3\n' + with pytest.raises(ValueError, match='Offset 10 exceeds 
file length \\(3 lines\\)'): + await mock_docker_sandbox.read_file('small.txt', offset=10) + + async def test_docker_read_file_with_offset(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.read_file respects offset and limit.""" + mock_container._files['/workspace/lines.txt'] = b'a\nb\nc\nd\ne\n' + result = await mock_docker_sandbox.read_file('lines.txt', offset=2, limit=2) + assert isinstance(result, str) + assert ' 3\tc\n' in result + assert ' 4\td\n' in result + assert '... (1 more lines. Use offset=4 to continue reading.)' in result + + async def test_docker_read_file_image(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.read_file returns raw bytes for image files.""" + png_data = b'\x89PNG\r\n\x1a\n' + mock_container._files['/workspace/image.png'] = png_data + result = await mock_docker_sandbox.read_file('image.png') + assert isinstance(result, bytes) + assert result == png_data + async def test_docker_edit_file(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.edit_file replaces text.""" + mock_container._files['/workspace/code.py'] = b'old_value = 1' + count = await mock_docker_sandbox.replace_str('code.py', 'old_value', 'new_value') + assert count == 1 -async def test_docker_write_read_file(mock_docker_sandbox: Any) -> None: - """DockerEnvironment write and read files.""" - await mock_docker_sandbox.write_file('test.txt', 'hello world\n') - content = await mock_docker_sandbox.read_file('test.txt') - assert isinstance(content, str) + async def test_docker_ls(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls returns file entries.""" + mock_container._files['test.txt'] = b'hello' + entries = await mock_docker_sandbox.ls('.') + assert isinstance(entries, list) + async def test_docker_glob(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.glob returns matching 
paths.""" + matches = await mock_docker_sandbox.glob('*.py') + assert isinstance(matches, list) -async def test_docker_write_file_binary(mock_docker_sandbox: Any) -> None: - """DockerEnvironment write binary file.""" - await mock_docker_sandbox.write_file('data.bin', b'\x00\x01\x02') + async def test_docker_grep(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.grep returns matches.""" + result = await mock_docker_sandbox.grep('pattern') + assert isinstance(result, str) + async def test_docker_grep_with_options(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.grep with output_mode and glob_pattern.""" + result = await mock_docker_sandbox.grep('pattern', glob_pattern='*.py', output_mode='files_with_matches') + assert isinstance(result, str) -async def test_docker_read_file_not_found(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.read_file on missing file raises FileNotFoundError.""" - with pytest.raises(FileNotFoundError): - await mock_docker_sandbox.read_file('nonexistent.txt') + async def test_docker_grep_count(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.grep count mode filters zero-count results.""" + # Override exec_run to return count-style output + original_exec_run = mock_container.exec_run + def count_exec_run(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + if isinstance(cmd, list) and 'sh' in cmd[0]: + cmd_str = cmd[-1] if len(cmd) > 1 else '' + if 'grep' in cmd_str and '-c' in cmd_str: + return 0, b'a.py:3\nb.py:0\nc.py:1' + return original_exec_run(cmd, **kwargs) # pragma: no cover -async def test_docker_read_file_offset_out_of_range(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.read_file raises ValueError when offset exceeds file length.""" - mock_container._files['/workspace/small.txt'] = b'line1\nline2\nline3\n' - with pytest.raises(ValueError, match='Offset 10 exceeds file length \\(3 lines\\)'): - await 
mock_docker_sandbox.read_file('small.txt', offset=10) + mock_container.exec_run = count_exec_run # type: ignore[assignment] + result = await mock_docker_sandbox.grep('pattern', output_mode='count') + assert 'b.py:0' not in result + async def test_docker_container_property(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.container raises when not started.""" -async def test_docker_read_file_with_offset(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.read_file respects offset and limit.""" - mock_container._files['/workspace/lines.txt'] = b'a\nb\nc\nd\ne\n' - result = await mock_docker_sandbox.read_file('lines.txt', offset=2, limit=2) - assert isinstance(result, str) - assert ' 3\tc\n' in result - assert ' 4\td\n' in result - assert '... (1 more lines. Use offset=4 to continue reading.)' in result + sandbox = DockerEnvironment() + with pytest.raises(RuntimeError, match='not started'): + _ = sandbox.container + async def test_docker_create_process(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.create_process returns a DockerEnvironmentProcess.""" + proc = await mock_docker_sandbox.create_process('echo test') + assert proc is not None -async def test_docker_read_file_image(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.read_file returns raw bytes for image files.""" - png_data = b'\x89PNG\r\n\x1a\n' - mock_container._files['/workspace/image.png'] = png_data - result = await mock_docker_sandbox.read_file('image.png') - assert isinstance(result, bytes) - assert result == png_data + async def test_docker_is_alive(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.is_alive checks container status.""" + result = await mock_docker_sandbox.is_alive() + assert result is True + + async def test_docker_is_alive_not_started( + self, + ) -> None: + """DockerEnvironment.is_alive returns False when not started.""" + sandbox = DockerEnvironment() + result = 
await sandbox.is_alive() + assert result is False -async def test_docker_edit_file(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.edit_file replaces text.""" - mock_container._files['/workspace/code.py'] = b'old_value = 1' - count = await mock_docker_sandbox.replace_str('code.py', 'old_value', 'new_value') - assert count == 1 + async def test_docker_resolve_path(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment._resolve_path resolves relative paths.""" + assert mock_docker_sandbox._resolve_path('test.txt') == '/workspace/test.txt' + assert mock_docker_sandbox._resolve_path('/abs/path') == '/abs/path' + assert mock_docker_sandbox._resolve_path('sub/dir/file.py') == '/workspace/sub/dir/file.py' + + def test_docker_put_file(self) -> None: + """_put_file creates a tar archive and uploads it.""" + + container = MockContainer() + _put_file(container, '/workspace/test.txt', b'hello') # type: ignore[arg-type] + assert '/workspace/test.txt' in container._files + assert container._files['/workspace/test.txt'] == b'hello' + def test_docker_sandbox_process_read_frame(self) -> None: + """DockerEnvironmentProcess._read_frame parses multiplexed stream frames.""" -async def test_docker_ls(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls returns file entries.""" - mock_container._files['test.txt'] = b'hello' - entries = await mock_docker_sandbox.ls('.') - assert isinstance(entries, list) + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + # Create a mock socket with a multiplexed frame + stdout_data = b'hello from stdout' + header = struct.pack('>BxxxI', 1, len(stdout_data)) # stream_type=1 (stdout) -async def test_docker_glob(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.glob returns matching paths.""" - matches = await mock_docker_sandbox.glob('*.py') - assert isinstance(matches, list) + 
mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stdout_data] + proc._socket = mock_socket + stream_type, data = proc._read_frame() + assert stream_type == 1 + assert data == stdout_data -async def test_docker_grep(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.grep returns matches.""" - result = await mock_docker_sandbox.grep('pattern') - assert isinstance(result, str) + def test_docker_sandbox_process_read_frame_stderr(self) -> None: + """DockerEnvironmentProcess._read_frame handles stderr frames.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] -async def test_docker_grep_with_options(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.grep with output_mode and glob_pattern.""" - result = await mock_docker_sandbox.grep('pattern', glob_pattern='*.py', output_mode='files_with_matches') - assert isinstance(result, str) + stderr_data = b'error output' + header = struct.pack('>BxxxI', 2, len(stderr_data)) # stream_type=2 (stderr) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stderr_data] + proc._socket = mock_socket -async def test_docker_grep_count(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.grep count mode filters zero-count results.""" - # Override exec_run to return count-style output - original_exec_run = mock_container.exec_run + stream_type, data = proc._read_frame() + assert stream_type == 2 + assert data == stderr_data - def count_exec_run(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - if isinstance(cmd, list) and 'sh' in cmd[0]: - cmd_str = cmd[-1] if len(cmd) > 1 else '' - if 'grep' in cmd_str and '-c' in cmd_str: - return 0, b'a.py:3\nb.py:0\nc.py:1' - return original_exec_run(cmd, **kwargs) # pragma: no cover + def test_docker_sandbox_process_read_frame_eof(self) -> None: + """DockerEnvironmentProcess._read_frame returns empty on EOF.""" - mock_container.exec_run = 
count_exec_run # type: ignore[assignment] - result = await mock_docker_sandbox.grep('pattern', output_mode='count') - assert 'b.py:0' not in result + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + mock_socket.recv.return_value = b'' # EOF + proc._socket = mock_socket -@docker_skip -async def test_docker_container_property(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.container raises when not started.""" + stream_type, data = proc._read_frame() + assert stream_type == 0 + assert data == b'' + assert proc._eof is True + + def test_docker_sandbox_process_read_frame_zero_size(self) -> None: + """DockerEnvironmentProcess._read_frame handles zero-size frames.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + header = struct.pack('>BxxxI', 1, 0) # zero size + + mock_socket = MagicMock() + mock_socket.recv.return_value = header + proc._socket = mock_socket + + stream_type, data = proc._read_frame() + assert stream_type == 1 + assert data == b'' + + def test_docker_sandbox_process_already_eof(self) -> None: + """DockerEnvironmentProcess._read_frame returns empty when already at EOF.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._eof = True + + stream_type, data = proc._read_frame() + assert stream_type == 0 + assert data == b'' + + def test_docker_hardened_constructor( + self, + ): + """DockerEnvironment.hardened() returns a properly configured instance.""" + env = DockerEnvironment.hardened(image='python:3.12-slim', memory_limit='1g') + assert env._network_disabled is True + assert env._read_only is True + assert env._cap_drop == ['ALL'] + assert env._memory_limit == '1g' + assert env._user == 'nobody' + assert env._init is True + + def test_docker_setup_early_return( + self, + 
): + """DockerEnvironment._setup returns early if container already exists.""" + env = DockerEnvironment(image='python:3.12-slim') + env._container = MagicMock() + env._setup() # should not create a new container + assert env._client is None # docker.from_env() was never called + + async def test_docker_process_recv_stderr_no_buffer( + self, + ) -> None: + """DockerEnvironmentProcess.recv_stderr without buffered data (no timeout).""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + stderr_data = b'error output' + header = struct.pack('>BxxxI', 2, len(stderr_data)) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stderr_data] + proc._socket = mock_socket + + result = await proc.recv_stderr() + assert result == stderr_data + + async def test_docker_process_recv_stream_buffers_stdout( + self, + ) -> None: + """DockerEnvironmentProcess._recv_stream buffers stdout when stderr is wanted.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + # First frame is stdout (type 1), second is stderr (type 2) + stdout_data = b'stdout output' + stderr_data = b'stderr output' + stdout_header = struct.pack('>BxxxI', 1, len(stdout_data)) + stderr_header = struct.pack('>BxxxI', 2, len(stderr_data)) + + mock_socket = MagicMock() + mock_socket.recv.side_effect = [stdout_header, stdout_data, stderr_header, stderr_data] + proc._socket = mock_socket + + # Requesting stderr should buffer stdout and return stderr + result = await proc.recv_stderr() + assert result == stderr_data + assert proc._stdout_buffer == [stdout_data] + + async def test_docker_process_wait_no_timeout( + self, + ) -> None: + """DockerEnvironmentProcess.wait without timeout polls until returncode is set.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + 
proc._exec_id = 'exec-123' + # Mock exec_inspect to return "still running" first, then "exited" + call_count = 0 + + def mock_inspect(exec_id: str) -> dict[str, Any]: + nonlocal call_count + call_count += 1 + if call_count <= 1: + return {'Running': True, 'ExitCode': None} + return {'Running': False, 'ExitCode': 0} + + container.client.api.exec_inspect = mock_inspect + result = await proc.wait() + assert result == 0 + assert call_count >= 2 + + async def test_docker_process_wait_with_timeout( + self, + ) -> None: + """DockerEnvironmentProcess.wait with timeout.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._returncode = 42 + result = await proc.wait(timeout=5.0) + assert result == 42 + + async def test_docker_read_file_unicode_error( + self, mock_docker_sandbox: Any, mock_container: MockContainer + ) -> None: + """DockerEnvironment.read_file falls back to raw bytes on UnicodeDecodeError.""" + # Store a binary file (not an image extension) that will fail utf-8 decode + binary_data = b'\x80\x81\x82\xff' + mock_container._files['/workspace/data.bin'] = binary_data + + # Make the awk command return non-utf8 data to trigger UnicodeDecodeError + original = mock_container.exec_run + + def exec_with_binary(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd + if 'awk' in cmd_str and 'data.bin' in cmd_str: + return 0, b'\x80\x81\x82\xff' + return original(cmd, **kwargs) # pragma: no cover + + mock_container.exec_run = exec_with_binary # type: ignore[assignment] + result = await mock_docker_sandbox.read_file('data.bin') + assert isinstance(result, bytes) - sandbox = DockerEnvironment() - with pytest.raises(RuntimeError, match='not started'): - _ = sandbox.container + async def test_docker_ls_size_value_error(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls handles non-numeric size 
fields gracefully.""" + original = mock_container.exec_run + def exec_with_bad_size(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd + if 'ls -la' in cmd_str: + return 0, b'total 0\n-rw-r--r-- 1 root root NaN Jan 1 00:00 file.txt' + return original(cmd, **kwargs) # pragma: no cover -async def test_docker_create_process(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.create_process returns a DockerEnvironmentProcess.""" - proc = await mock_docker_sandbox.create_process('echo test') - assert proc is not None + mock_container.exec_run = exec_with_bad_size # type: ignore[assignment] + entries = await mock_docker_sandbox.ls() + assert len(entries) == 1 + assert entries[0].name == 'file.txt' + assert entries[0].size is None + async def test_docker_ls_short_line(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls skips lines with fewer than 9 fields.""" + original = mock_container.exec_run -async def test_docker_is_alive(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.is_alive checks container status.""" - result = await mock_docker_sandbox.is_alive() - assert result is True + def exec_with_short_lines(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd + if 'ls -la' in cmd_str: + return 0, b'total 0\nshort line\n-rw-r--r-- 1 root root 42 Jan 1 00:00 real.txt' + return original(cmd, **kwargs) # pragma: no cover + mock_container.exec_run = exec_with_short_lines # type: ignore[assignment] + entries = await mock_docker_sandbox.ls() + assert len(entries) == 1 + assert entries[0].name == 'real.txt' + + async def test_docker_is_alive_exception(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.is_alive returns False when reload raises.""" + mock_container.reload = MagicMock(side_effect=Exception('connection error')) + result = await 
mock_docker_sandbox.is_alive() + assert result is False -@docker_skip -async def test_docker_is_alive_not_started() -> None: - """DockerEnvironment.is_alive returns False when not started.""" + async def test_docker_is_alive_running(self, mock_docker_sandbox: Any) -> None: + """DockerEnvironment.is_alive returns True when running.""" + result = await mock_docker_sandbox.is_alive() + assert result is True - sandbox = DockerEnvironment() - result = await sandbox.is_alive() - assert result is False + async def test_docker_process_recv_with_buffered_data( + self, + ) -> None: + """DockerEnvironmentProcess.recv returns buffered stdout data first.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._stdout_buffer.append(b'buffered data') -async def test_docker_resolve_path(mock_docker_sandbox: Any) -> None: - """DockerEnvironment._resolve_path resolves relative paths.""" - assert mock_docker_sandbox._resolve_path('test.txt') == '/workspace/test.txt' - assert mock_docker_sandbox._resolve_path('/abs/path') == '/abs/path' - assert mock_docker_sandbox._resolve_path('sub/dir/file.py') == '/workspace/sub/dir/file.py' + result = await proc.recv() + assert result == b'buffered data' + assert proc._stdout_buffer == [] + async def test_docker_process_recv_stderr_with_buffered_data( + self, + ) -> None: + """DockerEnvironmentProcess.recv_stderr returns buffered stderr data first.""" -@docker_skip -def test_docker_put_file() -> None: - """_put_file creates a tar archive and uploads it.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._stderr_buffer.append(b'buffered error') - container = MockContainer() - _put_file(container, '/workspace/test.txt', b'hello') # type: ignore[arg-type] - assert '/workspace/test.txt' in container._files - assert container._files['/workspace/test.txt'] == b'hello' + result = await 
proc.recv_stderr() + assert result == b'buffered error' + assert proc._stderr_buffer == [] + async def test_docker_process_recv_stream_buffers_other( + self, + ) -> None: + """DockerEnvironmentProcess._recv_stream buffers frames for the other stream.""" -@docker_skip -def test_docker_sandbox_process_read_frame() -> None: - """DockerEnvironmentProcess._read_frame parses multiplexed stream frames.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + # First frame is stderr (type 2), second is stdout (type 1) + stderr_data = b'error output' + stdout_data = b'stdout output' + stderr_header = struct.pack('>BxxxI', 2, len(stderr_data)) + stdout_header = struct.pack('>BxxxI', 1, len(stdout_data)) - # Create a mock socket with a multiplexed frame - stdout_data = b'hello from stdout' - header = struct.pack('>BxxxI', 1, len(stdout_data)) # stream_type=1 (stdout) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [stderr_header, stderr_data, stdout_header, stdout_data] + proc._socket = mock_socket - mock_socket = MagicMock() - mock_socket.recv.side_effect = [header, stdout_data] - proc._socket = mock_socket + # Requesting stdout should buffer stderr and return stdout + result = await proc.recv() + assert result == stdout_data + assert proc._stderr_buffer == [stderr_data] - stream_type, data = proc._read_frame() - assert stream_type == 1 - assert data == stdout_data + async def test_docker_process_recv_stream_eof( + self, + ) -> None: + """DockerEnvironmentProcess._recv_stream returns empty on EOF.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] -@docker_skip -def test_docker_sandbox_process_read_frame_stderr() -> None: - """DockerEnvironmentProcess._read_frame handles stderr 
frames.""" + mock_socket = MagicMock() + mock_socket.recv.return_value = b'' # EOF + proc._socket = mock_socket - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + result = await proc.recv() + assert result == b'' - stderr_data = b'error output' - header = struct.pack('>BxxxI', 2, len(stderr_data)) # stream_type=2 (stderr) + async def test_docker_process_kill( + self, + ) -> None: + """DockerEnvironmentProcess.kill closes the socket.""" - mock_socket = MagicMock() - mock_socket.recv.side_effect = [header, stderr_data] - proc._socket = mock_socket + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + proc._socket = mock_socket - stream_type, data = proc._read_frame() - assert stream_type == 2 - assert data == stderr_data + await proc.kill() + mock_socket.close.assert_called_once() + async def test_docker_process_kill_oserror( + self, + ) -> None: + """DockerEnvironmentProcess.kill handles OSError.""" -@docker_skip -def test_docker_sandbox_process_read_frame_eof() -> None: - """DockerEnvironmentProcess._read_frame returns empty on EOF.""" + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + mock_socket.close.side_effect = OSError('socket error') + proc._socket = mock_socket - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + # Should not raise + await proc.kill() - mock_socket = MagicMock() - mock_socket.recv.return_value = b'' # EOF - proc._socket = mock_socket + async def test_docker_process_returncode( + self, + ) -> None: + """DockerEnvironmentProcess.returncode checks exec status.""" - stream_type, data = proc._read_frame() - assert stream_type == 0 - assert data == b'' - assert proc._eof is True + container 
= MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + # No exec_id means returncode is None + assert proc.returncode is None -@docker_skip -def test_docker_sandbox_process_read_frame_zero_size() -> None: - """DockerEnvironmentProcess._read_frame handles zero-size frames.""" + # With exec_id and cached returncode + proc._exec_id = 'exec-123' + proc._returncode = 0 + assert proc.returncode == 0 - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + async def test_docker_process_returncode_from_inspect( + self, + ) -> None: + """DockerEnvironmentProcess.returncode polls Docker API.""" - header = struct.pack('>BxxxI', 1, 0) # zero size + container = MockContainer() + container.client.api.exec_inspect.return_value = {'ExitCode': 42, 'Running': False} + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._exec_id = 'exec-123' - mock_socket = MagicMock() - mock_socket.recv.return_value = header - proc._socket = mock_socket + assert proc.returncode == 42 + assert proc._returncode == 42 - stream_type, data = proc._read_frame() - assert stream_type == 1 - assert data == b'' + async def test_docker_process_returncode_still_running( + self, + ) -> None: + """DockerEnvironmentProcess.returncode returns None when process is running (ExitCode=0, Running=True).""" + container = MockContainer() + # Docker returns ExitCode=0 + Running=True for still-running processes + container.client.api.exec_inspect.return_value = {'ExitCode': 0, 'Running': True} + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._exec_id = 'exec-123' -@docker_skip -def test_docker_sandbox_process_already_eof() -> None: - """DockerEnvironmentProcess._read_frame returns empty when already at EOF.""" + assert proc.returncode is None + + async def 
test_docker_process_returncode_inspect_error( + self, + ) -> None: + """DockerEnvironmentProcess.returncode handles API errors.""" + + container = MockContainer() + container.client.api.exec_inspect.side_effect = OSError('connection failed') + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc._exec_id = 'exec-123' + + assert proc.returncode is None + + async def test_docker_process_send( + self, + ) -> None: + """DockerEnvironmentProcess.send writes to socket.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + proc._socket = mock_socket + + await proc.send(b'hello') + mock_socket.sendall.assert_called_once_with(b'hello') + + async def test_docker_process_recv_with_timeout( + self, + ) -> None: + """DockerEnvironmentProcess.recv with timeout.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + stdout_data = b'data' + header = struct.pack('>BxxxI', 1, len(stdout_data)) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stdout_data] + proc._socket = mock_socket + + result = await proc.recv(timeout=5.0) + assert result == stdout_data + + async def test_docker_process_recv_stderr_with_timeout( + self, + ) -> None: + """DockerEnvironmentProcess.recv_stderr with timeout.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + stderr_data = b'error' + header = struct.pack('>BxxxI', 2, len(stderr_data)) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, stderr_data] + proc._socket = mock_socket + + result = await proc.recv_stderr(timeout=5.0) + assert result == stderr_data + + async def test_docker_read_frame_data_eof_during_read( + self, + ) -> None: + """DockerEnvironmentProcess._read_frame handles EOF during data 
read.""" + + container = MockContainer() + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + # Header says 100 bytes but socket returns less then EOF + header = struct.pack('>BxxxI', 1, 100) + mock_socket = MagicMock() + mock_socket.recv.side_effect = [header, b'partial', b''] # EOF during data + proc._socket = mock_socket + + stream_type, data = proc._read_frame() + assert stream_type == 1 + assert data == b'partial' + assert proc._eof is True + + async def test_docker_process_start_with_env( + self, + ) -> None: + """DockerEnvironmentProcess._do_start passes env to exec_create.""" + + container = MockContainer() + container.client.api.exec_create.return_value = {'Id': 'exec-test'} + mock_sock = MagicMock() + container.client.api.exec_start.return_value = mock_sock + + proc = DockerEnvironmentProcess( + container, # type: ignore[arg-type] + 'echo test', + '/workspace', + env={'FOO': 'bar'}, + ) + await proc._start() + + assert proc._exec_id == 'exec-test' + call_kwargs = container.client.api.exec_create.call_args[1] + assert call_kwargs['environment'] == {'FOO': 'bar'} + + async def test_docker_process_aenter( + self, + ) -> None: + """DockerEnvironmentProcess.__aenter__ starts the process.""" + + container = MockContainer() + container.client.api.exec_create.return_value = {'Id': 'exec-aenter'} + mock_sock = MagicMock() + container.client.api.exec_start.return_value = mock_sock + + proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + entered = await proc.__aenter__() + assert entered is proc + assert proc._exec_id == 'exec-aenter' + + async def test_docker_ls_not_found(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.ls raises NotADirectoryError on missing dirs.""" + original = mock_container.exec_run + + def fail_ls(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + if isinstance(cmd, list) and 'ls -la' in ' '.join(cmd): + 
return 1, b'ls: cannot access: No such file or directory' + return original(cmd, **kwargs) # pragma: no cover + + mock_container.exec_run = fail_ls # type: ignore[assignment] + with pytest.raises(NotADirectoryError): + await mock_docker_sandbox.ls('nonexistent') - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._eof = True + async def test_docker_read_file_image_not_found( + self, mock_docker_sandbox: Any, mock_container: MockContainer + ) -> None: + """DockerEnvironment.read_file raises FileNotFoundError for missing image files.""" - stream_type, data = proc._read_frame() - assert stream_type == 0 - assert data == b'' + def fail_get_archive(path: str) -> Any: + raise DockerNotFound('File not found') + + mock_container.get_archive = fail_get_archive + with pytest.raises(FileNotFoundError, match='File not found: missing.png'): + await mock_docker_sandbox.read_file('missing.png') + + # --- Additional Docker coverage: lifecycle, process, truncation --- + + async def test_docker_execute_truncation(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """DockerEnvironment.execute truncates long output.""" + original = mock_container.exec_run + + def big_output(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + if isinstance(cmd, list) and 'echo' in str(cmd): + return 0, b'x' * 200_000 + return original(cmd, **kwargs) # pragma: no cover + + mock_container.exec_run = big_output # type: ignore[assignment] + result = await mock_docker_sandbox.shell('echo big') + assert result.truncated is True + assert len(result.output) == 100_000 + + async def test_docker_execute_timeout_exit_code( + self, mock_docker_sandbox: Any, mock_container: MockContainer + ) -> None: + """DockerEnvironment.execute handles timeout exit code 124.""" + + def timeout_result(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + return 124, b'partial output' + + mock_container.exec_run = timeout_result # 
type: ignore[assignment] + result = await mock_docker_sandbox.shell('sleep 999', timeout=1) + assert result.exit_code == 124 + assert '[Command timed out]' in result.output + + async def test_docker_setup_teardown( + self, + ) -> None: + """DockerEnvironment._setup and _teardown with mocked Docker client.""" + sandbox = DockerEnvironment(image='python:3.12-slim') + + mock_client = MagicMock() + mock_container_obj = MagicMock() + mock_client.containers.run.return_value = mock_container_obj + + with mock_patch('pydantic_ai.environments.docker.docker') as mock_docker: + mock_docker.from_env.return_value = mock_client + sandbox._setup() + assert sandbox._container is not None + + # Teardown + sandbox._teardown() + mock_container_obj.stop.assert_called() + mock_container_obj.remove.assert_called() + assert sandbox._container is None + + async def test_docker_teardown_cleanup_errors( + self, + ) -> None: + """DockerEnvironment._teardown handles exceptions gracefully.""" + + sandbox = DockerEnvironment() + mock_container = MagicMock() + mock_container.stop.side_effect = Exception('stop failed') + mock_container.remove.side_effect = Exception('remove failed') + sandbox._container = mock_container + + # Should not raise + sandbox._teardown() + assert sandbox._container is None + + async def test_docker_setup_with_all_options( + self, + ) -> None: + """DockerEnvironment._setup passes all container options.""" + sandbox = DockerEnvironment( + image='python:3.12-slim', + env_vars={'KEY': 'val'}, + volumes={'/host': {'bind': '/container', 'mode': 'rw'}}, + memory_limit='512m', + cpu_limit=1.0, + pids_limit=256, + network_disabled=True, + read_only=True, + cap_drop=['ALL'], + security_opt=['no-new-privileges'], + user='nobody', + tmpfs={'/tmp': 'noexec,nosuid,size=64m'}, + init=True, + ) + + mock_client = MagicMock() + mock_container = MagicMock() + mock_client.containers.run.return_value = mock_container + + with mock_patch('pydantic_ai.environments.docker.docker') as 
mock_docker: + mock_docker.from_env.return_value = mock_client + sandbox._setup() + + call_kwargs = mock_client.containers.run.call_args[1] + assert call_kwargs['volumes'] == {'/host': {'bind': '/container', 'mode': 'rw'}} + assert call_kwargs['mem_limit'] == '512m' + assert call_kwargs['nano_cpus'] == int(1e9) + assert call_kwargs['pids_limit'] == 256 + assert call_kwargs['network_disabled'] is True + assert call_kwargs['read_only'] is True + assert call_kwargs['cap_drop'] == ['ALL'] + assert call_kwargs['security_opt'] == ['no-new-privileges'] + assert call_kwargs['user'] == 'nobody' + assert call_kwargs['tmpfs'] == {'/tmp': 'noexec,nosuid,size=64m'} + assert call_kwargs['init'] is True + + # --- Docker instantiation tests --- + + def test_docker_sandbox_instantiation( + self, + ): + """DockerEnvironment can be constructed without starting Docker.""" + + # Verify construction succeeds with default and custom settings + sandbox = DockerEnvironment(image='python:3.12-slim') + assert isinstance(sandbox, DockerEnvironment) + + sandbox_with_opts = DockerEnvironment( + image='node:20-slim', + memory_limit='512m', + cpu_limit=1.0, + network_disabled=True, + ) + assert isinstance(sandbox_with_opts, DockerEnvironment) + + # Verify security hardening parameters are accepted + sandbox_hardened = DockerEnvironment( + image='python:3.12-slim', + network_disabled=True, + read_only=True, + cap_drop=['ALL'], + security_opt=['no-new-privileges'], + user='nobody', + pids_limit=256, + tmpfs={'/tmp': 'noexec,nosuid,size=64m'}, + init=True, + ) + assert isinstance(sandbox_hardened, DockerEnvironment) + + def test_shell_escape(self): + assert _shell_escape('hello') == "'hello'" + assert _shell_escape("it's") == "'it'\\''s'" + assert _shell_escape('') == "''" + assert _shell_escape('a b c') == "'a b c'" + + def test_build_read_file_cmd_default(self): + cmd = _build_read_file_cmd('test.txt') + assert 'awk' in cmd + assert "'test.txt'" in cmd + assert 'NR>=1' in cmd + assert 'NR<=2000' 
in cmd + + def test_build_read_file_cmd_with_offset(self): + cmd = _build_read_file_cmd('file.py', offset=10, limit=50) + assert 'NR>=11' in cmd + assert 'NR<=60' in cmd + assert "'file.py'" in cmd + + def test_build_read_file_cmd_continuation_hint(self): + """_build_read_file_cmd includes a continuation hint in the awk END block.""" + cmd = _build_read_file_cmd('file.py', offset=0, limit=10) + assert 'more lines' in cmd + assert 'offset=10' in cmd + + def test_build_grep_cmd_content(self): + cmd = _build_grep_cmd('pattern') + assert 'grep -rIE' in cmd + assert '-n' in cmd + assert "'pattern'" in cmd + assert "'.'" in cmd + + def test_build_grep_cmd_files_with_matches(self): + cmd = _build_grep_cmd('pat', output_mode='files_with_matches') + assert '-l' in cmd + assert '-n' not in cmd + + def test_build_grep_cmd_count(self): + cmd = _build_grep_cmd('pat', output_mode='count') + assert '-c' in cmd + + def test_build_grep_cmd_with_path(self): + cmd = _build_grep_cmd('pat', path='src') + assert "'src'" in cmd + + def test_build_grep_cmd_with_glob_pattern(self): + """glob_pattern is shell-escaped to prevent injection.""" + cmd = _build_grep_cmd('pat', glob_pattern='*.py') + assert '--include' in cmd + assert "'*.py'" in cmd + + def test_build_grep_cmd_glob_pattern_escaping(self): + """Verify glob_pattern with special chars is properly shell-escaped.""" + cmd = _build_grep_cmd('pat', glob_pattern='*.py') + # The glob pattern should be shell-escaped (wrapped in single quotes) + assert "--include '*.py'" in cmd + + # Even a malicious glob_pattern gets safely escaped + cmd2 = _build_grep_cmd('pat', glob_pattern='$(evil)') + assert '$(evil)' not in cmd2.replace("'$(evil)'", '') # Only appears inside quotes + + def test_build_glob_cmd(self): + cmd = _build_glob_cmd('*.py') + assert 'find' in cmd + assert "'*.py'" in cmd + assert "'.'" in cmd + assert '-maxdepth 1' in cmd + + def test_build_glob_cmd_with_path(self): + cmd = _build_glob_cmd('*.py', path='src') + assert "'src'" 
in cmd + assert '-maxdepth 1' in cmd + + def test_build_glob_cmd_nested_pattern(self): + cmd = _build_glob_cmd('src/*.py') + assert '-maxdepth 2' in cmd + + def test_build_glob_cmd_recursive_no_maxdepth(self): + cmd = _build_glob_cmd('**/*.py') + assert '-maxdepth' not in cmd + + def test_parse_glob_output_empty(self): + assert _parse_glob_output('') == [] + assert _parse_glob_output(' ') == [] + assert _parse_glob_output('\n') == [] + + def test_parse_glob_output_multiline(self): + assert _parse_glob_output('a.py\nb.py\nc.py\n') == ['a.py', 'b.py', 'c.py'] + + def test_filter_grep_count_output(self): + text = 'a.py:3\nb.py:0\nc.py:1' + result = _filter_grep_count_output(text) + assert result == 'a.py:3\nc.py:1' + + def test_filter_grep_count_output_all_zero(self): + text = 'a.py:0\nb.py:0' + result = _filter_grep_count_output(text) + assert result == '' # --- Additional coverage: _base.py --- @@ -2505,387 +2971,6 @@ async def test_memory_glob_in_subdirectory_with_path_filter(): assert 'other.py' not in matches -# --- Additional Docker coverage: lifecycle, process, truncation --- - - -async def test_docker_execute_truncation(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.execute truncates long output.""" - original = mock_container.exec_run - - def big_output(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - if isinstance(cmd, list) and 'echo' in str(cmd): - return 0, b'x' * 200_000 - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = big_output # type: ignore[assignment] - result = await mock_docker_sandbox.shell('echo big') - assert result.truncated is True - assert len(result.output) == 100_000 - - -async def test_docker_execute_timeout_exit_code(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.execute handles timeout exit code 124.""" - - def timeout_result(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - return 124, b'partial output' - - 
mock_container.exec_run = timeout_result # type: ignore[assignment] - result = await mock_docker_sandbox.shell('sleep 999', timeout=1) - assert result.exit_code == 124 - assert '[Command timed out]' in result.output - - -@docker_skip -async def test_docker_setup_teardown() -> None: - """DockerEnvironment._setup and _teardown with mocked Docker client.""" - sandbox = DockerEnvironment(image='python:3.12-slim') - - mock_client = MagicMock() - mock_container_obj = MagicMock() - mock_client.containers.run.return_value = mock_container_obj - - with mock_patch('pydantic_ai.environments.docker.docker') as mock_docker: - mock_docker.from_env.return_value = mock_client - sandbox._setup() - assert sandbox._container is not None - - # Teardown - sandbox._teardown() - mock_container_obj.stop.assert_called() - mock_container_obj.remove.assert_called() - assert sandbox._container is None - - -@docker_skip -async def test_docker_teardown_cleanup_errors() -> None: - """DockerEnvironment._teardown handles exceptions gracefully.""" - - sandbox = DockerEnvironment() - mock_container = MagicMock() - mock_container.stop.side_effect = Exception('stop failed') - mock_container.remove.side_effect = Exception('remove failed') - sandbox._container = mock_container - - # Should not raise - sandbox._teardown() - assert sandbox._container is None - - -@docker_skip -async def test_docker_setup_with_all_options() -> None: - """DockerEnvironment._setup passes all container options.""" - sandbox = DockerEnvironment( - image='python:3.12-slim', - env_vars={'KEY': 'val'}, - volumes={'/host': {'bind': '/container', 'mode': 'rw'}}, - memory_limit='512m', - cpu_limit=1.0, - pids_limit=256, - network_disabled=True, - read_only=True, - cap_drop=['ALL'], - security_opt=['no-new-privileges'], - user='nobody', - tmpfs={'/tmp': 'noexec,nosuid,size=64m'}, - init=True, - ) - - mock_client = MagicMock() - mock_container = MagicMock() - mock_client.containers.run.return_value = mock_container - - with 
mock_patch('pydantic_ai.environments.docker.docker') as mock_docker: - mock_docker.from_env.return_value = mock_client - sandbox._setup() - - call_kwargs = mock_client.containers.run.call_args[1] - assert call_kwargs['volumes'] == {'/host': {'bind': '/container', 'mode': 'rw'}} - assert call_kwargs['mem_limit'] == '512m' - assert call_kwargs['nano_cpus'] == int(1e9) - assert call_kwargs['pids_limit'] == 256 - assert call_kwargs['network_disabled'] is True - assert call_kwargs['read_only'] is True - assert call_kwargs['cap_drop'] == ['ALL'] - assert call_kwargs['security_opt'] == ['no-new-privileges'] - assert call_kwargs['user'] == 'nobody' - assert call_kwargs['tmpfs'] == {'/tmp': 'noexec,nosuid,size=64m'} - assert call_kwargs['init'] is True - - -@docker_skip -async def test_docker_process_recv_with_buffered_data() -> None: - """DockerEnvironmentProcess.recv returns buffered stdout data first.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._stdout_buffer.append(b'buffered data') - - result = await proc.recv() - assert result == b'buffered data' - assert proc._stdout_buffer == [] - - -@docker_skip -async def test_docker_process_recv_stderr_with_buffered_data() -> None: - """DockerEnvironmentProcess.recv_stderr returns buffered stderr data first.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._stderr_buffer.append(b'buffered error') - - result = await proc.recv_stderr() - assert result == b'buffered error' - assert proc._stderr_buffer == [] - - -@docker_skip -async def test_docker_process_recv_stream_buffers_other() -> None: - """DockerEnvironmentProcess._recv_stream buffers frames for the other stream.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - - # First frame is stderr (type 2), second is 
stdout (type 1) - stderr_data = b'error output' - stdout_data = b'stdout output' - stderr_header = struct.pack('>BxxxI', 2, len(stderr_data)) - stdout_header = struct.pack('>BxxxI', 1, len(stdout_data)) - - mock_socket = MagicMock() - mock_socket.recv.side_effect = [stderr_header, stderr_data, stdout_header, stdout_data] - proc._socket = mock_socket - - # Requesting stdout should buffer stderr and return stdout - result = await proc.recv() - assert result == stdout_data - assert proc._stderr_buffer == [stderr_data] - - -@docker_skip -async def test_docker_process_recv_stream_eof() -> None: - """DockerEnvironmentProcess._recv_stream returns empty on EOF.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - - mock_socket = MagicMock() - mock_socket.recv.return_value = b'' # EOF - proc._socket = mock_socket - - result = await proc.recv() - assert result == b'' - - -@docker_skip -async def test_docker_process_kill() -> None: - """DockerEnvironmentProcess.kill closes the socket.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - mock_socket = MagicMock() - proc._socket = mock_socket - - await proc.kill() - mock_socket.close.assert_called_once() - - -@docker_skip -async def test_docker_process_kill_oserror() -> None: - """DockerEnvironmentProcess.kill handles OSError.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - mock_socket = MagicMock() - mock_socket.close.side_effect = OSError('socket error') - proc._socket = mock_socket - - # Should not raise - await proc.kill() - - -@docker_skip -async def test_docker_process_returncode() -> None: - """DockerEnvironmentProcess.returncode checks exec status.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: 
ignore[arg-type] - - # No exec_id means returncode is None - assert proc.returncode is None - - # With exec_id and cached returncode - proc._exec_id = 'exec-123' - proc._returncode = 0 - assert proc.returncode == 0 - - -@docker_skip -async def test_docker_process_returncode_from_inspect() -> None: - """DockerEnvironmentProcess.returncode polls Docker API.""" - - container = MockContainer() - container.client.api.exec_inspect.return_value = {'ExitCode': 42, 'Running': False} - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._exec_id = 'exec-123' - - assert proc.returncode == 42 - assert proc._returncode == 42 - - -@docker_skip -async def test_docker_process_returncode_still_running() -> None: - """DockerEnvironmentProcess.returncode returns None when process is running (ExitCode=0, Running=True).""" - - container = MockContainer() - # Docker returns ExitCode=0 + Running=True for still-running processes - container.client.api.exec_inspect.return_value = {'ExitCode': 0, 'Running': True} - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._exec_id = 'exec-123' - - assert proc.returncode is None - - -@docker_skip -async def test_docker_process_returncode_inspect_error() -> None: - """DockerEnvironmentProcess.returncode handles API errors.""" - - container = MockContainer() - container.client.api.exec_inspect.side_effect = OSError('connection failed') - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._exec_id = 'exec-123' - - assert proc.returncode is None - - -@docker_skip -async def test_docker_process_send() -> None: - """DockerEnvironmentProcess.send writes to socket.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - mock_socket = MagicMock() - proc._socket = mock_socket - - await proc.send(b'hello') - 
mock_socket.sendall.assert_called_once_with(b'hello') - - -@docker_skip -async def test_docker_process_recv_with_timeout() -> None: - """DockerEnvironmentProcess.recv with timeout.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - - stdout_data = b'data' - header = struct.pack('>BxxxI', 1, len(stdout_data)) - mock_socket = MagicMock() - mock_socket.recv.side_effect = [header, stdout_data] - proc._socket = mock_socket - - result = await proc.recv(timeout=5.0) - assert result == stdout_data - - -@docker_skip -async def test_docker_process_recv_stderr_with_timeout() -> None: - """DockerEnvironmentProcess.recv_stderr with timeout.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - - stderr_data = b'error' - header = struct.pack('>BxxxI', 2, len(stderr_data)) - mock_socket = MagicMock() - mock_socket.recv.side_effect = [header, stderr_data] - proc._socket = mock_socket - - result = await proc.recv_stderr(timeout=5.0) - assert result == stderr_data - - -@docker_skip -async def test_docker_read_frame_data_eof_during_read() -> None: - """DockerEnvironmentProcess._read_frame handles EOF during data read.""" - - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - - # Header says 100 bytes but socket returns less then EOF - header = struct.pack('>BxxxI', 1, 100) - mock_socket = MagicMock() - mock_socket.recv.side_effect = [header, b'partial', b''] # EOF during data - proc._socket = mock_socket - - stream_type, data = proc._read_frame() - assert stream_type == 1 - assert data == b'partial' - assert proc._eof is True - - -@docker_skip -async def test_docker_process_start_with_env() -> None: - """DockerEnvironmentProcess._do_start passes env to exec_create.""" - - container = MockContainer() - container.client.api.exec_create.return_value = 
{'Id': 'exec-test'} - mock_sock = MagicMock() - container.client.api.exec_start.return_value = mock_sock - - proc = DockerEnvironmentProcess( - container, # type: ignore[arg-type] - 'echo test', - '/workspace', - env={'FOO': 'bar'}, - ) - await proc._start() - - assert proc._exec_id == 'exec-test' - call_kwargs = container.client.api.exec_create.call_args[1] - assert call_kwargs['environment'] == {'FOO': 'bar'} - - -@docker_skip -async def test_docker_process_aenter() -> None: - """DockerEnvironmentProcess.__aenter__ starts the process.""" - - container = MockContainer() - container.client.api.exec_create.return_value = {'Id': 'exec-aenter'} - mock_sock = MagicMock() - container.client.api.exec_start.return_value = mock_sock - - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - entered = await proc.__aenter__() - assert entered is proc - assert proc._exec_id == 'exec-aenter' - - -async def test_docker_ls_not_found(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls raises NotADirectoryError on missing dirs.""" - original = mock_container.exec_run - - def fail_ls(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - if isinstance(cmd, list) and 'ls -la' in ' '.join(cmd): - return 1, b'ls: cannot access: No such file or directory' - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = fail_ls # type: ignore[assignment] - with pytest.raises(NotADirectoryError): - await mock_docker_sandbox.ls('nonexistent') - - -@docker_skip -async def test_docker_read_file_image_not_found(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.read_file raises FileNotFoundError for missing image files.""" - - def fail_get_archive(path: str) -> Any: - raise DockerNotFound('File not found') - - mock_container.get_archive = fail_get_archive - with pytest.raises(FileNotFoundError, match='File not found: missing.png'): - await 
mock_docker_sandbox.read_file('missing.png') - - async def test_local_process_wait_no_timeout(tmp_path: Path): """LocalEnvironmentProcess.wait without timeout (line 74).""" env = LocalEnvironment(tmp_path) @@ -3188,168 +3273,6 @@ def capabilities(self) -> frozenset[EnvToolName]: # --- Coverage gap tests --- -@docker_skip -def test_docker_hardened_constructor(): - """DockerEnvironment.hardened() returns a properly configured instance.""" - env = DockerEnvironment.hardened(image='python:3.12-slim', memory_limit='1g') - assert env._network_disabled is True - assert env._read_only is True - assert env._cap_drop == ['ALL'] - assert env._memory_limit == '1g' - assert env._user == 'nobody' - assert env._init is True - - -@docker_skip -def test_docker_setup_early_return(): - """DockerEnvironment._setup returns early if container already exists.""" - env = DockerEnvironment(image='python:3.12-slim') - env._container = MagicMock() - env._setup() # should not create a new container - assert env._client is None # docker.from_env() was never called - - -@docker_skip -async def test_docker_process_recv_stderr_no_buffer() -> None: - """DockerEnvironmentProcess.recv_stderr without buffered data (no timeout).""" - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - - stderr_data = b'error output' - header = struct.pack('>BxxxI', 2, len(stderr_data)) - mock_socket = MagicMock() - mock_socket.recv.side_effect = [header, stderr_data] - proc._socket = mock_socket - - result = await proc.recv_stderr() - assert result == stderr_data - - -@docker_skip -async def test_docker_process_recv_stream_buffers_stdout() -> None: - """DockerEnvironmentProcess._recv_stream buffers stdout when stderr is wanted.""" - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - - # First frame is stdout (type 1), second is stderr (type 2) - stdout_data = b'stdout 
output' - stderr_data = b'stderr output' - stdout_header = struct.pack('>BxxxI', 1, len(stdout_data)) - stderr_header = struct.pack('>BxxxI', 2, len(stderr_data)) - - mock_socket = MagicMock() - mock_socket.recv.side_effect = [stdout_header, stdout_data, stderr_header, stderr_data] - proc._socket = mock_socket - - # Requesting stderr should buffer stdout and return stderr - result = await proc.recv_stderr() - assert result == stderr_data - assert proc._stdout_buffer == [stdout_data] - - -@docker_skip -async def test_docker_process_wait_no_timeout() -> None: - """DockerEnvironmentProcess.wait without timeout polls until returncode is set.""" - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._exec_id = 'exec-123' - # Mock exec_inspect to return "still running" first, then "exited" - call_count = 0 - - def mock_inspect(exec_id: str) -> dict[str, Any]: - nonlocal call_count - call_count += 1 - if call_count <= 1: - return {'Running': True, 'ExitCode': None} - return {'Running': False, 'ExitCode': 0} - - container.client.api.exec_inspect = mock_inspect - result = await proc.wait() - assert result == 0 - assert call_count >= 2 - - -@docker_skip -async def test_docker_process_wait_with_timeout() -> None: - """DockerEnvironmentProcess.wait with timeout.""" - container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] - proc._returncode = 42 - result = await proc.wait(timeout=5.0) - assert result == 42 - - -@docker_skip -async def test_docker_read_file_unicode_error(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.read_file falls back to raw bytes on UnicodeDecodeError.""" - # Store a binary file (not an image extension) that will fail utf-8 decode - binary_data = b'\x80\x81\x82\xff' - mock_container._files['/workspace/data.bin'] = binary_data - - # Make the awk command return non-utf8 
data to trigger UnicodeDecodeError - original = mock_container.exec_run - - def exec_with_binary(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd - if 'awk' in cmd_str and 'data.bin' in cmd_str: - return 0, b'\x80\x81\x82\xff' - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = exec_with_binary # type: ignore[assignment] - result = await mock_docker_sandbox.read_file('data.bin') - assert isinstance(result, bytes) - - -@docker_skip -async def test_docker_ls_size_value_error(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls handles non-numeric size fields gracefully.""" - original = mock_container.exec_run - - def exec_with_bad_size(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd - if 'ls -la' in cmd_str: - return 0, b'total 0\n-rw-r--r-- 1 root root NaN Jan 1 00:00 file.txt' - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = exec_with_bad_size # type: ignore[assignment] - entries = await mock_docker_sandbox.ls() - assert len(entries) == 1 - assert entries[0].name == 'file.txt' - assert entries[0].size is None - - -@docker_skip -async def test_docker_ls_short_line(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls skips lines with fewer than 9 fields.""" - original = mock_container.exec_run - - def exec_with_short_lines(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd - if 'ls -la' in cmd_str: - return 0, b'total 0\nshort line\n-rw-r--r-- 1 root root 42 Jan 1 00:00 real.txt' - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = exec_with_short_lines # type: ignore[assignment] - entries = await mock_docker_sandbox.ls() - assert len(entries) == 1 - assert entries[0].name == 'real.txt' - - -@docker_skip -async def 
test_docker_is_alive_exception(mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.is_alive returns False when reload raises.""" - mock_container.reload = MagicMock(side_effect=Exception('connection error')) - result = await mock_docker_sandbox.is_alive() - assert result is False - - -@docker_skip -async def test_docker_is_alive_running(mock_docker_sandbox: Any) -> None: - """DockerEnvironment.is_alive returns True when running.""" - result = await mock_docker_sandbox.is_alive() - assert result is True - - async def test_local_recv_no_timeout(tmp_path: Path): """LocalEnvironmentProcess.recv without timeout returns data.""" env = LocalEnvironment(tmp_path) From b91be81120c20bfcfa928b380c2ec8f11b7d3ec1 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Sun, 22 Feb 2026 22:10:54 +0000 Subject: [PATCH 20/49] Use anyio.Lock --- .../pydantic_ai/toolsets/execution_environment.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index 36e4cf77a1..f38a601a7e 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -4,12 +4,12 @@ import posixpath import re -from asyncio import Lock from collections.abc import Callable, Iterator, Sequence from contextlib import AsyncExitStack, contextmanager from contextvars import ContextVar, Token from typing import TYPE_CHECKING, Any, Literal +import anyio from typing_extensions import Self from ..environments._base import ( @@ -115,7 +115,7 @@ def __init__( self._max_image_bytes = max_image_bytes self._require_shell_approval = require_shell_approval self._require_write_approval = require_write_approval - self._enter_lock: Lock = Lock() + self._enter_lock = anyio.Lock() self._running_count: int = 0 self._exit_stack: 
AsyncExitStack | None = None From 8392293723566ae5bcf3c4cd008cfac358535067 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:55:50 +0000 Subject: [PATCH 21/49] Fix Docker glob missing root-level files for **/ patterns --- .../pydantic_ai/environments/docker.py | 16 +++++++++++----- tests/test_environments.py | 8 ++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index b529a5dba9..6fab750e6d 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -99,11 +99,17 @@ def _build_glob_cmd(pattern: str, *, path: str = '.') -> str: depth_flag = '' else: depth_flag = f' -maxdepth {pattern.count("/") + 1}' - return ( - f'find {_shell_escape(path)}{depth_flag}' - f' \\( -path {_shell_escape(path_pattern)} -o -name {_shell_escape(pattern)} \\)' - f' 2>/dev/null | head -100' - ) + conditions = [f'-path {_shell_escape(path_pattern)}', f'-name {_shell_escape(pattern)}'] + # `**/' in find's -path requires at least one directory level, so files at + # the root of the search path won't match. Add a condition for the suffix + # after `**/` to handle the zero-directory-levels case. 
+ if pattern.startswith('**/'): + suffix = pattern[3:] + if '/' in suffix: + conditions.append(f'-path {_shell_escape(path + "/" + suffix)}') + else: + conditions.append(f'-name {_shell_escape(suffix)}') + return f'find {_shell_escape(path)}{depth_flag} \\( {" -o ".join(conditions)} \\) 2>/dev/null | head -100' def _parse_glob_output(text: str) -> list[str]: diff --git a/tests/test_environments.py b/tests/test_environments.py index f19c90756e..b416696ef6 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -2675,6 +2675,14 @@ def test_build_glob_cmd_nested_pattern(self): def test_build_glob_cmd_recursive_no_maxdepth(self): cmd = _build_glob_cmd('**/*.py') assert '-maxdepth' not in cmd + # Root-level files should also match via the -name suffix condition + assert "-name '*.py'" in cmd + + def test_build_glob_cmd_recursive_with_subdir(self): + cmd = _build_glob_cmd('**/subdir/*.py') + assert '-maxdepth' not in cmd + # Should include a -path condition for the root-level subdir case + assert "-path './subdir/*.py'" in cmd def test_parse_glob_output_empty(self): assert _parse_glob_output('') == [] From a539b4efa02796ee4f57e11779a5b8471b55388c Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 01:28:08 +0000 Subject: [PATCH 22/49] Add environment-specific tool descriptions (regex flavor docs for grep) ExecutionEnvironmentToolset.get_tools() now pulls tool descriptions from the active environment's method docstrings when present, replacing the generic defaults. This lets each environment document its specific behavior for the LLM (e.g. regex syntax for grep). 
- DockerEnvironment.grep: documents POSIX ERE (grep -E) limitations - LocalEnvironment.grep / MemoryEnvironment.grep: notes Python re syntax --- .../pydantic_ai/environments/docker.py | 7 +++++ .../pydantic_ai/environments/local.py | 1 + .../pydantic_ai/environments/memory.py | 1 + .../toolsets/execution_environment.py | 28 +++++++++++++++++-- 4 files changed, 35 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 6fab750e6d..49faed94c3 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -663,6 +663,13 @@ async def grep( glob_pattern: str | None = None, output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', ) -> str: + r"""Search file contents using a regex pattern. + + Patterns use POSIX Extended Regular Expression (ERE) syntax, as interpreted by `grep -E`. + Supported: `|`, `+`, `?`, `()`, `{}`, character classes like `[[:digit:]]`. + Not available: lookaheads/lookbehinds, `\d`, `\w`, `\b`, non-greedy quantifiers (`*?`, `+?`). 
+ """ + def _grep() -> str: cmd = _build_grep_cmd(pattern, path=path, glob_pattern=glob_pattern, output_mode=output_mode) _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index a6f698645e..db149f4000 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -308,6 +308,7 @@ async def grep( glob_pattern: str | None = None, output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', ) -> str: + """Search file contents using a regex pattern (Python `re` module syntax).""" search_dir = self._resolve_path(path or '.') compiled = re.compile(pattern) diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index da60574d9a..c3c8d2f58c 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -231,6 +231,7 @@ async def grep( glob_pattern: str | None = None, output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', ) -> str: + """Search file contents using a regex pattern (Python `re` module syntax).""" normalized = self._normalize(path or '.') compiled = re.compile(pattern) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index f38a601a7e..9c484bae81 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -2,11 +2,13 @@ from __future__ import annotations +import inspect import posixpath import re from collections.abc import Callable, Iterator, Sequence from contextlib import AsyncExitStack, contextmanager from contextvars import ContextVar, Token +from dataclasses import replace from typing import TYPE_CHECKING, Any, 
Literal import anyio @@ -27,6 +29,12 @@ from ..toolsets.abstract import ToolsetTool +_TOOL_TO_ENV_METHOD: dict[str, str] = { + 'edit_file': 'replace_str', +} +"""Map tool names to environment method names where they differ.""" + + class ExecutionEnvironmentToolset(FunctionToolset[Any]): """Toolset providing coding-agent-style tools backed by an `ExecutionEnvironment`. @@ -327,8 +335,24 @@ async def grep_tool( async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: all_tools = await super().get_tools(ctx) - tool_names = self._resolve_tool_names(self.required_environment) - return {name: tool for name, tool in all_tools.items() if name in tool_names} + env = self.required_environment + tool_names = self._resolve_tool_names(env) + filtered = {name: tool for name, tool in all_tools.items() if name in tool_names} + + # Override tool descriptions from environment method docstrings. + # Each environment subclass can document its tool methods with LLM-facing + # docstrings (e.g. explaining regex flavor for grep); if present, these + # replace the generic default description. 
+ env_type = type(env) + for tool_name, tool in filtered.items(): + method_name = _TOOL_TO_ENV_METHOD.get(tool_name, tool_name) + env_method = getattr(env_type, method_name, None) + base_method = getattr(ExecutionEnvironment, method_name, None) + if env_method is not None and env_method is not base_method and env_method.__doc__: + desc = inspect.cleandoc(env_method.__doc__) + filtered[tool_name] = replace(tool, tool_def=replace(tool.tool_def, description=desc)) + + return filtered @property def tool_name_conflict_hint(self) -> str: From e567e98d763df820ec72a8726f8c214ebb4b6038 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 01:45:55 +0000 Subject: [PATCH 23/49] Use _shared_environment directly in shared lifecycle __aenter__ Avoids a subtle interaction where use_environment() override could be entered into the shared exit stack instead of the actual shared environment. --- .../pydantic_ai/toolsets/execution_environment.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index 9c484bae81..e080bb6395 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -418,9 +418,17 @@ async def __aenter__(self) -> Self: async with self._enter_lock: self._running_count += 1 if self._running_count == 1: + # Use _shared_environment directly (not required_environment) to avoid + # entering a use_environment() override into the shared exit stack. + env = self._shared_environment + if env is None: + self._running_count -= 1 + raise RuntimeError( + 'No execution environment configured. Pass one to ExecutionEnvironmentToolset() or use environment_factory.' 
+ ) self._exit_stack = AsyncExitStack() try: - await self._exit_stack.enter_async_context(self.required_environment) + await self._exit_stack.enter_async_context(env) except Exception: self._running_count -= 1 raise From df4fe1a370395bc2729f476ee910533bbc5d833f Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 02:01:18 +0000 Subject: [PATCH 24/49] Add test for __aenter__ with no environment configured --- tests/test_environments.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/test_environments.py b/tests/test_environments.py index b416696ef6..5fb4f976d6 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -855,6 +855,13 @@ async def test_toolset_tool_name_conflict_hint(): # --- ExecutionEnvironmentToolset: lifecycle --- +async def test_toolset_enter_no_environment_raises(): + toolset = ExecutionEnvironmentToolset() + with pytest.raises(RuntimeError, match='No execution environment configured'): + async with toolset: + pass + + async def test_toolset_lifecycle(tmp_path: Path): env = LocalEnvironment(tmp_path) toolset = ExecutionEnvironmentToolset(env) From 4e01113b07f0ba78c8c82073eaf24e70ab2c01d7 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 09:52:45 +0000 Subject: [PATCH 25/49] Strip ./ prefix from Docker glob and grep output for consistency with Local/Memory environments --- pydantic_ai_slim/pydantic_ai/environments/docker.py | 4 +++- tests/test_environments.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 49faed94c3..4d3d758835 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -117,7 +117,7 @@ def _parse_glob_output(text: str) -> list[str]: text = text.strip() if not text: return [] - 
return [line for line in text.splitlines() if line] + return [line.removeprefix('./') for line in text.splitlines() if line] def _put_file(container: Container, path: str, data: bytes) -> None: @@ -674,6 +674,8 @@ def _grep() -> str: cmd = _build_grep_cmd(pattern, path=path, glob_pattern=glob_pattern, output_mode=output_mode) _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) text = output.decode('utf-8', errors='replace').strip() + # Strip `./` prefix from paths to match Local/Memory environment output + text = '\n'.join(line.removeprefix('./') for line in text.splitlines()) if output_mode == 'count': text = _filter_grep_count_output(text) return text diff --git a/tests/test_environments.py b/tests/test_environments.py index 5fb4f976d6..be31082f3a 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -2699,6 +2699,9 @@ def test_parse_glob_output_empty(self): def test_parse_glob_output_multiline(self): assert _parse_glob_output('a.py\nb.py\nc.py\n') == ['a.py', 'b.py', 'c.py'] + def test_parse_glob_output_strips_dot_slash(self): + assert _parse_glob_output('./a.py\n./src/b.py\nc.py\n') == ['a.py', 'src/b.py', 'c.py'] + def test_filter_grep_count_output(self): text = 'a.py:3\nb.py:0\nc.py:1' result = _filter_grep_count_output(text) From 99ae9c90623c15de86ef74baa51336eceaf6463b Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:09:12 +0000 Subject: [PATCH 26/49] Handle mid-pattern **/ in Docker glob to match zero directories find's -path treats the literal / in **/ as requiring at least one directory level. Generalize the existing startswith('**/') handling to cover **/ appearing anywhere in the pattern by generating all collapsed variants. 
--- .../pydantic_ai/environments/docker.py | 46 +++++++++++++++---- tests/test_environments.py | 28 +++++++++++ 2 files changed, 66 insertions(+), 8 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 4d3d758835..37d2dfad3d 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -86,6 +86,37 @@ def _filter_grep_count_output(text: str) -> str: return '\n'.join(line for line in text.splitlines() if not line.endswith(':0')) +def _globstar_zero_dir_variants(pattern: str) -> list[str]: + """Generate variants of *pattern* with one or more ``**/`` segments collapsed. + + In ``find -path``, the literal ``/`` inside ``**/`` requires at least one + directory level, so ``**`` never matches zero directories. This helper + produces all the collapsed forms needed to cover the zero-directory case. + + Examples:: + + '**/*.py' → ['*.py'] + 'src/**/*.py' → ['src/*.py'] + '**/src/**/*.py' → ['**/src/*.py', 'src/**/*.py', 'src/*.py'] + """ + segments = pattern.split('**/') + if len(segments) <= 1: + return [] + n = len(segments) - 1 # number of **/ occurrences + all_kept = (1 << n) - 1 + variants: set[str] = set() + for mask in range(all_kept): # every subset except "all kept" (= original) + result = segments[0] + for i in range(n): + if mask & (1 << i): + result += '**/' + segments[i + 1] + else: + result += segments[i + 1] + if result: + variants.add(result) + return sorted(variants) + + def _build_glob_cmd(pattern: str, *, path: str = '.') -> str: """Build a shell `find` command to match files by pattern. 
@@ -100,15 +131,14 @@ def _build_glob_cmd(pattern: str, *, path: str = '.') -> str: else: depth_flag = f' -maxdepth {pattern.count("/") + 1}' conditions = [f'-path {_shell_escape(path_pattern)}', f'-name {_shell_escape(pattern)}'] - # `**/' in find's -path requires at least one directory level, so files at - # the root of the search path won't match. Add a condition for the suffix - # after `**/` to handle the zero-directory-levels case. - if pattern.startswith('**/'): - suffix = pattern[3:] - if '/' in suffix: - conditions.append(f'-path {_shell_escape(path + "/" + suffix)}') + # `**/` in find's -path requires at least one directory level, so `**` + # never matches zero directories. Add conditions for every collapsed + # variant to cover the zero-directory case(s). + for variant in _globstar_zero_dir_variants(pattern): + if '/' in variant: + conditions.append(f'-path {_shell_escape(path + "/" + variant)}') else: - conditions.append(f'-name {_shell_escape(suffix)}') + conditions.append(f'-name {_shell_escape(variant)}') return f'find {_shell_escape(path)}{depth_flag} \\( {" -o ".join(conditions)} \\) 2>/dev/null | head -100' diff --git a/tests/test_environments.py b/tests/test_environments.py index be31082f3a..8323af1431 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -44,6 +44,7 @@ _build_grep_cmd, _build_read_file_cmd, _filter_grep_count_output, + _globstar_zero_dir_variants, _parse_glob_output, _put_file, _shell_escape, @@ -2691,6 +2692,33 @@ def test_build_glob_cmd_recursive_with_subdir(self): # Should include a -path condition for the root-level subdir case assert "-path './subdir/*.py'" in cmd + def test_build_glob_cmd_mid_pattern_globstar(self): + """Mid-pattern **/ should add a zero-directory fallback condition.""" + cmd = _build_glob_cmd('src/**/*.py') + assert '-maxdepth' not in cmd + # The zero-directory collapse of src/**/*.py → src/*.py + assert "-path './src/*.py'" in cmd + + def 
test_build_glob_cmd_multiple_globstars(self): + """Multiple **/ segments should generate all collapsed variants.""" + cmd = _build_glob_cmd('**/src/**/*.py') + assert '-maxdepth' not in cmd + # All three collapsed variants + assert "-path './**/src/*.py'" in cmd + assert "-path './src/**/*.py'" in cmd + assert "-path './src/*.py'" in cmd + + def test_globstar_zero_dir_variants(self): + assert _globstar_zero_dir_variants('*.py') == [] + assert _globstar_zero_dir_variants('src/*.py') == [] + assert _globstar_zero_dir_variants('**/*.py') == ['*.py'] + assert _globstar_zero_dir_variants('src/**/*.py') == ['src/*.py'] + assert sorted(_globstar_zero_dir_variants('**/src/**/*.py')) == [ + '**/src/*.py', + 'src/**/*.py', + 'src/*.py', + ] + def test_parse_glob_output_empty(self): assert _parse_glob_output('') == [] assert _parse_glob_output(' ') == [] From 32a0ce7920b2e8e714e0ce6672dfa2e916d4ac40 Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 11:45:16 +0000 Subject: [PATCH 27/49] Remove unreachable guard in _globstar_zero_dir_variants to fix coverage --- pydantic_ai_slim/pydantic_ai/environments/docker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 37d2dfad3d..281d4fc409 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -112,8 +112,7 @@ def _globstar_zero_dir_variants(pattern: str) -> list[str]: result += '**/' + segments[i + 1] else: result += segments[i + 1] - if result: - variants.add(result) + variants.add(result) return sorted(variants) From 561eeda37de10261b16bb5105ea3e1a979e6740f Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Mon, 23 Feb 2026 16:40:53 +0000 Subject: [PATCH 28/49] Address review feedback: testable doc examples, public files 
accessor, API docs - Mock DockerEnvironment with LocalEnvironment in test harness so 11 of 15 environment doc examples now run in CI (up from 2) - Add public `files` property to MemoryEnvironment for test assertions - Add EnvToolName to API reference members list --- docs/api/environments.md | 1 + docs/environments.md | 26 +++++++---- .../pydantic_ai/environments/memory.py | 12 +++++- tests/test_examples.py | 43 +++++++++++++++++++ 4 files changed, 72 insertions(+), 10 deletions(-) diff --git a/docs/api/environments.md b/docs/api/environments.md index 8752e5d6f5..4d9bc9f0a0 100644 --- a/docs/api/environments.md +++ b/docs/api/environments.md @@ -3,6 +3,7 @@ ::: pydantic_ai.environments options: members: + - EnvToolName - ExecutionEnvironment - ExecutionEnvironmentToolset - ExecutionProcess diff --git a/docs/environments.md b/docs/environments.md index a6c905b09d..dd1d860475 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -6,7 +6,7 @@ This is the foundation for building coding agents, data analysis bots, and other ## Quick Start -```python {title="environments_quickstart.py" test="skip"} +```python {title="environments_quickstart.py"} from pydantic_ai import Agent from pydantic_ai.environments import ExecutionEnvironmentToolset from pydantic_ai.environments.local import LocalEnvironment @@ -20,6 +20,7 @@ async def main(): async with env: result = await agent.run('Create a Python script that prints the first 10 Fibonacci numbers, then run it.') print(result.output) + #> Done! The first 10 Fibonacci numbers are: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34 ``` ## Environments @@ -34,7 +35,7 @@ An [`ExecutionEnvironment`][pydantic_ai.environments.ExecutionEnvironment] defin All environments are async context managers. 
Enter the environment before running the agent, and exit it to clean up: -```python {title="environments_lifecycle.py" test="skip"} +```python {title="environments_lifecycle.py"} from pydantic_ai.environments.docker import DockerEnvironment env = DockerEnvironment(image='python:3.12-slim') @@ -43,6 +44,10 @@ async def main(): async with env: result = await env.shell('python -c "print(42)"') print(result.output) + """ + 42 + """ + #> ``` ### LocalEnvironment @@ -70,7 +75,7 @@ File operations (read, write, edit, ls, glob, grep) are confined to the root dir Requires the `docker` package: `pip install pydantic-ai-slim[docker-environment]` -```python {title="environments_docker.py" test="skip"} +```python {title="environments_docker.py"} from pydantic_ai.environments.docker import DockerEnvironment env = DockerEnvironment( @@ -110,7 +115,7 @@ docker build -t my-sandbox:latest . Then pass the tag to `DockerEnvironment`: -```python {title="environments_docker_custom.py" test="skip"} +```python {title="environments_docker_custom.py"} from pydantic_ai.environments.docker import DockerEnvironment env = DockerEnvironment(image='my-sandbox:latest') @@ -133,7 +138,7 @@ env = DockerEnvironment(image='my-sandbox:latest') For running untrusted code, you can harden the container with Linux security options: -```python {title="environments_docker_hardened.py" test="skip"} +```python {title="environments_docker_hardened.py"} from pydantic_ai.environments.docker import DockerEnvironment env = DockerEnvironment.hardened(image='python:3.12-slim') @@ -141,7 +146,7 @@ env = DockerEnvironment.hardened(image='python:3.12-slim') This uses the [`hardened()`][pydantic_ai.environments.docker.DockerEnvironment.hardened] convenience constructor, which sets sensible security defaults: network disabled, read-only root filesystem, all capabilities dropped, no privilege escalation, runs as `nobody`, uses an init process, and limits PIDs, memory, and CPU. 
You can customize the resource limits: -```python {title="environments_docker_hardened_custom.py" test="skip"} +```python {title="environments_docker_hardened_custom.py"} from pydantic_ai.environments.docker import DockerEnvironment env = DockerEnvironment.hardened( @@ -183,7 +188,7 @@ toolset = ExecutionEnvironmentToolset( The toolset manages the environment lifecycle when used as a context manager: -```python {title="environments_agent.py" test="skip"} +```python {title="environments_agent.py"} from pydantic_ai import Agent from pydantic_ai.environments import ExecutionEnvironmentToolset from pydantic_ai.environments.docker import DockerEnvironment @@ -197,6 +202,9 @@ async def main(): async with toolset: # starts the Docker container result = await agent.run('Fetch https://httpbin.org/get and print the response') print(result.output) + """ + Successfully fetched the URL. The response contains request metadata including headers and origin IP. + """ # container cleaned up automatically ``` @@ -210,7 +218,7 @@ async def main(): You can swap the backing environment at runtime using [`use_environment()`][pydantic_ai.environments.ExecutionEnvironmentToolset.use_environment]: -```python {title="environments_override.py" test="skip"} +```python {title="environments_override.py"} from pydantic_ai import Agent from pydantic_ai.environments import ExecutionEnvironmentToolset from pydantic_ai.environments.docker import DockerEnvironment @@ -236,7 +244,7 @@ async def main(): When multiple `agent.run()` calls execute concurrently (e.g. via `asyncio.gather`), a shared environment means they all operate on the same filesystem and processes, which can cause interference. 
Use `environment_factory` to create a fresh, isolated environment for each run: -```python {title="environments_concurrent.py" test="skip"} +```python {title="environments_concurrent.py"} import asyncio from pydantic_ai import Agent diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index c3c8d2f58c..149b6501b6 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -9,7 +9,7 @@ import fnmatch import posixpath import re -from collections.abc import Callable +from collections.abc import Callable, Mapping from typing import TYPE_CHECKING, Literal from ._base import ( @@ -76,6 +76,16 @@ def capabilities(self) -> frozenset[EnvToolName]: caps.add('shell') return frozenset(caps) + @property + def files(self) -> Mapping[str, str | bytes]: + """Read-only view of the in-memory file system. + + Keys are normalized file paths, values are file contents. + Useful for test assertions against raw file content without the + line-number formatting that [`read_file()`][pydantic_ai.environments.memory.MemoryEnvironment.read_file] adds. 
+ """ + return self._files + @staticmethod def _normalize(path: str) -> str: """Normalize a path for consistent storage.""" diff --git a/tests/test_examples.py b/tests/test_examples.py index e57d1a162d..22c9cb8acf 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -6,6 +6,8 @@ import shutil import ssl import sys +import tempfile +import types from collections.abc import AsyncIterator, Iterable, Sequence from dataclasses import dataclass, field from inspect import FrameInfo @@ -40,6 +42,7 @@ from pydantic_ai._utils import group_by_temporal from pydantic_ai.embeddings import EmbeddingModel, infer_embedding_model from pydantic_ai.embeddings.test import TestEmbeddingModel +from pydantic_ai.environments.local import LocalEnvironment as _LocalEnvironment from pydantic_ai.exceptions import UnexpectedModelBehavior from pydantic_ai.models import KnownModelName, Model, infer_model from pydantic_ai.models.fallback import FallbackModel @@ -58,6 +61,35 @@ pytestmark = [ pytest.mark.skipif(not imports_successful(), reason='extras not installed'), ] + +# --------------------------------------------------------------------------- +# Mock DockerEnvironment backed by LocalEnvironment for testing doc examples +# without requiring the `docker` package or a running Docker daemon. +# --------------------------------------------------------------------------- + + +class _MockDockerEnvironment(_LocalEnvironment): + """Test stand-in for DockerEnvironment that uses LocalEnvironment under the hood.""" + + def __init__(self, **_kwargs: Any) -> None: + # Use mkdtemp (no finalizer) instead of TemporaryDirectory to avoid + # PytestUnraisableExceptionWarning when constructor-only examples + # never enter the async context manager. 
+ self._temp_path = Path(tempfile.mkdtemp()) + super().__init__(root_dir=self._temp_path) + + @classmethod + def hardened(cls, **kwargs: Any) -> _MockDockerEnvironment: + return cls(**kwargs) + + async def __aexit__(self, *_args: Any) -> None: + shutil.rmtree(self._temp_path, ignore_errors=True) + + +_mock_docker_env_module = types.ModuleType('pydantic_ai.environments.docker') +_mock_docker_env_module.__package__ = 'pydantic_ai.environments' +_mock_docker_env_module.DockerEnvironment = _MockDockerEnvironment # type: ignore[attr-defined] + code_examples: dict[str, CodeExample] = {} @@ -170,6 +202,10 @@ def print(self, *args: Any, **kwargs: Any) -> None: except ModuleNotFoundError: pass + # Replace DockerEnvironment with a LocalEnvironment-backed mock so doc + # examples that reference Docker can run without the docker package or daemon. + mocker.patch.dict(sys.modules, {'pydantic_ai.environments.docker': _mock_docker_env_module}) + env.set('OPENAI_API_KEY', 'testing') env.set('GEMINI_API_KEY', 'testing') env.set('GOOGLE_API_KEY', 'testing') @@ -573,6 +609,13 @@ async def call_tool( args={'name': 'test', 'value': 42}, tool_call_id='pyd_ai_tool_call_id', ), + # Execution environment doc examples + 'Create a Python script that prints the first 10 Fibonacci numbers, then run it.': 'Done! The first 10 Fibonacci numbers are: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34', + 'Fetch https://httpbin.org/get and print the response': 'Successfully fetched the URL. 
The response contains request metadata including headers and origin IP.', + 'echo "running locally"': 'Command executed successfully.', + 'echo "running in Docker"': 'Command executed successfully.', + 'task A': 'Task A completed.', + 'task B': 'Task B completed.', } tool_responses: dict[tuple[str, str], str] = { From 2ac4084331c18a75860fb2630c88b53f1fec590d Mon Sep 17 00:00:00 2001 From: David Montague <35119617+dmontagu@users.noreply.github.com> Date: Tue, 24 Feb 2026 11:04:34 +0000 Subject: [PATCH 29/49] Make process classes and container property private, narrow exception handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename DockerEnvironmentProcess → _DockerEnvironmentProcess (internal impl detail) - Rename LocalEnvironmentProcess → _LocalEnvironmentProcess (internal impl detail) - Rename .container → ._required_container (avoid coupling users to docker-py) - Narrow except Exception → except (DockerException, OSError) in teardown/is_alive - Remove unnecessary r-prefix from ExecutionProcess docstring --- .../pydantic_ai/environments/_base.py | 2 +- .../pydantic_ai/environments/docker.py | 30 ++-- .../pydantic_ai/environments/local.py | 4 +- tests/test_environments.py | 142 +++++++++--------- 4 files changed, 89 insertions(+), 89 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index ed82b7f67c..7f1496ceba 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -66,7 +66,7 @@ class FileInfo: class ExecutionProcess(ABC): - r"""Handle to a running process with bidirectional streaming I/O. + """Handle to a running process with bidirectional streaming I/O. Used for interactive execution where a script outputs data, waits for input, processes it, and outputs more data. 
diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 281d4fc409..415842615a 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -162,7 +162,7 @@ def _put_file(container: Container, path: str, data: bytes) -> None: container.put_archive(parent, f) # pyright: ignore[reportUnknownMemberType] -class DockerEnvironmentProcess(ExecutionProcess): +class _DockerEnvironmentProcess(ExecutionProcess): """Interactive process inside a Docker container using exec with socket I/O. Docker's exec socket uses a multiplexed stream protocol where stdout and @@ -514,18 +514,18 @@ def _teardown(self) -> None: if self._container is not None: # pragma: no branch try: self._container.stop(timeout=5) - except Exception: + except (DockerException, OSError): # Best-effort cleanup: container may already be stopped or removed pass try: self._container.remove(force=True) - except Exception: + except (DockerException, OSError): # Best-effort cleanup: container may already be removed pass self._container = None @property - def container(self) -> Container: + def _required_container(self) -> Container: if self._container is None: raise RuntimeError('DockerEnvironment not started. Use `async with DockerEnvironment(...) 
as env:`') return self._container @@ -547,7 +547,7 @@ async def create_process( *, env: dict[str, str] | None = None, ) -> ExecutionProcess: - return DockerEnvironmentProcess(self.container, command, self._work_dir, env=env) + return _DockerEnvironmentProcess(self._required_container, command, self._work_dir, env=env) async def shell( self, @@ -566,7 +566,7 @@ def _exec() -> tuple[int, bytes]: exec_kwargs: dict[str, Any] = {'workdir': self._work_dir} if env: exec_kwargs['environment'] = env - exit_code, output = self.container.exec_run( + exit_code, output = self._required_container.exec_run( ['sh', '-c', wrapped], **exec_kwargs, ) @@ -589,7 +589,7 @@ async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> s def _read() -> str | bytes: cmd = _build_read_file_cmd(path, offset=offset, limit=limit) - exit_code, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) + exit_code, output = self._required_container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) if exit_code != 0: raise FileNotFoundError(f'File not found or not readable: {path}') try: @@ -606,7 +606,7 @@ def _read() -> str | bytes: def _read_file_bytes_sync(self, path: str) -> bytes: """Read raw file bytes using Docker's get_archive API.""" try: - bits, _ = self.container.get_archive(self._resolve_path(path)) + bits, _ = self._required_container.get_archive(self._resolve_path(path)) except NotFound: raise FileNotFoundError(f'File not found: {path}') # get_archive returns a tar stream @@ -625,10 +625,10 @@ def _write() -> None: full_path = self._resolve_path(path) # Ensure parent directory exists parent = str(PurePosixPath(full_path).parent) - self.container.exec_run(['mkdir', '-p', parent]) + self._required_container.exec_run(['mkdir', '-p', parent]) data = content.encode('utf-8') if isinstance(content, str) else content - _put_file(self.container, full_path, data) + _put_file(self._required_container, full_path, data) await anyio.to_thread.run_sync(_write) 
@@ -644,7 +644,7 @@ def _edit() -> int: raw = self._read_file_bytes_sync(path) text = raw.decode('utf-8') new_text, count = apply_edit(text, old, new, path, replace_all=replace_all) - _put_file(self.container, self._resolve_path(path), new_text.encode('utf-8')) + _put_file(self._required_container, self._resolve_path(path), new_text.encode('utf-8')) return count return await anyio.to_thread.run_sync(_edit) @@ -652,7 +652,7 @@ def _edit() -> int: async def ls(self, path: str = '.') -> list[FileInfo]: def _ls() -> list[FileInfo]: cmd = f'ls -la {_shell_escape(path)}' - exit_code, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) + exit_code, output = self._required_container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) if exit_code != 0: raise NotADirectoryError(f'Not a directory or not found: {path}') @@ -679,7 +679,7 @@ def _ls() -> list[FileInfo]: async def glob(self, pattern: str, *, path: str = '.') -> list[str]: def _glob() -> list[str]: cmd = _build_glob_cmd(pattern, path=path) - _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) + _, output = self._required_container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) return _parse_glob_output(output.decode('utf-8', errors='replace')) return await anyio.to_thread.run_sync(_glob) @@ -701,7 +701,7 @@ async def grep( def _grep() -> str: cmd = _build_grep_cmd(pattern, path=path, glob_pattern=glob_pattern, output_mode=output_mode) - _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) + _, output = self._required_container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) text = output.decode('utf-8', errors='replace').strip() # Strip `./` prefix from paths to match Local/Memory environment output text = '\n'.join(line.removeprefix('./') for line in text.splitlines()) @@ -725,7 +725,7 @@ def _check() -> bool: try: self._container.reload() return self._container.status == 'running' - except Exception: + except (DockerException, 
OSError): return False return await anyio.to_thread.run_sync(_check) diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index db149f4000..b8f07f7b59 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -29,7 +29,7 @@ ) -class LocalEnvironmentProcess(ExecutionProcess): +class _LocalEnvironmentProcess(ExecutionProcess): """Interactive process backed by `anyio.abc.Process`.""" def __init__(self, proc: anyio.abc.Process) -> None: @@ -178,7 +178,7 @@ async def create_process( cwd=self._root_dir, env=self._build_env(env), ) - return LocalEnvironmentProcess(proc) + return _LocalEnvironmentProcess(proc) async def shell( self, diff --git a/tests/test_environments.py b/tests/test_environments.py index 8323af1431..211f3713d4 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -28,21 +28,21 @@ format_lines, glob_match, ) -from pydantic_ai.environments.local import LocalEnvironment, LocalEnvironmentProcess +from pydantic_ai.environments.local import LocalEnvironment, _LocalEnvironmentProcess from pydantic_ai.environments.memory import MemoryEnvironment from pydantic_ai.exceptions import UnexpectedModelBehavior from pydantic_ai.models.test import TestModel from pydantic_ai.usage import RunUsage try: - from docker.errors import NotFound as DockerNotFound + from docker.errors import DockerException, NotFound as DockerNotFound from pydantic_ai.environments.docker import ( DockerEnvironment, - DockerEnvironmentProcess, _build_glob_cmd, _build_grep_cmd, _build_read_file_cmd, + _DockerEnvironmentProcess, _filter_grep_count_output, _globstar_zero_dir_variants, _parse_glob_output, @@ -1921,14 +1921,14 @@ def count_exec_run(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: assert 'b.py:0' not in result async def test_docker_container_property(self, mock_docker_sandbox: Any) -> None: - """DockerEnvironment.container raises when not 
started.""" + """DockerEnvironment._required_container raises when not started.""" sandbox = DockerEnvironment() with pytest.raises(RuntimeError, match='not started'): - _ = sandbox.container + _ = sandbox._required_container async def test_docker_create_process(self, mock_docker_sandbox: Any) -> None: - """DockerEnvironment.create_process returns a DockerEnvironmentProcess.""" + """DockerEnvironment.create_process returns a _DockerEnvironmentProcess.""" proc = await mock_docker_sandbox.create_process('echo test') assert proc is not None @@ -1961,10 +1961,10 @@ def test_docker_put_file(self) -> None: assert container._files['/workspace/test.txt'] == b'hello' def test_docker_sandbox_process_read_frame(self) -> None: - """DockerEnvironmentProcess._read_frame parses multiplexed stream frames.""" + """_DockerEnvironmentProcess._read_frame parses multiplexed stream frames.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] # Create a mock socket with a multiplexed frame stdout_data = b'hello from stdout' @@ -1979,10 +1979,10 @@ def test_docker_sandbox_process_read_frame(self) -> None: assert data == stdout_data def test_docker_sandbox_process_read_frame_stderr(self) -> None: - """DockerEnvironmentProcess._read_frame handles stderr frames.""" + """_DockerEnvironmentProcess._read_frame handles stderr frames.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] stderr_data = b'error output' header = struct.pack('>BxxxI', 2, len(stderr_data)) # stream_type=2 (stderr) @@ -1996,10 +1996,10 @@ def test_docker_sandbox_process_read_frame_stderr(self) -> None: assert data == stderr_data def test_docker_sandbox_process_read_frame_eof(self) 
-> None: - """DockerEnvironmentProcess._read_frame returns empty on EOF.""" + """_DockerEnvironmentProcess._read_frame returns empty on EOF.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] mock_socket = MagicMock() mock_socket.recv.return_value = b'' # EOF @@ -2011,10 +2011,10 @@ def test_docker_sandbox_process_read_frame_eof(self) -> None: assert proc._eof is True def test_docker_sandbox_process_read_frame_zero_size(self) -> None: - """DockerEnvironmentProcess._read_frame handles zero-size frames.""" + """_DockerEnvironmentProcess._read_frame handles zero-size frames.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] header = struct.pack('>BxxxI', 1, 0) # zero size @@ -2027,10 +2027,10 @@ def test_docker_sandbox_process_read_frame_zero_size(self) -> None: assert data == b'' def test_docker_sandbox_process_already_eof(self) -> None: - """DockerEnvironmentProcess._read_frame returns empty when already at EOF.""" + """_DockerEnvironmentProcess._read_frame returns empty when already at EOF.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._eof = True stream_type, data = proc._read_frame() @@ -2061,9 +2061,9 @@ def test_docker_setup_early_return( async def test_docker_process_recv_stderr_no_buffer( self, ) -> None: - """DockerEnvironmentProcess.recv_stderr without buffered data (no timeout).""" + """_DockerEnvironmentProcess.recv_stderr without buffered data (no timeout).""" container = MockContainer() - proc = 
DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] stderr_data = b'error output' header = struct.pack('>BxxxI', 2, len(stderr_data)) @@ -2077,9 +2077,9 @@ async def test_docker_process_recv_stderr_no_buffer( async def test_docker_process_recv_stream_buffers_stdout( self, ) -> None: - """DockerEnvironmentProcess._recv_stream buffers stdout when stderr is wanted.""" + """_DockerEnvironmentProcess._recv_stream buffers stdout when stderr is wanted.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] # First frame is stdout (type 1), second is stderr (type 2) stdout_data = b'stdout output' @@ -2099,9 +2099,9 @@ async def test_docker_process_recv_stream_buffers_stdout( async def test_docker_process_wait_no_timeout( self, ) -> None: - """DockerEnvironmentProcess.wait without timeout polls until returncode is set.""" + """_DockerEnvironmentProcess.wait without timeout polls until returncode is set.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._exec_id = 'exec-123' # Mock exec_inspect to return "still running" first, then "exited" call_count = 0 @@ -2121,9 +2121,9 @@ def mock_inspect(exec_id: str) -> dict[str, Any]: async def test_docker_process_wait_with_timeout( self, ) -> None: - """DockerEnvironmentProcess.wait with timeout.""" + """_DockerEnvironmentProcess.wait with timeout.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: 
ignore[arg-type] proc._returncode = 42 result = await proc.wait(timeout=5.0) assert result == 42 @@ -2182,7 +2182,7 @@ def exec_with_short_lines(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: async def test_docker_is_alive_exception(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: """DockerEnvironment.is_alive returns False when reload raises.""" - mock_container.reload = MagicMock(side_effect=Exception('connection error')) + mock_container.reload = MagicMock(side_effect=DockerException('connection error')) result = await mock_docker_sandbox.is_alive() assert result is False @@ -2194,10 +2194,10 @@ async def test_docker_is_alive_running(self, mock_docker_sandbox: Any) -> None: async def test_docker_process_recv_with_buffered_data( self, ) -> None: - """DockerEnvironmentProcess.recv returns buffered stdout data first.""" + """_DockerEnvironmentProcess.recv returns buffered stdout data first.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._stdout_buffer.append(b'buffered data') result = await proc.recv() @@ -2207,10 +2207,10 @@ async def test_docker_process_recv_with_buffered_data( async def test_docker_process_recv_stderr_with_buffered_data( self, ) -> None: - """DockerEnvironmentProcess.recv_stderr returns buffered stderr data first.""" + """_DockerEnvironmentProcess.recv_stderr returns buffered stderr data first.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._stderr_buffer.append(b'buffered error') result = await proc.recv_stderr() @@ -2220,10 +2220,10 @@ async def test_docker_process_recv_stderr_with_buffered_data( async def test_docker_process_recv_stream_buffers_other( self, ) 
-> None: - """DockerEnvironmentProcess._recv_stream buffers frames for the other stream.""" + """_DockerEnvironmentProcess._recv_stream buffers frames for the other stream.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] # First frame is stderr (type 2), second is stdout (type 1) stderr_data = b'error output' @@ -2243,10 +2243,10 @@ async def test_docker_process_recv_stream_buffers_other( async def test_docker_process_recv_stream_eof( self, ) -> None: - """DockerEnvironmentProcess._recv_stream returns empty on EOF.""" + """_DockerEnvironmentProcess._recv_stream returns empty on EOF.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] mock_socket = MagicMock() mock_socket.recv.return_value = b'' # EOF @@ -2258,10 +2258,10 @@ async def test_docker_process_recv_stream_eof( async def test_docker_process_kill( self, ) -> None: - """DockerEnvironmentProcess.kill closes the socket.""" + """_DockerEnvironmentProcess.kill closes the socket.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] mock_socket = MagicMock() proc._socket = mock_socket @@ -2271,10 +2271,10 @@ async def test_docker_process_kill( async def test_docker_process_kill_oserror( self, ) -> None: - """DockerEnvironmentProcess.kill handles OSError.""" + """_DockerEnvironmentProcess.kill handles OSError.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', 
'/workspace') # type: ignore[arg-type] mock_socket = MagicMock() mock_socket.close.side_effect = OSError('socket error') proc._socket = mock_socket @@ -2285,10 +2285,10 @@ async def test_docker_process_kill_oserror( async def test_docker_process_returncode( self, ) -> None: - """DockerEnvironmentProcess.returncode checks exec status.""" + """_DockerEnvironmentProcess.returncode checks exec status.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] # No exec_id means returncode is None assert proc.returncode is None @@ -2301,11 +2301,11 @@ async def test_docker_process_returncode( async def test_docker_process_returncode_from_inspect( self, ) -> None: - """DockerEnvironmentProcess.returncode polls Docker API.""" + """_DockerEnvironmentProcess.returncode polls Docker API.""" container = MockContainer() container.client.api.exec_inspect.return_value = {'ExitCode': 42, 'Running': False} - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._exec_id = 'exec-123' assert proc.returncode == 42 @@ -2314,12 +2314,12 @@ async def test_docker_process_returncode_from_inspect( async def test_docker_process_returncode_still_running( self, ) -> None: - """DockerEnvironmentProcess.returncode returns None when process is running (ExitCode=0, Running=True).""" + """_DockerEnvironmentProcess.returncode returns None when process is running (ExitCode=0, Running=True).""" container = MockContainer() # Docker returns ExitCode=0 + Running=True for still-running processes container.client.api.exec_inspect.return_value = {'ExitCode': 0, 'Running': True} - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = 
_DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._exec_id = 'exec-123' assert proc.returncode is None @@ -2327,11 +2327,11 @@ async def test_docker_process_returncode_still_running( async def test_docker_process_returncode_inspect_error( self, ) -> None: - """DockerEnvironmentProcess.returncode handles API errors.""" + """_DockerEnvironmentProcess.returncode handles API errors.""" container = MockContainer() container.client.api.exec_inspect.side_effect = OSError('connection failed') - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._exec_id = 'exec-123' assert proc.returncode is None @@ -2339,10 +2339,10 @@ async def test_docker_process_returncode_inspect_error( async def test_docker_process_send( self, ) -> None: - """DockerEnvironmentProcess.send writes to socket.""" + """_DockerEnvironmentProcess.send writes to socket.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] mock_socket = MagicMock() proc._socket = mock_socket @@ -2352,10 +2352,10 @@ async def test_docker_process_send( async def test_docker_process_recv_with_timeout( self, ) -> None: - """DockerEnvironmentProcess.recv with timeout.""" + """_DockerEnvironmentProcess.recv with timeout.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] stdout_data = b'data' header = struct.pack('>BxxxI', 1, len(stdout_data)) @@ -2369,10 +2369,10 @@ async def test_docker_process_recv_with_timeout( async def test_docker_process_recv_stderr_with_timeout( self, ) -> None: - 
"""DockerEnvironmentProcess.recv_stderr with timeout.""" + """_DockerEnvironmentProcess.recv_stderr with timeout.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] stderr_data = b'error' header = struct.pack('>BxxxI', 2, len(stderr_data)) @@ -2386,10 +2386,10 @@ async def test_docker_process_recv_stderr_with_timeout( async def test_docker_read_frame_data_eof_during_read( self, ) -> None: - """DockerEnvironmentProcess._read_frame handles EOF during data read.""" + """_DockerEnvironmentProcess._read_frame handles EOF during data read.""" container = MockContainer() - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] # Header says 100 bytes but socket returns less then EOF header = struct.pack('>BxxxI', 1, 100) @@ -2405,14 +2405,14 @@ async def test_docker_read_frame_data_eof_during_read( async def test_docker_process_start_with_env( self, ) -> None: - """DockerEnvironmentProcess._do_start passes env to exec_create.""" + """_DockerEnvironmentProcess._do_start passes env to exec_create.""" container = MockContainer() container.client.api.exec_create.return_value = {'Id': 'exec-test'} mock_sock = MagicMock() container.client.api.exec_start.return_value = mock_sock - proc = DockerEnvironmentProcess( + proc = _DockerEnvironmentProcess( container, # type: ignore[arg-type] 'echo test', '/workspace', @@ -2427,14 +2427,14 @@ async def test_docker_process_start_with_env( async def test_docker_process_aenter( self, ) -> None: - """DockerEnvironmentProcess.__aenter__ starts the process.""" + """_DockerEnvironmentProcess.__aenter__ starts the process.""" container = MockContainer() container.client.api.exec_create.return_value = {'Id': 'exec-aenter'} mock_sock = 
MagicMock() container.client.api.exec_start.return_value = mock_sock - proc = DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] entered = await proc.__aenter__() assert entered is proc assert proc._exec_id == 'exec-aenter' @@ -2521,8 +2521,8 @@ async def test_docker_teardown_cleanup_errors( sandbox = DockerEnvironment() mock_container = MagicMock() - mock_container.stop.side_effect = Exception('stop failed') - mock_container.remove.side_effect = Exception('remove failed') + mock_container.stop.side_effect = DockerException('stop failed') + mock_container.remove.side_effect = DockerException('remove failed') sandbox._container = mock_container # Should not raise @@ -2820,34 +2820,34 @@ async def __aenter__(self): async def test_local_process_stdin_not_available(): - """LocalEnvironmentProcess.send raises when stdin is None.""" + """_LocalEnvironmentProcess.send raises when stdin is None.""" mock_proc = MagicMock() mock_proc.stdin = None - proc = LocalEnvironmentProcess(mock_proc) + proc = _LocalEnvironmentProcess(mock_proc) with pytest.raises(RuntimeError, match='stdin'): await proc.send(b'data') async def test_local_process_stdout_not_available(): - """LocalEnvironmentProcess.recv raises when stdout is None.""" + """_LocalEnvironmentProcess.recv raises when stdout is None.""" mock_proc = MagicMock() mock_proc.stdout = None - proc = LocalEnvironmentProcess(mock_proc) + proc = _LocalEnvironmentProcess(mock_proc) with pytest.raises(RuntimeError, match='stdout'): await proc.recv() async def test_local_process_stderr_not_available(): - """LocalEnvironmentProcess.recv_stderr raises when stderr is None.""" + """_LocalEnvironmentProcess.recv_stderr raises when stderr is None.""" mock_proc = MagicMock() mock_proc.stderr = None - proc = LocalEnvironmentProcess(mock_proc) + proc = _LocalEnvironmentProcess(mock_proc) with 
pytest.raises(RuntimeError, match='stderr'): await proc.recv_stderr() async def test_local_process_recv_stderr_timeout(tmp_path: Path): - """LocalEnvironmentProcess.recv_stderr with timeout.""" + """_LocalEnvironmentProcess.recv_stderr with timeout.""" env = LocalEnvironment(tmp_path) proc = await env.create_process('python -c "import sys; sys.stderr.write(\'err\\n\')"') async with proc: @@ -2856,7 +2856,7 @@ async def test_local_process_recv_stderr_timeout(tmp_path: Path): async def test_local_process_recv_stderr_eof(tmp_path: Path): - """LocalEnvironmentProcess.recv_stderr returns empty on EOF.""" + """_LocalEnvironmentProcess.recv_stderr returns empty on EOF.""" env = LocalEnvironment(tmp_path) proc = await env.create_process('echo done') async with proc: @@ -2867,7 +2867,7 @@ async def test_local_process_recv_stderr_eof(tmp_path: Path): async def test_local_process_kill_terminates_sleep(tmp_path: Path): - """LocalEnvironmentProcess.kill terminates process.""" + """_LocalEnvironmentProcess.kill terminates process.""" env = LocalEnvironment(tmp_path) proc = await env.create_process('sleep 60') async with proc: @@ -3018,7 +3018,7 @@ async def test_memory_glob_in_subdirectory_with_path_filter(): async def test_local_process_wait_no_timeout(tmp_path: Path): - """LocalEnvironmentProcess.wait without timeout (line 74).""" + """_LocalEnvironmentProcess.wait without timeout (line 74).""" env = LocalEnvironment(tmp_path) proc = await env.create_process('true') async with proc: @@ -3320,7 +3320,7 @@ def capabilities(self) -> frozenset[EnvToolName]: async def test_local_recv_no_timeout(tmp_path: Path): - """LocalEnvironmentProcess.recv without timeout returns data.""" + """_LocalEnvironmentProcess.recv without timeout returns data.""" env = LocalEnvironment(tmp_path) proc = await env.create_process('echo hello') async with proc: @@ -3329,7 +3329,7 @@ async def test_local_recv_no_timeout(tmp_path: Path): async def test_local_recv_end_of_stream(tmp_path: Path): - 
"""LocalEnvironmentProcess.recv returns empty bytes at EndOfStream.""" + """_LocalEnvironmentProcess.recv returns empty bytes at EndOfStream.""" env = LocalEnvironment(tmp_path) proc = await env.create_process('true') async with proc: From caad4294b4913e2578b499b92df54736016d1c66 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Tue, 24 Feb 2026 17:40:30 +0000 Subject: [PATCH 30/49] glob fix --- .../pydantic_ai/environments/docker.py | 86 ++++-------- tests/test_environments.py | 131 ++++++++---------- 2 files changed, 81 insertions(+), 136 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 415842615a..30c31acb26 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -26,6 +26,7 @@ ExecutionResult, FileInfo, apply_edit, + glob_match, ) try: @@ -86,67 +87,15 @@ def _filter_grep_count_output(text: str) -> str: return '\n'.join(line for line in text.splitlines() if not line.endswith(':0')) -def _globstar_zero_dir_variants(pattern: str) -> list[str]: - """Generate variants of *pattern* with one or more ``**/`` segments collapsed. +def _build_find_cmd(*, path: str = '.', recursive: bool = True) -> str: + """Build a shell `find` command that lists all files under *path*. - In ``find -path``, the literal ``/`` inside ``**/`` requires at least one - directory level, so ``**`` never matches zero directories. This helper - produces all the collapsed forms needed to cover the zero-directory case. - - Examples:: - - '**/*.py' → ['*.py'] - 'src/**/*.py' → ['src/*.py'] - '**/src/**/*.py' → ['**/src/*.py', 'src/**/*.py', 'src/*.py'] + When *recursive* is False, ``-maxdepth 1`` is added so that only files + directly in *path* are returned (matching ``pathlib.glob`` semantics for + patterns without ``**``). 
""" - segments = pattern.split('**/') - if len(segments) <= 1: - return [] - n = len(segments) - 1 # number of **/ occurrences - all_kept = (1 << n) - 1 - variants: set[str] = set() - for mask in range(all_kept): # every subset except "all kept" (= original) - result = segments[0] - for i in range(n): - if mask & (1 << i): - result += '**/' + segments[i + 1] - else: - result += segments[i + 1] - variants.add(result) - return sorted(variants) - - -def _build_glob_cmd(pattern: str, *, path: str = '.') -> str: - """Build a shell `find` command to match files by pattern. - - When the pattern does not contain ``**``, ``-maxdepth`` is added so that - ``*.py`` only matches in the target directory (matching Local/Memory - behaviour), while ``**/*.py`` recurses without limit. - """ - path_pattern = f'{path}/{pattern}' if '/' in pattern else pattern - # Limit recursion depth for non-** patterns to match pathlib.glob semantics - if '**' in pattern: - depth_flag = '' - else: - depth_flag = f' -maxdepth {pattern.count("/") + 1}' - conditions = [f'-path {_shell_escape(path_pattern)}', f'-name {_shell_escape(pattern)}'] - # `**/` in find's -path requires at least one directory level, so `**` - # never matches zero directories. Add conditions for every collapsed - # variant to cover the zero-directory case(s). 
- for variant in _globstar_zero_dir_variants(pattern): - if '/' in variant: - conditions.append(f'-path {_shell_escape(path + "/" + variant)}') - else: - conditions.append(f'-name {_shell_escape(variant)}') - return f'find {_shell_escape(path)}{depth_flag} \\( {" -o ".join(conditions)} \\) 2>/dev/null | head -100' - - -def _parse_glob_output(text: str) -> list[str]: - """Parse output of a find/glob command into a list of paths.""" - text = text.strip() - if not text: - return [] - return [line.removeprefix('./') for line in text.splitlines() if line] + depth_flag = '' if recursive else ' -maxdepth 1' + return f'find {_shell_escape(path)}{depth_flag} -type f 2>/dev/null | head -100' def _put_file(container: Container, path: str, data: bytes) -> None: @@ -678,9 +627,22 @@ def _ls() -> list[FileInfo]: async def glob(self, pattern: str, *, path: str = '.') -> list[str]: def _glob() -> list[str]: - cmd = _build_glob_cmd(pattern, path=path) - _, output = self._required_container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) - return _parse_glob_output(output.decode('utf-8', errors='replace')) + recursive = '**' in pattern + cmd = _build_find_cmd(path=path, recursive=recursive) + _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) + text = output.decode('utf-8', errors='replace').strip() + if not text: + return [] + # Strip leading ./ and path prefix to get relative paths, then filter + results: list[str] = [] + prefix = path.rstrip('/') + '/' if path != '.' 
else './' + for line in text.splitlines(): + if not line: + continue + rel = line.removeprefix(prefix).removeprefix('./') + if glob_match(rel, pattern): + results.append(line.removeprefix('./')) + return sorted(results) return await anyio.to_thread.run_sync(_glob) diff --git a/tests/test_environments.py b/tests/test_environments.py index 211f3713d4..ae2597a08b 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -39,13 +39,10 @@ from pydantic_ai.environments.docker import ( DockerEnvironment, - _build_glob_cmd, _build_grep_cmd, _build_read_file_cmd, _DockerEnvironmentProcess, _filter_grep_count_output, - _globstar_zero_dir_variants, - _parse_glob_output, _put_file, _shell_escape, ) @@ -994,6 +991,7 @@ async def test_memory_read_write(): async def test_memory_initial_files(): env = MemoryEnvironment(files={'a.txt': 'alpha', 'b.txt': 'beta'}) async with env: + assert env.files == {'a.txt': 'alpha', 'b.txt': 'beta'} a = await env.read_file('a.txt') assert isinstance(a, str) assert 'alpha' in a @@ -1748,9 +1746,27 @@ def exec_run( # Handle find (glob) if 'find' in cmd_str: + # Extract the search path from: find '' ... + import shlex as _shlex + + find_part = cmd_str.split('|')[0].strip() + # Remove the 'sh -c' wrapper if present + if find_part.startswith('sh -c '): + find_part = find_part[len('sh -c '):] + tokens = _shlex.split(find_part) + # tokens[0] is 'find', tokens[1] is the path + search_path = tokens[1] if len(tokens) > 1 else '.' 
+ wd = workdir or '/workspace' matches = [] - for path in sorted(self._files): - matches.append(path) # pragma: no cover + for fpath in sorted(self._files): + # Make path relative to workdir + if not fpath.startswith(wd + '/'): + continue + rel = fpath[len(wd) + 1:] + if search_path == '.': + matches.append(f'./{rel}') + elif rel.startswith(search_path + '/') or rel == search_path: + matches.append(rel) return 0, '\n'.join(matches).encode('utf-8') # Handle grep @@ -1889,10 +1905,43 @@ async def test_docker_ls(self, mock_docker_sandbox: Any, mock_container: MockCon entries = await mock_docker_sandbox.ls('.') assert isinstance(entries, list) - async def test_docker_glob(self, mock_docker_sandbox: Any) -> None: - """DockerEnvironment.glob returns matching paths.""" + async def test_docker_glob_recursive(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """**/*.py matches files in subdirs and root.""" + mock_container._files['/workspace/top.py'] = b'' + mock_container._files['/workspace/src/main.py'] = b'' + mock_container._files['/workspace/src/lib/util.py'] = b'' + mock_container._files['/workspace/readme.md'] = b'' + matches = await mock_docker_sandbox.glob('**/*.py') + assert sorted(matches) == ['src/lib/util.py', 'src/main.py', 'top.py'] + + async def test_docker_glob_non_recursive(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """*.py matches only in target dir, not subdirs.""" + mock_container._files['/workspace/top.py'] = b'' + mock_container._files['/workspace/src/nested.py'] = b'' matches = await mock_docker_sandbox.glob('*.py') - assert isinstance(matches, list) + assert matches == ['top.py'] + + async def test_docker_glob_with_path(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """*.py with path='src' restricts scope.""" + mock_container._files['/workspace/top.py'] = b'' + mock_container._files['/workspace/src/main.py'] = b'' + mock_container._files['/workspace/src/other.txt'] = 
b'' + matches = await mock_docker_sandbox.glob('*.py', path='src') + assert matches == ['src/main.py'] + + async def test_docker_glob_no_matches(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """Returns empty list when nothing matches.""" + mock_container._files['/workspace/readme.md'] = b'' + matches = await mock_docker_sandbox.glob('*.py') + assert matches == [] + + async def test_docker_glob_zero_dir_match(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """**/*.py matches top-level .py files (zero-directory case).""" + mock_container._files['/workspace/top.py'] = b'' + mock_container._files['/workspace/deep/nested.py'] = b'' + matches = await mock_docker_sandbox.glob('**/*.py') + assert 'top.py' in matches + assert 'deep/nested.py' in matches async def test_docker_grep(self, mock_docker_sandbox: Any) -> None: """DockerEnvironment.grep returns matches.""" @@ -2664,72 +2713,6 @@ def test_build_grep_cmd_glob_pattern_escaping(self): cmd2 = _build_grep_cmd('pat', glob_pattern='$(evil)') assert '$(evil)' not in cmd2.replace("'$(evil)'", '') # Only appears inside quotes - def test_build_glob_cmd(self): - cmd = _build_glob_cmd('*.py') - assert 'find' in cmd - assert "'*.py'" in cmd - assert "'.'" in cmd - assert '-maxdepth 1' in cmd - - def test_build_glob_cmd_with_path(self): - cmd = _build_glob_cmd('*.py', path='src') - assert "'src'" in cmd - assert '-maxdepth 1' in cmd - - def test_build_glob_cmd_nested_pattern(self): - cmd = _build_glob_cmd('src/*.py') - assert '-maxdepth 2' in cmd - - def test_build_glob_cmd_recursive_no_maxdepth(self): - cmd = _build_glob_cmd('**/*.py') - assert '-maxdepth' not in cmd - # Root-level files should also match via the -name suffix condition - assert "-name '*.py'" in cmd - - def test_build_glob_cmd_recursive_with_subdir(self): - cmd = _build_glob_cmd('**/subdir/*.py') - assert '-maxdepth' not in cmd - # Should include a -path condition for the root-level subdir case - assert "-path 
'./subdir/*.py'" in cmd - - def test_build_glob_cmd_mid_pattern_globstar(self): - """Mid-pattern **/ should add a zero-directory fallback condition.""" - cmd = _build_glob_cmd('src/**/*.py') - assert '-maxdepth' not in cmd - # The zero-directory collapse of src/**/*.py → src/*.py - assert "-path './src/*.py'" in cmd - - def test_build_glob_cmd_multiple_globstars(self): - """Multiple **/ segments should generate all collapsed variants.""" - cmd = _build_glob_cmd('**/src/**/*.py') - assert '-maxdepth' not in cmd - # All three collapsed variants - assert "-path './**/src/*.py'" in cmd - assert "-path './src/**/*.py'" in cmd - assert "-path './src/*.py'" in cmd - - def test_globstar_zero_dir_variants(self): - assert _globstar_zero_dir_variants('*.py') == [] - assert _globstar_zero_dir_variants('src/*.py') == [] - assert _globstar_zero_dir_variants('**/*.py') == ['*.py'] - assert _globstar_zero_dir_variants('src/**/*.py') == ['src/*.py'] - assert sorted(_globstar_zero_dir_variants('**/src/**/*.py')) == [ - '**/src/*.py', - 'src/**/*.py', - 'src/*.py', - ] - - def test_parse_glob_output_empty(self): - assert _parse_glob_output('') == [] - assert _parse_glob_output(' ') == [] - assert _parse_glob_output('\n') == [] - - def test_parse_glob_output_multiline(self): - assert _parse_glob_output('a.py\nb.py\nc.py\n') == ['a.py', 'b.py', 'c.py'] - - def test_parse_glob_output_strips_dot_slash(self): - assert _parse_glob_output('./a.py\n./src/b.py\nc.py\n') == ['a.py', 'src/b.py', 'c.py'] - def test_filter_grep_count_output(self): text = 'a.py:3\nb.py:0\nc.py:1' result = _filter_grep_count_output(text) From 49ba0d63bb4f898a327242ec006bb91415af2941 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Tue, 24 Feb 2026 17:51:03 +0000 Subject: [PATCH 31/49] glob fix --- pydantic_ai_slim/pydantic_ai/environments/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py 
b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 30c31acb26..f06e9c10cd 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -629,7 +629,7 @@ async def glob(self, pattern: str, *, path: str = '.') -> list[str]: def _glob() -> list[str]: recursive = '**' in pattern cmd = _build_find_cmd(path=path, recursive=recursive) - _, output = self.container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) + _, output = self._container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) text = output.decode('utf-8', errors='replace').strip() if not text: return [] From 1bd7d3186fbdc5731cd2b2327f89848403e76ffd Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Tue, 24 Feb 2026 17:52:23 +0000 Subject: [PATCH 32/49] glob fix --- pydantic_ai_slim/pydantic_ai/environments/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index f06e9c10cd..803ba7e09c 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -95,7 +95,7 @@ def _build_find_cmd(*, path: str = '.', recursive: bool = True) -> str: patterns without ``**``). 
""" depth_flag = '' if recursive else ' -maxdepth 1' - return f'find {_shell_escape(path)}{depth_flag} -type f 2>/dev/null | head -100' + return f'find {_shell_escape(path)}{depth_flag} -type f 2>/dev/null' def _put_file(container: Container, path: str, data: bytes) -> None: From 672a80d6d944b5942f131b83205c8b620d7ca094 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Thu, 26 Feb 2026 16:40:43 +0000 Subject: [PATCH 33/49] cov --- tests/test_environments.py | 39 +++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/tests/test_environments.py b/tests/test_environments.py index ae2597a08b..612867786c 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1751,8 +1751,8 @@ def exec_run( find_part = cmd_str.split('|')[0].strip() # Remove the 'sh -c' wrapper if present - if find_part.startswith('sh -c '): - find_part = find_part[len('sh -c '):] + if find_part.startswith('sh -c '): # pragma: no branch + find_part = find_part[len('sh -c ') :] tokens = _shlex.split(find_part) # tokens[0] is 'find', tokens[1] is the path search_path = tokens[1] if len(tokens) > 1 else '.' 
@@ -1762,7 +1762,7 @@ def exec_run( # Make path relative to workdir if not fpath.startswith(wd + '/'): continue - rel = fpath[len(wd) + 1:] + rel = fpath[len(wd) + 1 :] if search_path == '.': matches.append(f'./{rel}') elif rel.startswith(search_path + '/') or rel == search_path: @@ -1935,6 +1935,20 @@ async def test_docker_glob_no_matches(self, mock_docker_sandbox: Any, mock_conta matches = await mock_docker_sandbox.glob('*.py') assert matches == [] + async def test_docker_glob_empty_workspace(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """Returns empty list when workspace has no files.""" + matches = await mock_docker_sandbox.glob('*.py') + assert matches == [] + + async def test_docker_glob_ignores_files_outside_workspace( + self, mock_docker_sandbox: Any, mock_container: MockContainer + ) -> None: + """Files outside the workspace directory are not included.""" + mock_container._files['/other/place/top.py'] = b'' + mock_container._files['/workspace/found.py'] = b'' + matches = await mock_docker_sandbox.glob('**/*.py') + assert matches == ['found.py'] + async def test_docker_glob_zero_dir_match(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: """**/*.py matches top-level .py files (zero-directory case).""" mock_container._files['/workspace/top.py'] = b'' @@ -1943,6 +1957,25 @@ async def test_docker_glob_zero_dir_match(self, mock_docker_sandbox: Any, mock_c assert 'top.py' in matches assert 'deep/nested.py' in matches + async def test_docker_glob_skips_blank_lines(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: + """Blank lines in find output are skipped.""" + mock_container._files['/workspace/a.py'] = b'' + mock_container._files['/workspace/b.py'] = b'' + # Patch exec_run to return output with embedded blank lines + original_exec_run = mock_container.exec_run + + def exec_run_with_blanks(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: + exit_code, output = original_exec_run(cmd, **kwargs) 
+ # Insert blank lines between entries + if output: + lines = output.decode().splitlines() + output = '\n\n'.join(lines).encode() + return exit_code, output + + mock_container.exec_run = exec_run_with_blanks # type: ignore[assignment] + matches = await mock_docker_sandbox.glob('**/*.py') + assert matches == ['a.py', 'b.py'] + async def test_docker_grep(self, mock_docker_sandbox: Any) -> None: """DockerEnvironment.grep returns matches.""" result = await mock_docker_sandbox.grep('pattern') From 9cf206aea5487a5596137e31b5d48b1d36f2d180 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 11:44:47 +0000 Subject: [PATCH 34/49] adding for cov --- tests/graph/test_persistence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/graph/test_persistence.py b/tests/graph/test_persistence.py index 4bb54947e4..8779fd1e2d 100644 --- a/tests/graph/test_persistence.py +++ b/tests/graph/test_persistence.py @@ -394,7 +394,7 @@ async def run(self, ctx: GraphRunContext[CountDownState]) -> CountDown | End[int state = CountDownState(counter=3) count_down_graph = Graph(nodes=[CountDown]) - await count_down_graph.initialize(CountDown(), state=state, persistence=persistence) + await count_down_graph.initialize(CountDown(), state=state, persistence=persistence, infer_name=False) last_snapshot = persistence.last_snapshot async with count_down_graph.iter_from_persistence(persistence) as run: From c55a0df64aaa57c4a45a7fc13b68ff31ae4243c8 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 11:45:10 +0000 Subject: [PATCH 35/49] adding for cov --- tests/test_environments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_environments.py b/tests/test_environments.py index 612867786c..d1449c5bba 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1692,7 +1692,7 @@ def __init__(self) -> None: self.status = 'running' self.client = MagicMock() - def exec_run( + def exec_run( # noqa: C901 self, 
cmd: list[str] | str, workdir: str | None = None, From 2386886cbbf793fdd52c8e94cbb205e6cedaa026 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 12:08:37 +0000 Subject: [PATCH 36/49] glob removing, need to ensure tests removal for now --- .../pydantic_ai/environments/_base.py | 41 --- .../pydantic_ai/environments/docker.py | 34 --- .../pydantic_ai/environments/local.py | 27 +- .../pydantic_ai/environments/memory.py | 19 +- .../toolsets/execution_environment.py | 27 -- tests/test_environments.py | 281 +----------------- 6 files changed, 18 insertions(+), 411 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 7f1496ceba..6b7582db3e 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -21,7 +21,6 @@ 'read_file', 'write_file', 'edit_file', - 'glob', 'grep', ] """Tool name for an environment capability. @@ -287,18 +286,6 @@ async def replace_str( """ raise NotImplementedError(f'{type(self).__name__} does not support replace_str.') - async def glob(self, pattern: str, *, path: str = '.') -> list[str]: - """Find files matching a glob pattern. - - Args: - pattern: The glob pattern (e.g. `'**/*.py'`). - path: The directory to search in. - - Returns: - A list of matching file paths. - """ - raise NotImplementedError(f'{type(self).__name__} does not support glob.') - async def grep( self, pattern: str, @@ -406,34 +393,6 @@ def collect_grep_matches( results.append(f'{rel_path}:{line_num}:{line}') -def glob_match(path: str, pattern: str) -> bool: - """Match a path against a glob pattern with `**` support. - - This helper converts glob patterns to regex where `*` matches - within a single path segment and `**` matches zero or more - path segments (including `/`). - """ - regex = '' - i = 0 - while i < len(pattern): - if pattern[i : i + 3] == '**/': - regex += '(.*/)?' 
- i += 3 - elif pattern[i : i + 2] == '**': - regex += '.*' - i += 2 - elif pattern[i] == '*': - regex += '[^/]*' - i += 1 - elif pattern[i] == '?': - regex += '[^/]' - i += 1 - else: - regex += re.escape(pattern[i]) - i += 1 - return bool(re.fullmatch(regex, path)) - - def apply_edit(text: str, old_string: str, new_string: str, path: str, *, replace_all: bool) -> tuple[str, int]: """Apply a string replacement edit, returning the new text and the number of replacements. diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 803ba7e09c..9b94d4c114 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -26,7 +26,6 @@ ExecutionResult, FileInfo, apply_edit, - glob_match, ) try: @@ -87,17 +86,6 @@ def _filter_grep_count_output(text: str) -> str: return '\n'.join(line for line in text.splitlines() if not line.endswith(':0')) -def _build_find_cmd(*, path: str = '.', recursive: bool = True) -> str: - """Build a shell `find` command that lists all files under *path*. - - When *recursive* is False, ``-maxdepth 1`` is added so that only files - directly in *path* are returned (matching ``pathlib.glob`` semantics for - patterns without ``**``). 
- """ - depth_flag = '' if recursive else ' -maxdepth 1' - return f'find {_shell_escape(path)}{depth_flag} -type f 2>/dev/null' - - def _put_file(container: Container, path: str, data: bytes) -> None: """Write file data into a container via put_archive.""" parent = str(PurePosixPath(path).parent) @@ -402,7 +390,6 @@ def capabilities(self) -> frozenset[EnvToolName]: # pragma: lax no cover 'read_file', 'write_file', 'edit_file', - 'glob', 'grep', } ) @@ -625,27 +612,6 @@ def _ls() -> list[FileInfo]: return await anyio.to_thread.run_sync(_ls) - async def glob(self, pattern: str, *, path: str = '.') -> list[str]: - def _glob() -> list[str]: - recursive = '**' in pattern - cmd = _build_find_cmd(path=path, recursive=recursive) - _, output = self._container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) - text = output.decode('utf-8', errors='replace').strip() - if not text: - return [] - # Strip leading ./ and path prefix to get relative paths, then filter - results: list[str] = [] - prefix = path.rstrip('/') + '/' if path != '.' 
else './' - for line in text.splitlines(): - if not line: - continue - rel = line.removeprefix(prefix).removeprefix('./') - if glob_match(rel, pattern): - results.append(line.removeprefix('./')) - return sorted(results) - - return await anyio.to_thread.run_sync(_glob) - async def grep( self, pattern: str, diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index b8f07f7b59..40ab5bbf9f 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -6,6 +6,7 @@ from __future__ import annotations +import fnmatch import re import subprocess from pathlib import Path @@ -136,7 +137,7 @@ def __init__( @property def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls', 'shell', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'}) + return frozenset({'ls', 'shell', 'read_file', 'write_file', 'edit_file', 'grep'}) async def __aenter__(self) -> Self: self._root_dir.mkdir(parents=True, exist_ok=True) @@ -289,17 +290,6 @@ async def ls(self, path: str = '.') -> list[FileInfo]: continue return entries - async def glob(self, pattern: str, *, path: str = '.') -> list[str]: - resolved = self._resolve_path(path) - matches: list[str] = [] - for match in sorted(resolved.glob(pattern)): - try: - rel = str(match.relative_to(self._root_dir)) - matches.append(rel) - except ValueError: # pragma: no cover - continue - return matches - async def grep( self, pattern: str, @@ -315,10 +305,13 @@ async def grep( is_explicit_file = search_dir.is_file() if is_explicit_file: files = [search_dir] - elif glob_pattern: - files = sorted(search_dir.rglob(glob_pattern)) else: - files = sorted(search_dir.rglob('*')) + # Use rglob with the glob_pattern when available for efficient + # filesystem traversal, but apply fnmatch on the basename as the + # authoritative filter below — this keeps semantics filename-only + # (consistent with Docker's grep --include and 
Memory's fnmatch) + # while letting pathlib narrow the search. + files = sorted(search_dir.rglob(glob_pattern or '*')) results: list[str] = [] for file_path in files: @@ -329,6 +322,10 @@ async def grep( part.startswith('.') for part in file_path.relative_to(self._root_dir).parts ): continue + # Filename-only glob filtering, consistent with Docker's grep --include + # and MemoryEnvironment's fnmatch-on-basename approach. + if not is_explicit_file and glob_pattern and not fnmatch.fnmatch(file_path.name, glob_pattern): + continue try: raw = file_path.read_bytes() except OSError: # pragma: no cover diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index 149b6501b6..86f7947f5e 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -20,7 +20,6 @@ apply_edit, collect_grep_matches, format_lines, - glob_match, ) if TYPE_CHECKING: @@ -71,7 +70,7 @@ def __init__( @property def capabilities(self) -> frozenset[EnvToolName]: - caps: set[EnvToolName] = {'ls', 'read_file', 'write_file', 'edit_file', 'glob', 'grep'} + caps: set[EnvToolName] = {'ls', 'read_file', 'write_file', 'edit_file', 'grep'} if self._command_handler is not None: caps.add('shell') return frozenset(caps) @@ -217,22 +216,6 @@ async def ls(self, path: str = '.') -> list[FileInfo]: return list(entries.values()) - async def glob(self, pattern: str, *, path: str = '.') -> list[str]: - normalized = self._normalize(path) - matches: list[str] = [] - for file_path in sorted(self._files): - if normalized != '.': - if not file_path.startswith(normalized + '/'): - continue - rel = file_path[len(normalized) + 1 :] - else: - rel = file_path - - if glob_match(rel, pattern): - matches.append(file_path) - - return matches - async def grep( self, pattern: str, diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py 
b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index e080bb6395..129d771399 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -153,7 +153,6 @@ def _register_tools(self) -> None: self._register_read_file() self._register_write_file() self._register_edit_file() - self._register_glob() self._register_grep() def _register_ls(self) -> None: @@ -277,32 +276,6 @@ async def edit_file(path: str, old: str, new: str, replace_all: bool = False) -> self.tool(requires_approval=self._require_write_approval)(edit_file) - def _register_glob(self) -> None: - async def glob_tool(pattern: str, path: str = '.') -> str: - """Find files matching a glob pattern. - - Supports patterns like `**/*.py`, `src/**/*.ts`. - Returns up to 100 matching file paths. - - Args: - pattern: The glob pattern to match files against. - path: The directory to search in. Defaults to the working directory. - """ - try: - matches = await self.required_environment.glob(pattern, path=path) - except (PermissionError, OSError) as e: - return f'Error: {e}' - if not matches: - return 'No files found.' - truncated = len(matches) > 100 - matches = matches[:100] - result = '\n'.join(matches) - if truncated: - result += '\n[... 
truncated, showing first 100 matches]' - return result - - self.tool(name='glob')(glob_tool) - def _register_grep(self) -> None: async def grep_tool( pattern: str, diff --git a/tests/test_environments.py b/tests/test_environments.py index d1449c5bba..ed55312b40 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -26,7 +26,6 @@ from pydantic_ai.environments._base import ( apply_edit, format_lines, - glob_match, ) from pydantic_ai.environments.local import LocalEnvironment, _LocalEnvironmentProcess from pydantic_ai.environments.memory import MemoryEnvironment @@ -359,38 +358,6 @@ async def test_local_ls_not_a_directory(tmp_path: Path): await env.ls('file.txt') -# --- LocalEnvironment: glob --- - - -async def test_local_glob(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('src/main.py', '# main') - await env.write_file('src/utils.py', '# utils') - await env.write_file('src/data.json', '{}') - - matches = await env.glob('**/*.py') - assert len(matches) == 2 - assert any('main.py' in m for m in matches) - assert any('utils.py' in m for m in matches) - assert not any('data.json' in m for m in matches) - - -async def test_local_glob_non_recursive(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('top.py', '# top') - await env.write_file('sub/nested.py', '# nested') - - # Non-recursive pattern should only match in the target directory - matches = await env.glob('*.py') - assert matches == ['top.py'] - - -async def test_local_glob_no_matches(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - matches = await env.glob('**/*.rs') - assert matches == [] - - # --- LocalEnvironment: grep --- @@ -532,7 +499,7 @@ async def test_toolset_tool_names(): ctx = build_run_context() tools = await toolset.get_tools(ctx) tool_names = sorted(tools.keys()) - assert tool_names == snapshot(['edit_file', 'glob', 'grep', 'ls', 'read_file', 'shell', 'write_file']) + assert 
tool_names == snapshot(['edit_file', 'grep', 'ls', 'read_file', 'shell', 'write_file']) async def test_toolset_include_flags(): @@ -625,23 +592,6 @@ async def test_toolset_edit_retry_on_permission_error(tmp_path: Path): ) -async def test_toolset_glob_tool(tmp_path: Path): - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - await env.write_file('a.py', '# a') - await env.write_file('b.py', '# b') - - result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.py'})) - assert result == snapshot("""\ -a.py -b.py\ -""") - - async def test_toolset_grep_tool(tmp_path: Path): env = LocalEnvironment(tmp_path) toolset = ExecutionEnvironmentToolset(env) @@ -696,20 +646,6 @@ async def test_toolset_write_path_traversal_returns_error(tmp_path: Path): assert 'Error:' in str(result) -async def test_toolset_glob_path_traversal_returns_error(tmp_path: Path): - """glob with path traversal returns an error string.""" - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - result = await manager.handle_call( - ToolCallPart(tool_name='glob', args={'pattern': '*.py', 'path': '../../etc'}) - ) - assert 'Error:' in str(result) - - async def test_toolset_grep_invalid_regex_returns_error(tmp_path: Path): """grep with invalid regex returns an error string.""" env = LocalEnvironment(tmp_path) @@ -788,7 +724,6 @@ async def test_toolset_require_write_approval(): assert tools['edit_file'].tool_def.kind == 'unapproved' # read_file and search tools should NOT require approval assert tools['read_file'].tool_def.kind == 'function' - assert tools['glob'].tool_def.kind == 'function' assert tools['grep'].tool_def.kind == 'function' @@ -1117,34 +1052,6 @@ async def 
test_memory_ls_not_a_directory(): await env.ls('nonexistent') -async def test_memory_glob(): - env = MemoryEnvironment( - files={ - 'src/main.py': '# main', - 'src/utils.py': '# utils', - 'src/data.json': '{}', - } - ) - async with env: - matches = await env.glob('*.py', path='src') - assert sorted(matches) == ['src/main.py', 'src/utils.py'] - - -async def test_memory_glob_non_recursive(): - env = MemoryEnvironment(files={'top.py': '# top', 'sub/nested.py': '# nested'}) - async with env: - # Non-recursive pattern should only match in the target directory - matches = await env.glob('*.py') - assert matches == ['top.py'] - - -async def test_memory_glob_no_matches(): - env = MemoryEnvironment(files={'a.py': ''}) - async with env: - matches = await env.glob('*.rs') - assert matches == [] - - async def test_memory_grep_content(): env = MemoryEnvironment( files={ @@ -1296,13 +1203,6 @@ async def test_memory_toolset_integration(): ) assert result == snapshot('File written: new.py') - # glob - result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.py'})) - assert result == snapshot("""\ -main.py -new.py\ -""") - # grep result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'hello'})) assert result == snapshot('main.py:1:print("hello")') @@ -1347,38 +1247,6 @@ def test_format_lines_trailing_newline(): assert '1\tno trailing newline' in result -def test_glob_match_simple(): - assert glob_match('foo.py', '*.py') is True - assert glob_match('foo.txt', '*.py') is False - - -def test_glob_match_double_star(): - """glob_match with ** patterns for recursive matching.""" - assert glob_match('src/main.py', '**/*.py') is True - assert glob_match('deep/nested/dir/file.py', '**/*.py') is True - assert glob_match('file.py', '**/*.py') is True - assert glob_match('src/main.txt', '**/*.py') is False - - -def test_glob_match_double_star_prefix(): - """glob_match with **/ prefix.""" - assert glob_match('a/b/c.txt', '**/c.txt') is 
True - assert glob_match('c.txt', '**/c.txt') is True - - -def test_glob_match_double_star_suffix(): - """glob_match with ** at end.""" - assert glob_match('src/foo/bar', 'src/**') is True - - -def test_glob_match_question_mark(): - """glob_match with ? wildcard.""" - assert glob_match('test.py', 'tes?.py') is True - assert glob_match('test.py', 'te??.py') is True - assert glob_match('test.py', 't???.py') is True # t + 3 chars (est) + .py - assert glob_match('test.py', 't????.py') is False # needs 4 chars between t and .py - - def test_apply_edit_basic(): new_text, count = apply_edit('hello world', 'world', 'earth', 'test.txt', replace_all=False) assert new_text == 'hello earth' @@ -1505,36 +1373,6 @@ async def test_memory_grep_no_text_content(): assert 'data.txt' in result -async def test_memory_glob_recursive(): - """glob with ** pattern.""" - env = MemoryEnvironment( - files={ - 'src/a.py': '', - 'src/sub/b.py': '', - 'other.txt': '', - } - ) - async with env: - matches = await env.glob('**/*.py') - assert 'src/a.py' in matches - assert 'src/sub/b.py' in matches - assert 'other.txt' not in matches - - -async def test_memory_glob_in_subdirectory(): - """glob with path= restricts to subdirectory.""" - env = MemoryEnvironment( - files={ - 'src/a.py': '', - 'lib/b.py': '', - } - ) - async with env: - matches = await env.glob('*.py', path='src') - assert 'src/a.py' in matches - assert 'lib/b.py' not in matches - - async def test_memory_ls_with_bytes(): """ls reports size correctly for bytes content.""" env = MemoryEnvironment(files={'data.bin': b'\x00\x01\x02'}) @@ -1624,18 +1462,6 @@ async def test_toolset_grep_no_matches(tmp_path: Path): assert result == snapshot('No matches found.') -async def test_toolset_glob_no_matches(tmp_path: Path): - """glob with no matches returns 'No files found.'.""" - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await 
ToolManager[None](toolset).for_run_step(ctx) - - async with env: - result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.nonexistent'})) - assert result == snapshot('No files found.') - - async def test_toolset_edit_success(tmp_path: Path): """edit_file tool returns success message.""" env = LocalEnvironment(tmp_path) @@ -1905,77 +1731,6 @@ async def test_docker_ls(self, mock_docker_sandbox: Any, mock_container: MockCon entries = await mock_docker_sandbox.ls('.') assert isinstance(entries, list) - async def test_docker_glob_recursive(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """**/*.py matches files in subdirs and root.""" - mock_container._files['/workspace/top.py'] = b'' - mock_container._files['/workspace/src/main.py'] = b'' - mock_container._files['/workspace/src/lib/util.py'] = b'' - mock_container._files['/workspace/readme.md'] = b'' - matches = await mock_docker_sandbox.glob('**/*.py') - assert sorted(matches) == ['src/lib/util.py', 'src/main.py', 'top.py'] - - async def test_docker_glob_non_recursive(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """*.py matches only in target dir, not subdirs.""" - mock_container._files['/workspace/top.py'] = b'' - mock_container._files['/workspace/src/nested.py'] = b'' - matches = await mock_docker_sandbox.glob('*.py') - assert matches == ['top.py'] - - async def test_docker_glob_with_path(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """*.py with path='src' restricts scope.""" - mock_container._files['/workspace/top.py'] = b'' - mock_container._files['/workspace/src/main.py'] = b'' - mock_container._files['/workspace/src/other.txt'] = b'' - matches = await mock_docker_sandbox.glob('*.py', path='src') - assert matches == ['src/main.py'] - - async def test_docker_glob_no_matches(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """Returns empty list when nothing matches.""" - 
mock_container._files['/workspace/readme.md'] = b'' - matches = await mock_docker_sandbox.glob('*.py') - assert matches == [] - - async def test_docker_glob_empty_workspace(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """Returns empty list when workspace has no files.""" - matches = await mock_docker_sandbox.glob('*.py') - assert matches == [] - - async def test_docker_glob_ignores_files_outside_workspace( - self, mock_docker_sandbox: Any, mock_container: MockContainer - ) -> None: - """Files outside the workspace directory are not included.""" - mock_container._files['/other/place/top.py'] = b'' - mock_container._files['/workspace/found.py'] = b'' - matches = await mock_docker_sandbox.glob('**/*.py') - assert matches == ['found.py'] - - async def test_docker_glob_zero_dir_match(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """**/*.py matches top-level .py files (zero-directory case).""" - mock_container._files['/workspace/top.py'] = b'' - mock_container._files['/workspace/deep/nested.py'] = b'' - matches = await mock_docker_sandbox.glob('**/*.py') - assert 'top.py' in matches - assert 'deep/nested.py' in matches - - async def test_docker_glob_skips_blank_lines(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """Blank lines in find output are skipped.""" - mock_container._files['/workspace/a.py'] = b'' - mock_container._files['/workspace/b.py'] = b'' - # Patch exec_run to return output with embedded blank lines - original_exec_run = mock_container.exec_run - - def exec_run_with_blanks(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - exit_code, output = original_exec_run(cmd, **kwargs) - # Insert blank lines between entries - if output: - lines = output.decode().splitlines() - output = '\n\n'.join(lines).encode() - return exit_code, output - - mock_container.exec_run = exec_run_with_blanks # type: ignore[assignment] - matches = await mock_docker_sandbox.glob('**/*.py') - assert matches 
== ['a.py', 'b.py'] - async def test_docker_grep(self, mock_docker_sandbox: Any) -> None: """DockerEnvironment.grep returns matches.""" result = await mock_docker_sandbox.grep('pattern') @@ -2760,12 +2515,6 @@ def test_filter_grep_count_output_all_zero(self): # --- Additional coverage: _base.py --- -async def test_glob_match_question_mark_in_doublestar_pattern(): - """glob_match with ? inside a ** pattern.""" - assert glob_match('a/b/test.py', '**/?est.py') is True - assert glob_match('test.py', '?est.py') is True - - async def test_execution_environment_aenter_aexit(): """ExecutionEnvironment base __aenter__/__aexit__ are exercised by subclasses.""" # MemoryEnvironment exercises the base class path @@ -2789,22 +2538,6 @@ async def test_toolset_bash_empty_output(tmp_path: Path): assert 'Exit code: 0' in str(result) -async def test_toolset_glob_truncation(tmp_path: Path): - """ExecutionEnvironmentToolset glob truncates after 100 matches.""" - env = LocalEnvironment(tmp_path) - # Create 110 files - for i in range(110): - (tmp_path / f'file_{i:03d}.txt').write_text(f'content {i}') - - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context() - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - result = await manager.handle_call(ToolCallPart(tool_name='glob', args={'pattern': '*.txt'})) - assert 'truncated' in str(result) - - async def test_toolset_grep_no_matches_returns_message(tmp_path: Path): """ExecutionEnvironmentToolset grep returns message when no matches.""" (tmp_path / 'test.txt').write_text('hello world') @@ -3025,14 +2758,6 @@ async def test_memory_grep_path_filter(): assert 'other.py' not in result -async def test_memory_glob_in_subdirectory_with_path_filter(): - """MemoryEnvironment.glob works with path parameter.""" - env = MemoryEnvironment(files={'src/a.py': 'a', 'src/b.txt': 'b', 'other.py': 'c'}) - matches = await env.glob('*.py', path='src') - assert 'src/a.py' in matches - assert 'other.py' not in 
matches - - async def test_local_process_wait_no_timeout(tmp_path: Path): """_LocalEnvironmentProcess.wait without timeout (line 74).""" env = LocalEnvironment(tmp_path) @@ -3412,3 +3137,7 @@ async def ls(self, path: str = '.') -> list[FileInfo]: tools = await toolset.get_tools(ctx) result = await toolset.call_tool('ls', {}, ctx, tools['ls']) assert 'test.txt' in str(result) + + + + From 802161f6004e2be62214a96a661e3bec74ebd292 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 12:10:00 +0000 Subject: [PATCH 37/49] glob removing, need to ensure tests removal for now --- tests/test_environments.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/test_environments.py b/tests/test_environments.py index ed55312b40..464b6d26ca 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -3137,7 +3137,3 @@ async def ls(self, path: str = '.') -> list[FileInfo]: tools = await toolset.get_tools(ctx) result = await toolset.call_tool('ls', {}, ctx, tools['ls']) assert 'test.txt' in str(result) - - - - From ef04316ea124db77a1f94148d84ff57e475dcf13 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 13:06:56 +0000 Subject: [PATCH 38/49] removing ls and grep --- docs/environments.md | 27 +- .../pydantic_ai/environments/__init__.py | 5 +- .../pydantic_ai/environments/_base.py | 79 -- .../pydantic_ai/environments/docker.py | 86 +- .../pydantic_ai/environments/local.py | 83 +- .../pydantic_ai/environments/memory.py | 97 +-- .../toolsets/execution_environment.py | 60 +- tests/test_environments.py | 789 +----------------- 8 files changed, 25 insertions(+), 1201 deletions(-) diff --git a/docs/environments.md b/docs/environments.md index dd1d860475..52310093db 100644 --- a/docs/environments.md +++ b/docs/environments.md @@ -64,7 +64,7 @@ env = LocalEnvironment( ) ``` -File operations (read, write, edit, ls, glob, grep) are confined to the root directory — path traversal attempts raise `PermissionError`. 
+File operations (read, write, edit) are confined to the root directory — path traversal attempts raise `PermissionError`. !!! info "Environment variable inheritance" By default, `LocalEnvironment` inherits the host's environment variables. Set `inherit_env=False` for a clean environment where only explicitly provided `env_vars` (and per-call `env` overrides) are available. This is useful for reproducibility and testing. @@ -163,13 +163,10 @@ env = DockerEnvironment.hardened( | Tool | Description | |---|---| -| `ls` | List directory contents | | `shell` | Execute shell commands | | `read_file` | Read files with line numbers (renders images for multimodal models) | | `write_file` | Create or overwrite files | | `edit_file` | Edit files by exact string replacement | -| `glob` | Find files by pattern | -| `grep` | Search file contents with regex | Tools are dynamically registered based on the environment's capabilities. You can selectively include or exclude capabilities: @@ -321,14 +318,12 @@ Filesystem changes (created files, installed packages) persist for the lifetime You can implement [`ExecutionEnvironment`][pydantic_ai.environments.ExecutionEnvironment] to integrate with any execution backend. The only abstract member is `capabilities`; override the methods that match your declared capabilities. Override [`create_process()`][pydantic_ai.environments.ExecutionEnvironment.create_process] if you need interactive process support. 
```python {title="environments_custom.py" test="skip" lint="skip"} -from typing import Literal - -from pydantic_ai.environments import EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo +from pydantic_ai.environments import EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult class MyCloudEnvironment(ExecutionEnvironment): @property def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'shell', 'read_file', 'write_file', 'edit_file', 'ls', 'glob', 'grep'}) + return frozenset({'shell', 'read_file', 'write_file', 'edit_file'}) async def shell( self, command: str, *, timeout: float | None = 120, env: dict[str, str] | None = None @@ -348,20 +343,4 @@ class MyCloudEnvironment(ExecutionEnvironment): self, path: str, old: str, new: str, *, replace_all: bool = False ) -> int: ... - - async def ls(self, path: str = '.') -> list[FileInfo]: - ... - - async def glob(self, pattern: str, *, path: str = '.') -> list[str]: - ... - - async def grep( - self, - pattern: str, - *, - path: str | None = None, - glob_pattern: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', - ) -> str: - ... 
``` diff --git a/pydantic_ai_slim/pydantic_ai/environments/__init__.py b/pydantic_ai_slim/pydantic_ai/environments/__init__.py index d0f7b4c294..20743ce18a 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/environments/__init__.py @@ -5,7 +5,7 @@ - `ExecutionEnvironment` — abstract base class for execution environments - `ExecutionProcess` — interactive process handle with bidirectional I/O - `ExecutionEnvironmentToolset` — toolset exposing coding-agent-style tools backed by an environment -- `ExecutionResult`, `FileInfo` — result types +- `ExecutionResult` — result type Implementations: @@ -16,7 +16,7 @@ from pydantic_ai.toolsets.execution_environment import ExecutionEnvironmentToolset -from ._base import EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult, FileInfo +from ._base import EnvToolName, ExecutionEnvironment, ExecutionProcess, ExecutionResult __all__ = ( 'EnvToolName', @@ -24,5 +24,4 @@ 'ExecutionEnvironment', 'ExecutionEnvironmentToolset', 'ExecutionProcess', - 'FileInfo', ) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 6b7582db3e..bca61a891a 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -6,7 +6,6 @@ from __future__ import annotations -import re from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Any, Literal @@ -16,12 +15,10 @@ # --- Type aliases --- EnvToolName = Literal[ - 'ls', 'shell', 'read_file', 'write_file', 'edit_file', - 'grep', ] """Tool name for an environment capability. 
@@ -47,23 +44,6 @@ class ExecutionResult: """Whether the output was truncated due to length limits.""" -@dataclass -class FileInfo: - """Metadata about a file or directory.""" - - name: str - """The file or directory name.""" - - path: str - """The full path.""" - - is_dir: bool - """Whether this entry is a directory.""" - - size: int | None = None - """The file size in bytes, or None for directories.""" - - class ExecutionProcess(ABC): """Handle to a running process with bidirectional streaming I/O. @@ -194,17 +174,6 @@ def capabilities(self) -> frozenset[EnvToolName]: # All raise NotImplementedError by default. Concrete subclasses override # the methods that match their declared capabilities. - async def ls(self, path: str = '.') -> list[FileInfo]: - """List directory contents. - - Args: - path: The directory path within the environment. - - Returns: - A list of `FileInfo` entries. - """ - raise NotImplementedError(f'{type(self).__name__} does not support ls.') - async def shell( self, command: str, @@ -286,30 +255,6 @@ async def replace_str( """ raise NotImplementedError(f'{type(self).__name__} does not support replace_str.') - async def grep( - self, - pattern: str, - *, - path: str | None = None, - glob_pattern: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', - ) -> str: - """Search file contents with a regex pattern. - - Args: - pattern: The regex pattern to search for. - path: The file or directory to search in. - glob_pattern: Optional glob to filter which files are searched. - output_mode: Controls output format: - - `'content'` (default): matching lines as `file:line_number:text` - - `'files_with_matches'`: only file paths containing matches - - `'count'`: `file:count` pairs - - Returns: - Matching lines formatted as text. 
- """ - raise NotImplementedError(f'{type(self).__name__} does not support grep.') - # --- Internal helpers (not tools) --- async def create_process( @@ -369,30 +314,6 @@ def format_lines(text: str, offset: int, limit: int) -> str: return result -def collect_grep_matches( - rel_path: str, - text: str, - compiled: re.Pattern[str], - output_mode: Literal['content', 'files_with_matches', 'count'], - results: list[str], -) -> None: - """Collect grep matches from a single file into `results`. - - Shared helper used by `LocalEnvironment` and `MemoryEnvironment`. - """ - if output_mode == 'files_with_matches': - if any(compiled.search(line) for line in text.splitlines()): - results.append(rel_path) - elif output_mode == 'count': - match_count = sum(1 for line in text.splitlines() if compiled.search(line)) - if match_count > 0: - results.append(f'{rel_path}:{match_count}') - else: - for line_num, line in enumerate(text.splitlines(), start=1): - if compiled.search(line): - results.append(f'{rel_path}:{line_num}:{line}') - - def apply_edit(text: str, old_string: str, new_string: str, path: str, *, replace_all: bool) -> tuple[str, int]: """Apply a string replacement edit, returning the new text and the number of replacements. 
diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 9b94d4c114..5c4e8a2bf2 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -11,7 +11,7 @@ import struct import tarfile from pathlib import PurePosixPath -from typing import Any, Literal, cast +from typing import Any, cast import anyio import anyio.to_thread @@ -24,7 +24,6 @@ ExecutionEnvironment, ExecutionProcess, ExecutionResult, - FileInfo, apply_edit, ) @@ -59,33 +58,6 @@ def _build_read_file_cmd(path: str, *, offset: int = 0, limit: int = 2000) -> st ) -def _build_grep_cmd( - pattern: str, - *, - path: str | None = None, - glob_pattern: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', -) -> str: - """Build a shell `grep` command from structured arguments.""" - parts = ['grep', '-rIE'] # -I skips binary files, -E uses extended regex - if output_mode == 'files_with_matches': - parts.append('-l') - elif output_mode == 'count': - parts.append('-c') - else: - parts.append('-n') - if glob_pattern: - parts.extend(['--include', _shell_escape(glob_pattern)]) - parts.append(_shell_escape(pattern)) - parts.append(_shell_escape(path or '.')) - return ' '.join(parts) - - -def _filter_grep_count_output(text: str) -> str: - """Filter `grep -c` output to remove files with 0 matches.""" - return '\n'.join(line for line in text.splitlines() if not line.endswith(':0')) - - def _put_file(container: Container, path: str, data: bytes) -> None: """Write file data into a container via put_archive.""" parent = str(PurePosixPath(path).parent) @@ -385,12 +357,10 @@ def hardened( def capabilities(self) -> frozenset[EnvToolName]: # pragma: lax no cover return frozenset( { - 'ls', 'shell', 'read_file', 'write_file', 'edit_file', - 'grep', } ) @@ -585,60 +555,6 @@ def _edit() -> int: return await anyio.to_thread.run_sync(_edit) - async def ls(self, 
path: str = '.') -> list[FileInfo]: - def _ls() -> list[FileInfo]: - cmd = f'ls -la {_shell_escape(path)}' - exit_code, output = self._required_container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) - if exit_code != 0: - raise NotADirectoryError(f'Not a directory or not found: {path}') - - entries: list[FileInfo] = [] - for line in output.decode('utf-8', errors='replace').splitlines(): - # Skip total line and empty lines - if not line or line.startswith('total'): - continue - parts = line.split(None, 8) - if len(parts) < 9: - continue - perms, _, _, _, size_str, _, _, _, name = parts - is_dir = perms.startswith('d') - try: - size = int(size_str) if not is_dir else None - except ValueError: - size = None - entry_path = f'{path}/{name}' if path != '.' else name - entries.append(FileInfo(name=name, path=entry_path, is_dir=is_dir, size=size)) - return entries - - return await anyio.to_thread.run_sync(_ls) - - async def grep( - self, - pattern: str, - *, - path: str | None = None, - glob_pattern: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', - ) -> str: - r"""Search file contents using a regex pattern. - - Patterns use POSIX Extended Regular Expression (ERE) syntax, as interpreted by `grep -E`. - Supported: `|`, `+`, `?`, `()`, `{}`, character classes like `[[:digit:]]`. - Not available: lookaheads/lookbehinds, `\d`, `\w`, `\b`, non-greedy quantifiers (`*?`, `+?`). 
- """ - - def _grep() -> str: - cmd = _build_grep_cmd(pattern, path=path, glob_pattern=glob_pattern, output_mode=output_mode) - _, output = self._required_container.exec_run(['sh', '-c', cmd], workdir=self._work_dir) - text = output.decode('utf-8', errors='replace').strip() - # Strip `./` prefix from paths to match Local/Memory environment output - text = '\n'.join(line.removeprefix('./') for line in text.splitlines()) - if output_mode == 'count': - text = _filter_grep_count_output(text) - return text - - return await anyio.to_thread.run_sync(_grep) - async def is_alive(self) -> bool: """Check if the container is running. diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index 40ab5bbf9f..b90da29ff9 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -6,11 +6,9 @@ from __future__ import annotations -import fnmatch -import re import subprocess from pathlib import Path -from typing import Any, Literal +from typing import Any import anyio import anyio.abc @@ -23,9 +21,7 @@ ExecutionEnvironment, ExecutionProcess, ExecutionResult, - FileInfo, apply_edit, - collect_grep_matches, format_lines, ) @@ -137,7 +133,7 @@ def __init__( @property def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls', 'shell', 'read_file', 'write_file', 'edit_file', 'grep'}) + return frozenset({'shell', 'read_file', 'write_file', 'edit_file'}) async def __aenter__(self) -> Self: self._root_dir.mkdir(parents=True, exist_ok=True) @@ -269,78 +265,3 @@ async def replace_str( resolved.write_text(new_text, encoding='utf-8') return count - async def ls(self, path: str = '.') -> list[FileInfo]: - resolved = self._resolve_path(path) - if not resolved.is_dir(): - raise NotADirectoryError(f'Not a directory: {path}') - - entries: list[FileInfo] = [] - for entry in sorted(resolved.iterdir()): - try: - stat = entry.stat() - entries.append( - FileInfo( - 
name=entry.name, - path=str(entry.relative_to(self._root_dir)), - is_dir=entry.is_dir(), - size=stat.st_size if not entry.is_dir() else None, - ) - ) - except OSError: # pragma: no cover - continue - return entries - - async def grep( - self, - pattern: str, - *, - path: str | None = None, - glob_pattern: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', - ) -> str: - """Search file contents using a regex pattern (Python `re` module syntax).""" - search_dir = self._resolve_path(path or '.') - compiled = re.compile(pattern) - - is_explicit_file = search_dir.is_file() - if is_explicit_file: - files = [search_dir] - else: - # Use rglob with the glob_pattern when available for efficient - # filesystem traversal, but apply fnmatch on the basename as the - # authoritative filter below — this keeps semantics filename-only - # (consistent with Docker's grep --include and Memory's fnmatch) - # while letting pathlib narrow the search. - files = sorted(search_dir.rglob(glob_pattern or '*')) - - results: list[str] = [] - for file_path in files: - if not file_path.is_file(): - continue - # Skip hidden files/directories (e.g. .git/, .venv/) unless explicitly specified - if not is_explicit_file and any( - part.startswith('.') for part in file_path.relative_to(self._root_dir).parts - ): - continue - # Filename-only glob filtering, consistent with Docker's grep --include - # and MemoryEnvironment's fnmatch-on-basename approach. - if not is_explicit_file and glob_pattern and not fnmatch.fnmatch(file_path.name, glob_pattern): - continue - try: - raw = file_path.read_bytes() - except OSError: # pragma: no cover - continue - - # Skip binary files (null byte in first 8KB) - if b'\x00' in raw[:8192]: - continue - - text = raw.decode('utf-8', errors='replace') - rel_path = str(file_path.relative_to(self._root_dir)) - collect_grep_matches(rel_path, text, compiled, output_mode, results) - - if len(results) > 1000: - results.append('[... 
truncated at 1000 matches]') - break - - return '\n'.join(results) diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index 86f7947f5e..ffbf2c6677 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -6,19 +6,15 @@ from __future__ import annotations -import fnmatch import posixpath -import re from collections.abc import Callable, Mapping -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING from ._base import ( IMAGE_EXTENSIONS, ExecutionEnvironment, ExecutionResult, - FileInfo, apply_edit, - collect_grep_matches, format_lines, ) @@ -70,7 +66,7 @@ def __init__( @property def capabilities(self) -> frozenset[EnvToolName]: - caps: set[EnvToolName] = {'ls', 'read_file', 'write_file', 'edit_file', 'grep'} + caps: set[EnvToolName] = {'read_file', 'write_file', 'edit_file'} if self._command_handler is not None: caps.add('shell') return frozenset(caps) @@ -175,92 +171,3 @@ async def replace_str( self._files[normalized] = new_text return count - async def ls(self, path: str = '.') -> list[FileInfo]: - normalized = self._normalize(path) - - # Collect direct children - entries: dict[str, FileInfo] = {} - for file_path in sorted(self._files): - if normalized == '.': - rel = file_path - elif file_path.startswith(normalized + '/'): - rel = file_path[len(normalized) + 1 :] - else: - continue - - # Get the first component (direct child) - parts = rel.split('/', 1) - name = parts[0] - if name in entries: - continue - - is_dir = len(parts) > 1 - if is_dir: - entries[name] = FileInfo( - name=name, - path=f'{normalized}/{name}' if normalized != '.' else name, - is_dir=True, - ) - else: - content = self._files[file_path] - size = len(content) if isinstance(content, bytes) else len(content.encode('utf-8')) - entries[name] = FileInfo( - name=name, - path=f'{normalized}/{name}' if normalized != '.' 
else name, - is_dir=False, - size=size, - ) - - if not entries and normalized != '.': - raise NotADirectoryError(f'Not a directory: {path}') - - return list(entries.values()) - - async def grep( - self, - pattern: str, - *, - path: str | None = None, - glob_pattern: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', - ) -> str: - """Search file contents using a regex pattern (Python `re` module syntax).""" - normalized = self._normalize(path or '.') - compiled = re.compile(pattern) - - is_exact_file = normalized != '.' and normalized in self._files - - results: list[str] = [] - for file_path in sorted(self._files): - # Path filtering - if normalized != '.': - if normalized == file_path: - pass # exact file match - elif not file_path.startswith(normalized + '/'): - continue - - # Glob filtering (skip for exact file matches, matching LocalEnvironment behavior) - if not is_exact_file and glob_pattern and not fnmatch.fnmatch(posixpath.basename(file_path), glob_pattern): - continue - - # Skip hidden files unless explicitly specified - if not is_exact_file and any(part.startswith('.') for part in file_path.split('/')): - continue - - content = self._files[file_path] - - # Skip binary files - if isinstance(content, bytes): - if b'\x00' in content[:8192]: - continue - text = content.decode('utf-8', errors='replace') - else: - text = content - - collect_grep_matches(file_path, text, compiled, output_mode, results) - - if len(results) > 1000: - results.append('[... 
truncated at 1000 matches]') - break - - return '\n'.join(results) diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index 129d771399..ec498fd2cf 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -4,12 +4,11 @@ import inspect import posixpath -import re from collections.abc import Callable, Iterator, Sequence from contextlib import AsyncExitStack, contextmanager from contextvars import ContextVar, Token from dataclasses import replace -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Any import anyio from typing_extensions import Self @@ -148,37 +147,10 @@ def _register_tools(self) -> None: is deferred to ``get_tools()``, which runs at request time when the active environment is known. """ - self._register_ls() self._register_shell() self._register_read_file() self._register_write_file() self._register_edit_file() - self._register_grep() - - def _register_ls(self) -> None: - async def ls(path: str = '.') -> str: - """List directory contents. - - Args: - path: The directory path to list. Defaults to the working directory. - """ - try: - entries = await self.required_environment.ls(path) - except (NotADirectoryError, PermissionError, OSError) as e: - return f'Error: {e}' - if not entries: - return 'Empty directory.' 
- lines: list[str] = [] - for entry in entries: - if entry.is_dir: - lines.append(f'{entry.name}/') - elif entry.size is not None: - lines.append(f'{entry.name} ({entry.size} bytes)') - else: - lines.append(entry.name) - return '\n'.join(lines) - - self.tool(ls) def _register_shell(self) -> None: async def shell(command: str, timeout: int = 120) -> str: @@ -276,36 +248,6 @@ async def edit_file(path: str, old: str, new: str, replace_all: bool = False) -> self.tool(requires_approval=self._require_write_approval)(edit_file) - def _register_grep(self) -> None: - async def grep_tool( - pattern: str, - path: str | None = None, - glob: str | None = None, - output_mode: Literal['content', 'files_with_matches', 'count'] = 'content', - ) -> str: - """Search file contents with a regex pattern. - - Args: - pattern: The regex pattern to search for. - path: The file or directory to search in. - glob: Glob pattern to filter which files are searched (e.g. `*.py`). - output_mode: Controls output format: - `content` (default) shows matching lines with file paths and line numbers, - `files_with_matches` shows only file paths, - `count` shows match counts per file. - """ - try: - result = await self.required_environment.grep( - pattern, path=path, glob_pattern=glob, output_mode=output_mode - ) - except (PermissionError, OSError, re.error) as e: - return f'Error: {e}' - if not result.strip(): - return 'No matches found.' 
- return result - - self.tool(name='grep')(grep_tool) - async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]: all_tools = await super().get_tools(ctx) env = self.required_environment diff --git a/tests/test_environments.py b/tests/test_environments.py index 464b6d26ca..d59b1cdcbb 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -21,7 +21,6 @@ ExecutionEnvironment as BaseEnv, ExecutionEnvironmentToolset, ExecutionResult, - FileInfo, ) from pydantic_ai.environments._base import ( apply_edit, @@ -38,10 +37,8 @@ from pydantic_ai.environments.docker import ( DockerEnvironment, - _build_grep_cmd, _build_read_file_cmd, _DockerEnvironmentProcess, - _filter_grep_count_output, _put_file, _shell_escape, ) @@ -79,19 +76,6 @@ def test_execute_result_truncated(): assert result.truncated is True -def test_file_info(): - info = FileInfo(name='test.py', path='src/test.py', is_dir=False, size=42) - assert info.name == 'test.py' - assert info.is_dir is False - assert info.size == 42 - - -def test_file_info_directory(): - info = FileInfo(name='src', path='src', is_dir=True) - assert info.is_dir is True - assert info.size is None - - # --- LocalEnvironment: execute --- @@ -330,95 +314,6 @@ async def test_local_edit_multiline(tmp_path: Path): assert 'print("test")' in content -# --- LocalEnvironment: ls --- - - -async def test_local_ls(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('a.txt', 'a') - await env.write_file('b.txt', 'b') - (tmp_path / 'subdir').mkdir() - - entries = await env.ls('.') - names = {e.name for e in entries} - assert 'a.txt' in names - assert 'b.txt' in names - assert 'subdir' in names - - dirs = [e for e in entries if e.is_dir] - files = [e for e in entries if not e.is_dir] - assert any(d.name == 'subdir' for d in dirs) - assert all(f.size is not None and f.size > 0 for f in files) - - -async def test_local_ls_not_a_directory(tmp_path: Path): - async with 
LocalEnvironment(tmp_path) as env: - await env.write_file('file.txt', 'content') - with pytest.raises(NotADirectoryError): - await env.ls('file.txt') - - -# --- LocalEnvironment: grep --- - - -async def test_local_grep(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('a.py', 'def hello():\n pass\n') - await env.write_file('b.py', 'x = 1\n') - - result = await env.grep('hello') - assert 'a.py' in result - assert 'hello' in result - assert 'b.py' not in result - - -async def test_local_grep_with_glob_pattern(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('code.py', 'target = 1\n') - await env.write_file('code.js', 'target = 2\n') - - result = await env.grep('target', glob_pattern='*.py') - assert 'code.py' in result - assert 'code.js' not in result - - -async def test_local_grep_line_numbers(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('test.txt', 'alpha\nbeta\ngamma\nbeta\n') - - result = await env.grep('beta') - assert result == snapshot('test.txt:2:beta\ntest.txt:4:beta') - - -async def test_local_grep_no_matches(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('test.txt', 'nothing interesting') - result = await env.grep('nonexistent_pattern') - assert result == '' - - -async def test_local_grep_skips_hidden_files(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('visible.py', 'target_string\n') - (tmp_path / '.hidden').mkdir() - (tmp_path / '.hidden' / 'secret.py').write_text('target_string\n') - (tmp_path / '.dotfile').write_text('target_string\n') - - result = await env.grep('target_string') - assert 'visible.py' in result - assert '.hidden' not in result - assert '.dotfile' not in result - - -async def test_local_grep_explicit_hidden_file(tmp_path: Path): - """Explicitly-specified hidden files should be searchable.""" - async with LocalEnvironment(tmp_path) as 
env: - (tmp_path / '.env').write_text('SECRET=hunter2\n') - - result = await env.grep('SECRET', path='.env') - assert 'SECRET=hunter2' in result - - # --- LocalEnvironment: create_process --- @@ -499,7 +394,7 @@ async def test_toolset_tool_names(): ctx = build_run_context() tools = await toolset.get_tools(ctx) tool_names = sorted(tools.keys()) - assert tool_names == snapshot(['edit_file', 'grep', 'ls', 'read_file', 'shell', 'write_file']) + assert tool_names == snapshot(['edit_file', 'read_file', 'shell', 'write_file']) async def test_toolset_include_flags(): @@ -592,19 +487,6 @@ async def test_toolset_edit_retry_on_permission_error(tmp_path: Path): ) -async def test_toolset_grep_tool(tmp_path: Path): - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - await env.write_file('search.py', 'def find_me():\n pass\n') - - result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'find_me'})) - assert result == snapshot('search.py:1:def find_me():') - - # --- ExecutionEnvironmentToolset: error handling --- @@ -646,20 +528,6 @@ async def test_toolset_write_path_traversal_returns_error(tmp_path: Path): assert 'Error:' in str(result) -async def test_toolset_grep_invalid_regex_returns_error(tmp_path: Path): - """grep with invalid regex returns an error string.""" - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - await env.write_file('test.txt', 'content') - - result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': '[invalid'})) - assert 'Error:' in str(result) - - async def test_toolset_read_offset_out_of_bounds_returns_error(tmp_path: Path): """read_file with offset past EOF returns an error string.""" env = LocalEnvironment(tmp_path) @@ 
-722,9 +590,8 @@ async def test_toolset_require_write_approval(): tools = await toolset.get_tools(ctx) assert tools['write_file'].tool_def.kind == 'unapproved' assert tools['edit_file'].tool_def.kind == 'unapproved' - # read_file and search tools should NOT require approval + # read_file should NOT require approval assert tools['read_file'].tool_def.kind == 'function' - assert tools['grep'].tool_def.kind == 'function' async def test_toolset_default_no_approval(): @@ -819,98 +686,6 @@ async def test_toolset_image_support_disabled(tmp_path: Path): assert result == snapshot('[Image file: photo.png — image_support is disabled on this toolset]') -# --- LocalEnvironment: grep output modes --- - - -async def test_local_grep_files_with_matches(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('a.py', 'target = 1\nother = 2\n') - await env.write_file('b.py', 'target = 3\ntarget = 4\n') - await env.write_file('c.py', 'nothing here\n') - - result = await env.grep('target', output_mode='files_with_matches') - lines = result.strip().splitlines() - assert sorted(lines) == ['a.py', 'b.py'] - - -async def test_local_grep_count(tmp_path: Path): - async with LocalEnvironment(tmp_path) as env: - await env.write_file('a.py', 'target = 1\nother = 2\n') - await env.write_file('b.py', 'target = 3\ntarget = 4\n') - await env.write_file('c.py', 'nothing here\n') - - result = await env.grep('target', output_mode='count') - lines = sorted(result.strip().splitlines()) - assert lines == ['a.py:1', 'b.py:2'] - - -async def test_local_grep_content_default(tmp_path: Path): - """Default output_mode is 'content' with file:line:text format.""" - async with LocalEnvironment(tmp_path) as env: - await env.write_file('test.py', 'hello\nworld\n') - - result = await env.grep('hello') - assert result == snapshot('test.py:1:hello') - - -# --- LocalEnvironment: binary file detection --- - - -async def test_local_grep_skips_binary_files(tmp_path: Path): - async with 
LocalEnvironment(tmp_path) as env: - await env.write_file('text.py', 'findme = True\n') - await env.write_file('binary.pyc', b'\x00\x01\x02findme\x03\x04') - - result = await env.grep('findme') - assert 'text.py' in result - assert 'binary.pyc' not in result - - -async def test_local_grep_binary_detection_first_8kb(tmp_path: Path): - """Binary detection checks only the first 8KB.""" - async with LocalEnvironment(tmp_path) as env: - # File with null byte after 8KB — should be treated as text - content = 'findme\n' + ('x' * 8200) + '\x00' - await env.write_file('mostly_text.txt', content) - - result = await env.grep('findme') - assert 'mostly_text.txt' in result - - -# --- Toolset: grep output_mode --- - - -async def test_toolset_grep_files_with_matches(tmp_path: Path): - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - await env.write_file('a.py', 'target = 1\n') - await env.write_file('b.py', 'other = 2\n') - - result = await manager.handle_call( - ToolCallPart(tool_name='grep', args={'pattern': 'target', 'output_mode': 'files_with_matches'}) - ) - assert result == snapshot('a.py') - - -async def test_toolset_grep_count(tmp_path: Path): - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - await env.write_file('a.py', 'x = 1\nx = 2\nx = 3\n') - - result = await manager.handle_call( - ToolCallPart(tool_name='grep', args={'pattern': 'x', 'output_mode': 'count'}) - ) - assert result == snapshot('a.py:3') - - # --- MemoryEnvironment --- @@ -1018,131 +793,6 @@ async def test_memory_edit_replace_all(): assert 'xxx bbb xxx' in content -async def test_memory_ls(): - env = MemoryEnvironment( - files={ - 'a.txt': 'a', - 'b.txt': 'bb', - 'sub/c.txt': 'ccc', - } - ) - async with env: - entries 
= await env.ls('.') - names = {e.name for e in entries} - assert names == {'a.txt', 'b.txt', 'sub'} - - dirs = [e for e in entries if e.is_dir] - files = [e for e in entries if not e.is_dir] - assert len(dirs) == 1 - assert dirs[0].name == 'sub' - assert all(f.size is not None for f in files) - - -async def test_memory_ls_subdirectory(): - env = MemoryEnvironment(files={'sub/a.txt': 'a', 'sub/b.txt': 'b'}) - async with env: - entries = await env.ls('sub') - names = {e.name for e in entries} - assert names == {'a.txt', 'b.txt'} - - -async def test_memory_ls_not_a_directory(): - async with MemoryEnvironment() as env: - with pytest.raises(NotADirectoryError): - await env.ls('nonexistent') - - -async def test_memory_grep_content(): - env = MemoryEnvironment( - files={ - 'a.py': 'def hello():\n pass\n', - 'b.py': 'x = 1\n', - } - ) - async with env: - result = await env.grep('hello') - assert result == snapshot('a.py:1:def hello():') - - -async def test_memory_grep_files_with_matches(): - env = MemoryEnvironment( - files={ - 'a.py': 'target = 1\n', - 'b.py': 'target = 2\ntarget = 3\n', - 'c.py': 'nothing\n', - } - ) - async with env: - result = await env.grep('target', output_mode='files_with_matches') - lines = sorted(result.strip().splitlines()) - assert lines == ['a.py', 'b.py'] - - -async def test_memory_grep_count(): - env = MemoryEnvironment( - files={ - 'a.py': 'x = 1\n', - 'b.py': 'x = 2\nx = 3\n', - } - ) - async with env: - result = await env.grep('x', output_mode='count') - lines = sorted(result.strip().splitlines()) - assert lines == ['a.py:1', 'b.py:2'] - - -async def test_memory_grep_skips_binary(): - env = MemoryEnvironment( - files={ - 'text.py': 'findme = True\n', - 'binary.dat': b'\x00\x01findme\x02', - } - ) - async with env: - result = await env.grep('findme') - assert 'text.py' in result - assert 'binary.dat' not in result - - -async def test_memory_grep_skips_hidden(): - env = MemoryEnvironment( - files={ - 'visible.py': 'target\n', - 
'.hidden/secret.py': 'target\n', - } - ) - async with env: - result = await env.grep('target') - assert 'visible.py' in result - assert '.hidden' not in result - - -async def test_memory_grep_explicit_hidden_file(): - """Explicitly-specified hidden files should be searchable.""" - env = MemoryEnvironment( - files={ - '.env': 'SECRET=hunter2\n', - 'visible.py': 'hello\n', - } - ) - async with env: - result = await env.grep('SECRET', path='.env') - assert 'SECRET=hunter2' in result - - -async def test_memory_grep_with_glob_pattern(): - env = MemoryEnvironment( - files={ - 'code.py': 'target\n', - 'code.js': 'target\n', - } - ) - async with env: - result = await env.grep('target', glob_pattern='*.py') - assert 'code.py' in result - assert 'code.js' not in result - - async def test_memory_execute_with_handler(): def handler(cmd: str) -> ExecutionResult: return ExecutionResult(output=f'ran: {cmd}\n', exit_code=0) @@ -1203,10 +853,6 @@ async def test_memory_toolset_integration(): ) assert result == snapshot('File written: new.py') - # grep - result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'hello'})) - assert result == snapshot('main.py:1:print("hello")') - # --- Agent-level integration test --- @@ -1295,17 +941,6 @@ async def test_local_read_file_bytes_nonexistent(tmp_path: Path): await env.read_file('nope.bin') -async def test_local_grep_specific_file(tmp_path: Path): - """grep targeting a specific file works.""" - async with LocalEnvironment(tmp_path) as env: - await env.write_file('target.py', 'findme = True\n') - await env.write_file('other.py', 'findme = False\n') - - result = await env.grep('findme', path='target.py') - assert 'target.py' in result - assert 'other.py' not in result - - # --- MemoryEnvironment: additional edge cases --- @@ -1351,38 +986,6 @@ async def test_memory_edit_binary(): assert count == 1 -async def test_memory_grep_exact_path(): - """grep with path= targeting an exact file.""" - env = MemoryEnvironment( - 
files={ - 'src/a.py': 'target\n', - 'src/b.py': 'target\n', - } - ) - async with env: - result = await env.grep('target', path='src/a.py') - assert 'src/a.py' in result - assert 'src/b.py' not in result - - -async def test_memory_grep_no_text_content(): - """grep with text bytes (non-binary) works.""" - env = MemoryEnvironment(files={'data.txt': b'findme in bytes'}) - async with env: - result = await env.grep('findme') - assert 'data.txt' in result - - -async def test_memory_ls_with_bytes(): - """ls reports size correctly for bytes content.""" - env = MemoryEnvironment(files={'data.bin': b'\x00\x01\x02'}) - async with env: - entries = await env.ls('.') - assert len(entries) == 1 - assert entries[0].size == 3 - assert entries[0].is_dir is False - - # --- ExecutionEnvironmentToolset: additional coverage --- @@ -1449,19 +1052,6 @@ async def test_toolset_read_binary_non_image(): assert result == '[Binary file: data.bin — cannot display as text]' -async def test_toolset_grep_no_matches(tmp_path: Path): - """grep with no matches returns 'No matches found.'.""" - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - await env.write_file('test.txt', 'nothing relevant\n') - result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'nonexistent_xyz'})) - assert result == snapshot('No matches found.') - - async def test_toolset_edit_success(tmp_path: Path): """edit_file tool returns success message.""" env = LocalEnvironment(tmp_path) @@ -1562,14 +1152,6 @@ def exec_run( # noqa: C901 return 0, result.encode('utf-8') return 1, b'File not found' - # Handle ls -la - if 'ls -la' in cmd_str: - output_lines = ['total 0'] - for path, data in sorted(self._files.items()): - name = path.rsplit('/', 1)[-1] if '/' in path else path - output_lines.append(f'-rw-r--r-- 1 root root {len(data)} Jan 1 00:00 {name}') - return 0, 
'\n'.join(output_lines).encode('utf-8') - # Handle find (glob) if 'find' in cmd_str: # Extract the search path from: find '' ... @@ -1595,10 +1177,6 @@ def exec_run( # noqa: C901 matches.append(rel) return 0, '\n'.join(matches).encode('utf-8') - # Handle grep - if 'grep' in cmd_str: - return 0, b'match:1:result' - # Handle general commands if 'echo' in cmd_str: # Extract the echo argument @@ -1725,38 +1303,6 @@ async def test_docker_edit_file(self, mock_docker_sandbox: Any, mock_container: count = await mock_docker_sandbox.replace_str('code.py', 'old_value', 'new_value') assert count == 1 - async def test_docker_ls(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls returns file entries.""" - mock_container._files['test.txt'] = b'hello' - entries = await mock_docker_sandbox.ls('.') - assert isinstance(entries, list) - - async def test_docker_grep(self, mock_docker_sandbox: Any) -> None: - """DockerEnvironment.grep returns matches.""" - result = await mock_docker_sandbox.grep('pattern') - assert isinstance(result, str) - - async def test_docker_grep_with_options(self, mock_docker_sandbox: Any) -> None: - """DockerEnvironment.grep with output_mode and glob_pattern.""" - result = await mock_docker_sandbox.grep('pattern', glob_pattern='*.py', output_mode='files_with_matches') - assert isinstance(result, str) - - async def test_docker_grep_count(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.grep count mode filters zero-count results.""" - # Override exec_run to return count-style output - original_exec_run = mock_container.exec_run - - def count_exec_run(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - if isinstance(cmd, list) and 'sh' in cmd[0]: - cmd_str = cmd[-1] if len(cmd) > 1 else '' - if 'grep' in cmd_str and '-c' in cmd_str: - return 0, b'a.py:3\nb.py:0\nc.py:1' - return original_exec_run(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = count_exec_run # 
type: ignore[assignment] - result = await mock_docker_sandbox.grep('pattern', output_mode='count') - assert 'b.py:0' not in result - async def test_docker_container_property(self, mock_docker_sandbox: Any) -> None: """DockerEnvironment._required_container raises when not started.""" @@ -1986,37 +1532,6 @@ def exec_with_binary(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: result = await mock_docker_sandbox.read_file('data.bin') assert isinstance(result, bytes) - async def test_docker_ls_size_value_error(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls handles non-numeric size fields gracefully.""" - original = mock_container.exec_run - - def exec_with_bad_size(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd - if 'ls -la' in cmd_str: - return 0, b'total 0\n-rw-r--r-- 1 root root NaN Jan 1 00:00 file.txt' - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = exec_with_bad_size # type: ignore[assignment] - entries = await mock_docker_sandbox.ls() - assert len(entries) == 1 - assert entries[0].name == 'file.txt' - assert entries[0].size is None - - async def test_docker_ls_short_line(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls skips lines with fewer than 9 fields.""" - original = mock_container.exec_run - - def exec_with_short_lines(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - cmd_str = ' '.join(cmd) if isinstance(cmd, list) else cmd - if 'ls -la' in cmd_str: - return 0, b'total 0\nshort line\n-rw-r--r-- 1 root root 42 Jan 1 00:00 real.txt' - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = exec_with_short_lines # type: ignore[assignment] - entries = await mock_docker_sandbox.ls() - assert len(entries) == 1 - assert entries[0].name == 'real.txt' - async def test_docker_is_alive_exception(self, mock_docker_sandbox: Any, mock_container: 
MockContainer) -> None: """DockerEnvironment.is_alive returns False when reload raises.""" mock_container.reload = MagicMock(side_effect=DockerException('connection error')) @@ -2276,19 +1791,6 @@ async def test_docker_process_aenter( assert entered is proc assert proc._exec_id == 'exec-aenter' - async def test_docker_ls_not_found(self, mock_docker_sandbox: Any, mock_container: MockContainer) -> None: - """DockerEnvironment.ls raises NotADirectoryError on missing dirs.""" - original = mock_container.exec_run - - def fail_ls(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: - if isinstance(cmd, list) and 'ls -la' in ' '.join(cmd): - return 1, b'ls: cannot access: No such file or directory' - return original(cmd, **kwargs) # pragma: no cover - - mock_container.exec_run = fail_ls # type: ignore[assignment] - with pytest.raises(NotADirectoryError): - await mock_docker_sandbox.ls('nonexistent') - async def test_docker_read_file_image_not_found( self, mock_docker_sandbox: Any, mock_container: MockContainer ) -> None: @@ -2465,51 +1967,6 @@ def test_build_read_file_cmd_continuation_hint(self): assert 'more lines' in cmd assert 'offset=10' in cmd - def test_build_grep_cmd_content(self): - cmd = _build_grep_cmd('pattern') - assert 'grep -rIE' in cmd - assert '-n' in cmd - assert "'pattern'" in cmd - assert "'.'" in cmd - - def test_build_grep_cmd_files_with_matches(self): - cmd = _build_grep_cmd('pat', output_mode='files_with_matches') - assert '-l' in cmd - assert '-n' not in cmd - - def test_build_grep_cmd_count(self): - cmd = _build_grep_cmd('pat', output_mode='count') - assert '-c' in cmd - - def test_build_grep_cmd_with_path(self): - cmd = _build_grep_cmd('pat', path='src') - assert "'src'" in cmd - - def test_build_grep_cmd_with_glob_pattern(self): - """glob_pattern is shell-escaped to prevent injection.""" - cmd = _build_grep_cmd('pat', glob_pattern='*.py') - assert '--include' in cmd - assert "'*.py'" in cmd - - def test_build_grep_cmd_glob_pattern_escaping(self): - 
"""Verify glob_pattern with special chars is properly shell-escaped.""" - cmd = _build_grep_cmd('pat', glob_pattern='*.py') - # The glob pattern should be shell-escaped (wrapped in single quotes) - assert "--include '*.py'" in cmd - - # Even a malicious glob_pattern gets safely escaped - cmd2 = _build_grep_cmd('pat', glob_pattern='$(evil)') - assert '$(evil)' not in cmd2.replace("'$(evil)'", '') # Only appears inside quotes - - def test_filter_grep_count_output(self): - text = 'a.py:3\nb.py:0\nc.py:1' - result = _filter_grep_count_output(text) - assert result == 'a.py:3\nc.py:1' - - def test_filter_grep_count_output_all_zero(self): - text = 'a.py:0\nb.py:0' - result = _filter_grep_count_output(text) - assert result == '' # --- Additional coverage: _base.py --- @@ -2538,19 +1995,6 @@ async def test_toolset_bash_empty_output(tmp_path: Path): assert 'Exit code: 0' in str(result) -async def test_toolset_grep_no_matches_returns_message(tmp_path: Path): - """ExecutionEnvironmentToolset grep returns message when no matches.""" - (tmp_path / 'test.txt').write_text('hello world') - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context() - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - result = await manager.handle_call(ToolCallPart(tool_name='grep', args={'pattern': 'zzz_nonexistent'})) - assert 'No matches' in str(result) - - async def test_toolset_lifecycle_error(tmp_path: Path): """ExecutionEnvironmentToolset handles environment startup failures.""" @@ -2639,47 +2083,6 @@ async def test_local_read_file_bytes_not_found(tmp_path: Path): await env.read_file('nonexistent.txt') -async def test_local_grep_on_file(tmp_path: Path): - """LocalEnvironment.grep on a specific file path.""" - (tmp_path / 'target.py').write_text('found = True\nmissed = False\n') - env = LocalEnvironment(tmp_path) - result = await env.grep('found', path='target.py') - assert 'found' in result - assert 'missed' not in 
result - - -async def test_local_grep_with_glob_pattern_filters_by_extension(tmp_path: Path): - """LocalEnvironment.grep with glob filtering.""" - (tmp_path / 'a.py').write_text('match_here\n') - (tmp_path / 'b.txt').write_text('match_here\n') - env = LocalEnvironment(tmp_path) - result = await env.grep('match_here', glob_pattern='*.py') - assert 'a.py' in result - assert 'b.txt' not in result - - -async def test_local_grep_skips_binary_files_with_null_bytes(tmp_path: Path): - """LocalEnvironment.grep skips files with null bytes.""" - (tmp_path / 'binary.bin').write_bytes(b'\x00binary content') - (tmp_path / 'text.txt').write_text('searchable\n') - env = LocalEnvironment(tmp_path) - result = await env.grep('searchable') - assert 'text.txt' in result - assert 'binary' not in result - - -async def test_local_grep_skips_hidden_files_in_hidden_dirs(tmp_path: Path): - """LocalEnvironment.grep skips hidden files/dirs.""" - hidden_dir = tmp_path / '.hidden' - hidden_dir.mkdir() - (hidden_dir / 'secret.txt').write_text('findme\n') - (tmp_path / 'visible.txt').write_text('findme\n') - env = LocalEnvironment(tmp_path) - result = await env.grep('findme') - assert 'visible.txt' in result - assert '.hidden' not in result - - async def test_local_execute_output_truncation(tmp_path: Path): """LocalEnvironment.execute truncates long output.""" # Write a script that outputs lots of text @@ -2716,48 +2119,6 @@ async def test_memory_read_file_bytes_not_found_raises_error(): await env.read_file('missing.txt') -async def test_memory_ls_non_root_directory(): - """MemoryEnvironment.ls lists files in a subdirectory.""" - env = MemoryEnvironment(files={'sub/a.txt': 'a', 'sub/b.txt': 'b', 'other.txt': 'c'}) - entries = await env.ls('sub') - assert len(entries) == 2 - names = {e.name for e in entries} - assert names == {'a.txt', 'b.txt'} - - -async def test_memory_ls_with_subdirs(): - """MemoryEnvironment.ls shows directories in listing.""" - env = 
MemoryEnvironment(files={'dir/sub/file.txt': 'content'}) - entries = await env.ls('dir') - assert len(entries) == 1 - assert entries[0].name == 'sub' - assert entries[0].is_dir is True - - -async def test_memory_ls_skips_non_children(): - """MemoryEnvironment.ls skips files not under the directory.""" - env = MemoryEnvironment(files={'a/b.txt': 'x', 'c/d.txt': 'y'}) - entries = await env.ls('a') - assert len(entries) == 1 - assert entries[0].name == 'b.txt' - - -async def test_memory_grep_binary_skip(): - """MemoryEnvironment.grep skips binary files.""" - env = MemoryEnvironment(files={'binary.bin': b'\x00binary data', 'text.txt': 'findme'}) - result = await env.grep('findme') - assert 'text.txt' in result - assert 'binary' not in result - - -async def test_memory_grep_path_filter(): - """MemoryEnvironment.grep filters by exact file path.""" - env = MemoryEnvironment(files={'sub/target.py': 'match_here', 'other.py': 'match_here'}) - result = await env.grep('match_here', path='sub') - assert 'sub/target.py' in result - assert 'other.py' not in result - - async def test_local_process_wait_no_timeout(tmp_path: Path): """_LocalEnvironmentProcess.wait without timeout (line 74).""" env = LocalEnvironment(tmp_path) @@ -2785,80 +2146,6 @@ async def test_memory_read_file_that_is_also_directory_prefix(): assert 'I am a file' in content -# --- ExecutionEnvironmentToolset: capability and edit strategy resolution --- - - -# --- ExecutionEnvironmentToolset: ls formatting through toolset --- - - -async def test_toolset_ls_error_handling(): - """Toolset ls returns error string when environment raises.""" - - class _ErrorLsEnv(BaseEnv): - @property - def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls'}) - - async def ls(self, path: str = '.') -> list[FileInfo]: - raise NotADirectoryError(f'Not a directory: {path}') - - env = _ErrorLsEnv() - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context() - tools = await toolset.get_tools(ctx) - result = 
await toolset.call_tool('ls', {'path': '/bad'}, ctx, tools['ls']) - assert 'Error:' in str(result) - - -async def test_toolset_ls_formats_dirs(): - """Toolset ls formats directory entries with trailing /.""" - env = MemoryEnvironment(files={'sub/a.txt': 'hello'}) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context() - tools = await toolset.get_tools(ctx) - async with env: - result = await toolset.call_tool('ls', {'path': '.'}, ctx, tools['ls']) - assert 'sub/' in str(result) - - -async def test_toolset_ls_formats_files_without_size(): - """Toolset ls formats file entries without size (just the name).""" - - class _NoSizeEnv(BaseEnv): - @property - def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls'}) - - async def ls(self, path: str = '.') -> list[FileInfo]: - return [FileInfo(name='readme.txt', path='readme.txt', is_dir=False, size=None)] - - env = _NoSizeEnv() - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context() - tools = await toolset.get_tools(ctx) - result = await toolset.call_tool('ls', {'path': '.'}, ctx, tools['ls']) - assert str(result) == 'readme.txt' - - -async def test_toolset_ls_empty_directory(): - """Toolset ls returns 'Empty directory.' for empty listings.""" - - class _EmptyLsEnv(BaseEnv): - @property - def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls'}) - - async def ls(self, path: str = '.') -> list[FileInfo]: - return [] - - env = _EmptyLsEnv() - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context() - tools = await toolset.get_tools(ctx) - result = await toolset.call_tool('ls', {'path': '.'}, ctx, tools['ls']) - assert str(result) == 'Empty directory.' 
- - # --- ExecutionEnvironmentToolset: environment_factory --- @@ -3009,32 +2296,32 @@ async def test_memory_read_image_stored_as_string(): async def test_toolset_factory_filters_tools_by_capabilities(): """When using environment_factory, get_tools() only returns tools supported by the runtime environment.""" - class _LsOnlyEnv(BaseEnv): + class _ShellOnlyEnv(BaseEnv): @property def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls'}) + return frozenset({'shell'}) - async def ls(self, path: str = '.') -> list[FileInfo]: - return [] # pragma: no cover + async def shell(self, command: str, *, timeout: float | None = None, env: dict[str, str] | None = None) -> ExecutionResult: + return ExecutionResult(output='', exit_code=0) # pragma: no cover - toolset = ExecutionEnvironmentToolset(environment_factory=_LsOnlyEnv) + toolset = ExecutionEnvironmentToolset(environment_factory=_ShellOnlyEnv) # Before entering, all tools are registered (no env to check) ctx = build_run_context() async with toolset: tools = await toolset.get_tools(ctx) - # Only ls should be exposed — the runtime env only supports ls - assert set(tools.keys()) == {'ls'} + # Only shell should be exposed — the runtime env only supports shell + assert set(tools.keys()) == {'shell'} async def test_toolset_use_environment_filters_tools(): """use_environment() with a limited env filters tools from get_tools().""" - class _LsOnlyEnv(BaseEnv): + class _ShellOnlyEnv(BaseEnv): @property def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls'}) + return frozenset({'shell'}) # Full-capability shared env registers all tools full_env = MemoryEnvironment() @@ -3043,14 +2330,13 @@ def capabilities(self) -> frozenset[EnvToolName]: async with full_env: all_tools = await toolset.get_tools(ctx) - assert 'ls' in all_tools assert 'read_file' in all_tools assert 'write_file' in all_tools - # Override with a limited env — only ls should remain - with toolset.use_environment(_LsOnlyEnv()): + # 
Override with a limited env — only shell should remain + with toolset.use_environment(_ShellOnlyEnv()): limited_tools = await toolset.get_tools(ctx) - assert set(limited_tools.keys()) == {'ls'} + assert set(limited_tools.keys()) == {'shell'} # After exiting use_environment, all tools are back restored_tools = await toolset.get_tools(ctx) @@ -3090,50 +2376,3 @@ async def test_local_read_file_binary_non_image(tmp_path: Path): assert result == b'\x80\x81\x82\xff' -async def test_local_grep_truncation(tmp_path: Path): - """LocalEnvironment.grep truncates at 1000+ matches.""" - async with LocalEnvironment(tmp_path) as env: - # Create a file with 1002 matching lines - lines = '\n'.join(f'match_{i}' for i in range(1002)) - await env.write_file('big.txt', lines) - result = await env.grep('match_') - assert '[... truncated at 1000 matches]' in result - - -async def test_memory_ls_duplicate_entry(): - """MemoryEnvironment.ls deduplicates entries at the same directory level.""" - # 'sub/a.txt' and 'sub/b.txt' both create a 'sub' directory entry - env = MemoryEnvironment(files={'sub/a.txt': 'a', 'sub/b.txt': 'b'}) - async with env: - entries = await env.ls() - names = [e.name for e in entries] - assert names.count('sub') == 1 - - -async def test_memory_grep_truncation(): - """MemoryEnvironment.grep truncates at 1000+ matches.""" - lines = '\n'.join(f'match_{i}' for i in range(1002)) - env = MemoryEnvironment(files={'big.txt': lines}) - async with env: - result = await env.grep('match_') - assert '[... 
truncated at 1000 matches]' in result - - -async def test_toolset_factory_ls_only_calls_ls(): - """Test that _LsOnlyEnv.ls is actually callable (covers line 3071).""" - - class _LsOnlyEnv(BaseEnv): - @property - def capabilities(self) -> frozenset[EnvToolName]: - return frozenset({'ls'}) - - async def ls(self, path: str = '.') -> list[FileInfo]: - return [FileInfo(name='test.txt', path='test.txt', is_dir=False, size=10)] - - toolset = ExecutionEnvironmentToolset(environment_factory=_LsOnlyEnv) - ctx = build_run_context() - - async with toolset: - tools = await toolset.get_tools(ctx) - result = await toolset.call_tool('ls', {}, ctx, tools['ls']) - assert 'test.txt' in str(result) From dbbc126c76c89a479b4ef783fc61ab87bff7f45b Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 13:11:58 +0000 Subject: [PATCH 39/49] lint --- pydantic_ai_slim/pydantic_ai/environments/local.py | 1 - pydantic_ai_slim/pydantic_ai/environments/memory.py | 1 - tests/test_environments.py | 7 +++---- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index b90da29ff9..f31d733cab 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -264,4 +264,3 @@ async def replace_str( new_text, count = apply_edit(text, old, new, path, replace_all=replace_all) resolved.write_text(new_text, encoding='utf-8') return count - diff --git a/pydantic_ai_slim/pydantic_ai/environments/memory.py b/pydantic_ai_slim/pydantic_ai/environments/memory.py index ffbf2c6677..78134c0179 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/memory.py +++ b/pydantic_ai_slim/pydantic_ai/environments/memory.py @@ -170,4 +170,3 @@ async def replace_str( new_text, count = apply_edit(text, old, new, path, replace_all=replace_all) self._files[normalized] = new_text return count - diff --git a/tests/test_environments.py 
b/tests/test_environments.py index d59b1cdcbb..22fe1fced6 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1968,7 +1968,6 @@ def test_build_read_file_cmd_continuation_hint(self): assert 'offset=10' in cmd - # --- Additional coverage: _base.py --- @@ -2301,7 +2300,9 @@ class _ShellOnlyEnv(BaseEnv): def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'shell'}) - async def shell(self, command: str, *, timeout: float | None = None, env: dict[str, str] | None = None) -> ExecutionResult: + async def shell( + self, command: str, *, timeout: float | None = None, env: dict[str, str] | None = None + ) -> ExecutionResult: return ExecutionResult(output='', exit_code=0) # pragma: no cover toolset = ExecutionEnvironmentToolset(environment_factory=_ShellOnlyEnv) @@ -2374,5 +2375,3 @@ async def test_local_read_file_binary_non_image(tmp_path: Path): result = await env.read_file('data.bin') assert isinstance(result, bytes) assert result == b'\x80\x81\x82\xff' - - From 29ff88cc0104d1ed78522472cce8dd350c2e55f6 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 13:23:19 +0000 Subject: [PATCH 40/49] non blocking writes / reads --- docs/api/environments.md | 1 - .../pydantic_ai/environments/local.py | 61 +++++++++++-------- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/docs/api/environments.md b/docs/api/environments.md index 4d9bc9f0a0..3cc75090ae 100644 --- a/docs/api/environments.md +++ b/docs/api/environments.md @@ -8,7 +8,6 @@ - ExecutionEnvironmentToolset - ExecutionProcess - ExecutionResult - - FileInfo ## `pydantic_ai.environments.local` diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index f31d733cab..dbb1c5f758 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -12,6 +12,7 @@ import anyio import anyio.abc +import anyio.to_thread from typing_extensions import 
Self from ._base import ( @@ -136,7 +137,7 @@ def capabilities(self) -> frozenset[EnvToolName]: return frozenset({'shell', 'read_file', 'write_file', 'edit_file'}) async def __aenter__(self) -> Self: - self._root_dir.mkdir(parents=True, exist_ok=True) + await anyio.to_thread.run_sync(lambda: self._root_dir.mkdir(parents=True, exist_ok=True)) return self async def __aexit__(self, *_args: Any) -> None: @@ -225,28 +226,36 @@ async def shell( async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> str | bytes: resolved = self._resolve_path(path) - if not resolved.is_file(): - if resolved.is_dir(): - raise FileNotFoundError(f"'{path}' is a directory, not a file.") - raise FileNotFoundError(f'File not found: {path}') - if resolved.suffix.lower() in IMAGE_EXTENSIONS: - return resolved.read_bytes() + def _read() -> str | bytes: + if not resolved.is_file(): + if resolved.is_dir(): + raise FileNotFoundError(f"'{path}' is a directory, not a file.") + raise FileNotFoundError(f'File not found: {path}') - raw = resolved.read_bytes() - try: - text = raw.decode('utf-8') - except UnicodeDecodeError: - return raw - return format_lines(text, offset, limit) + if resolved.suffix.lower() in IMAGE_EXTENSIONS: + return resolved.read_bytes() + + raw = resolved.read_bytes() + try: + text = raw.decode('utf-8') + except UnicodeDecodeError: + return raw + return format_lines(text, offset, limit) + + return await anyio.to_thread.run_sync(_read) async def write_file(self, path: str, content: str | bytes) -> None: resolved = self._resolve_path(path) - resolved.parent.mkdir(parents=True, exist_ok=True) - if isinstance(content, bytes): - resolved.write_bytes(content) - else: - resolved.write_text(content, encoding='utf-8') + + def _write() -> None: + resolved.parent.mkdir(parents=True, exist_ok=True) + if isinstance(content, bytes): + resolved.write_bytes(content) + else: + resolved.write_text(content, encoding='utf-8') + + await anyio.to_thread.run_sync(_write) async def 
replace_str( self, @@ -257,10 +266,14 @@ async def replace_str( replace_all: bool = False, ) -> int: resolved = self._resolve_path(path) - if not resolved.is_file(): - raise FileNotFoundError(f'File not found: {path}') - text = resolved.read_text(encoding='utf-8') - new_text, count = apply_edit(text, old, new, path, replace_all=replace_all) - resolved.write_text(new_text, encoding='utf-8') - return count + def _edit() -> int: + if not resolved.is_file(): + raise FileNotFoundError(f'File not found: {path}') + + text = resolved.read_text(encoding='utf-8') + new_text, count = apply_edit(text, old, new, path, replace_all=replace_all) + resolved.write_text(new_text, encoding='utf-8') + return count + + return await anyio.to_thread.run_sync(_edit) From 3623284ba2bc42b7bc9f92b3b614834037810361 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 13:27:29 +0000 Subject: [PATCH 41/49] addressing issues with open sockets --- .../pydantic_ai/environments/docker.py | 21 ++++++++++++++++++- tests/test_environments.py | 15 +++++++++---- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 5c4e8a2bf2..ce3832b32e 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -198,6 +198,10 @@ def _read_frame(self) -> tuple[int, bytes]: @property def returncode(self) -> int | None: + return self._returncode + + def _inspect_exit_code(self) -> int | None: + """Synchronously inspect the Docker exec state and cache the exit code.""" if self._returncode is not None: return self._returncode if self._exec_id is None: @@ -215,10 +219,14 @@ def returncode(self) -> int | None: pass return None + async def _poll_exit_code(self) -> int | None: + """Check the Docker exec state without blocking the event loop.""" + return await anyio.to_thread.run_sync(self._inspect_exit_code) + async def 
wait(self, timeout: float | None = None) -> int: async def _poll() -> int: while True: - rc = self.returncode + rc = await self._poll_exit_code() if rc is not None: return rc await anyio.sleep(0.1) @@ -235,6 +243,11 @@ async def kill(self) -> None: except OSError: pass + async def __aexit__(self, *args: Any) -> None: + await self._poll_exit_code() + if self._returncode is None: + await self.kill() + class DockerEnvironment(ExecutionEnvironment): """Docker container-based environment for isolated code execution. @@ -429,6 +442,12 @@ def _teardown(self) -> None: # Best-effort cleanup: container may already be removed pass self._container = None + if self._client is not None: + try: + self._client.close() + except (DockerException, OSError): + pass + self._client = None @property def _required_container(self) -> Container: diff --git a/tests/test_environments.py b/tests/test_environments.py index 22fe1fced6..984727066a 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1653,20 +1653,25 @@ async def test_docker_process_returncode( async def test_docker_process_returncode_from_inspect( self, ) -> None: - """_DockerEnvironmentProcess.returncode polls Docker API.""" + """_DockerEnvironmentProcess._poll_exit_code polls Docker API.""" container = MockContainer() container.client.api.exec_inspect.return_value = {'ExitCode': 42, 'Running': False} proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._exec_id = 'exec-123' + # returncode only returns cached value — no I/O + assert proc.returncode is None + + # _poll_exit_code offloads the HTTP call and caches the result + rc = await proc._poll_exit_code() + assert rc == 42 assert proc.returncode == 42 - assert proc._returncode == 42 async def test_docker_process_returncode_still_running( self, ) -> None: - """_DockerEnvironmentProcess.returncode returns None when process is running (ExitCode=0, Running=True).""" + 
"""_DockerEnvironmentProcess._poll_exit_code returns None when process is running.""" container = MockContainer() # Docker returns ExitCode=0 + Running=True for still-running processes @@ -1674,18 +1679,20 @@ async def test_docker_process_returncode_still_running( proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._exec_id = 'exec-123' + assert await proc._poll_exit_code() is None assert proc.returncode is None async def test_docker_process_returncode_inspect_error( self, ) -> None: - """_DockerEnvironmentProcess.returncode handles API errors.""" + """_DockerEnvironmentProcess._poll_exit_code handles API errors.""" container = MockContainer() container.client.api.exec_inspect.side_effect = OSError('connection failed') proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] proc._exec_id = 'exec-123' + assert await proc._poll_exit_code() is None assert proc.returncode is None async def test_docker_process_send( From e4232834d9fcb95135616e3be5d57c3ca1a0a5a9 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 13:52:19 +0000 Subject: [PATCH 42/49] cov --- tests/test_environments.py | 81 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/tests/test_environments.py b/tests/test_environments.py index 984727066a..4a52abf5aa 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1798,6 +1798,87 @@ async def test_docker_process_aenter( assert entered is proc assert proc._exec_id == 'exec-aenter' + async def test_docker_process_poll_exit_code_no_exec_id( + self, + ) -> None: + """_DockerEnvironmentProcess._poll_exit_code returns None when _exec_id is None.""" + container = MockContainer() + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + + # _exec_id is None by default (process not started) + assert proc._exec_id is None + assert await proc._poll_exit_code() is None 
+ + async def test_docker_process_aexit_kills_running( + self, + ) -> None: + """_DockerEnvironmentProcess.__aexit__ kills the process if still running.""" + container = MockContainer() + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + proc._socket = mock_socket + # Process is "still running" — _exec_id set but inspect says Running=True + proc._exec_id = 'exec-aexit' + container.client.api.exec_inspect.return_value = {'Running': True, 'ExitCode': None} + + await proc.__aexit__(None, None, None) + + # Should have called kill (socket.close) + mock_socket.close.assert_called_once() + assert proc._returncode is None + + async def test_docker_process_aexit_already_exited( + self, + ) -> None: + """_DockerEnvironmentProcess.__aexit__ does not kill if process already exited.""" + container = MockContainer() + proc = _DockerEnvironmentProcess(container, 'echo test', '/workspace') # type: ignore[arg-type] + mock_socket = MagicMock() + proc._socket = mock_socket + proc._exec_id = 'exec-aexit' + container.client.api.exec_inspect.return_value = {'Running': False, 'ExitCode': 0} + + await proc.__aexit__(None, None, None) + + # Should NOT have called kill + mock_socket.close.assert_not_called() + assert proc._returncode == 0 + + async def test_mock_container_find_command( + self, + ) -> None: + """MockContainer.exec_run handles find commands for glob operations.""" + container = MockContainer() + container._files['/workspace/src/main.py'] = b'print("hello")' + container._files['/workspace/src/utils.py'] = b'# utils' + container._files['/workspace/README.md'] = b'# readme' + + # find with '.' searches all files relative to workdir + exit_code, output = container.exec_run( + ['sh', '-c', "find '.' 
-type f"], workdir='/workspace' + ) + output_str = output.decode() + assert exit_code == 0 + assert './src/main.py' in output_str + assert './src/utils.py' in output_str + assert './README.md' in output_str + + async def test_mock_container_find_command_subpath( + self, + ) -> None: + """MockContainer.exec_run handles find commands with a specific subdirectory.""" + container = MockContainer() + container._files['/workspace/src/main.py'] = b'print("hello")' + container._files['/workspace/docs/guide.md'] = b'# guide' + + exit_code, output = container.exec_run( + ['sh', '-c', "find 'src' -type f"], workdir='/workspace' + ) + output_str = output.decode() + assert exit_code == 0 + assert 'src/main.py' in output_str + assert 'guide.md' not in output_str + async def test_docker_read_file_image_not_found( self, mock_docker_sandbox: Any, mock_container: MockContainer ) -> None: From 114ac84b727a5444b081270288c7bc7128db78bf Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 14:11:03 +0000 Subject: [PATCH 43/49] cov --- tests/test_environments.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_environments.py b/tests/test_environments.py index 4a52abf5aa..753389ba50 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1852,6 +1852,8 @@ async def test_mock_container_find_command( container._files['/workspace/src/main.py'] = b'print("hello")' container._files['/workspace/src/utils.py'] = b'# utils' container._files['/workspace/README.md'] = b'# readme' + # File outside workdir should be skipped + container._files['/other/secret.txt'] = b'secret' # find with '.' 
searches all files relative to workdir exit_code, output = container.exec_run( @@ -1862,6 +1864,7 @@ async def test_mock_container_find_command( assert './src/main.py' in output_str assert './src/utils.py' in output_str assert './README.md' in output_str + assert 'secret' not in output_str async def test_mock_container_find_command_subpath( self, From 26ba4e8b0f5b0548e9e246e6201b537c0678f633 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 14:11:52 +0000 Subject: [PATCH 44/49] docker fix timeout 0 unintentional --- pydantic_ai_slim/pydantic_ai/environments/docker.py | 4 ++++ pydantic_ai_slim/pydantic_ai/environments/local.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index ce3832b32e..7646e31642 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -482,9 +482,13 @@ async def shell( env: dict[str, str] | None = None, ) -> ExecutionResult: """Execute a command in the container.""" + if timeout is not None and timeout <= 0: + raise ValueError(f'timeout must be positive or None, got {timeout}') def _exec() -> tuple[int, bytes]: if timeout is not None: + # Note: GNU coreutils `timeout 0` means "no timeout" (wait forever), + # so we validate timeout > 0 above to prevent surprising behavior. 
wrapped = f'timeout {math.ceil(timeout)} sh -c {_shell_escape(command)}' else: wrapped = command diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index dbb1c5f758..9a24cbbf08 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -186,6 +186,8 @@ async def shell( env: dict[str, str] | None = None, ) -> ExecutionResult: """Execute a command using subprocess for simplicity and reliability.""" + if timeout is not None and timeout <= 0: + raise ValueError(f'timeout must be positive or None, got {timeout}') proc = await anyio.open_process( command, stdout=subprocess.PIPE, From e52ae45f578c26fc0897096a926109f032a643f6 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 14:31:28 +0000 Subject: [PATCH 45/49] merge this shit --- tests/test_environments.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/test_environments.py b/tests/test_environments.py index 753389ba50..951b011c7c 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1856,9 +1856,7 @@ async def test_mock_container_find_command( container._files['/other/secret.txt'] = b'secret' # find with '.' searches all files relative to workdir - exit_code, output = container.exec_run( - ['sh', '-c', "find '.' -type f"], workdir='/workspace' - ) + exit_code, output = container.exec_run(['sh', '-c', "find '.' 
-type f"], workdir='/workspace') output_str = output.decode() assert exit_code == 0 assert './src/main.py' in output_str @@ -1874,9 +1872,7 @@ async def test_mock_container_find_command_subpath( container._files['/workspace/src/main.py'] = b'print("hello")' container._files['/workspace/docs/guide.md'] = b'# guide' - exit_code, output = container.exec_run( - ['sh', '-c', "find 'src' -type f"], workdir='/workspace' - ) + exit_code, output = container.exec_run(['sh', '-c', "find 'src' -type f"], workdir='/workspace') output_str = output.decode() assert exit_code == 0 assert 'src/main.py' in output_str From 529b0e0b4ff368dc8425c5a4c1514bcfe014c973 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 14:33:48 +0000 Subject: [PATCH 46/49] coverage for timeouts --- tests/test_environments.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_environments.py b/tests/test_environments.py index 951b011c7c..5d863ad90f 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -926,6 +926,15 @@ async def test_local_execute_no_timeout(tmp_path: Path): assert 'no_timeout' in result.output +async def test_local_execute_invalid_timeout(tmp_path: Path): + """execute() with non-positive timeout raises ValueError.""" + async with LocalEnvironment(tmp_path) as env: + with pytest.raises(ValueError, match='timeout must be positive or None'): + await env.shell('echo test', timeout=0) + with pytest.raises(ValueError, match='timeout must be positive or None'): + await env.shell('echo test', timeout=-1) + + async def test_local_read_file_bytes_directory(tmp_path: Path): """read_file_bytes on a directory raises FileNotFoundError.""" async with LocalEnvironment(tmp_path) as env: @@ -1252,6 +1261,13 @@ async def test_docker_execute_no_timeout(self, mock_docker_sandbox: Any) -> None result = await mock_docker_sandbox.shell('echo test', timeout=None) assert result.exit_code == 0 + async def test_docker_execute_invalid_timeout(self, 
mock_docker_sandbox: Any) -> None: + """DockerEnvironment.execute with non-positive timeout raises ValueError.""" + with pytest.raises(ValueError, match='timeout must be positive or None'): + await mock_docker_sandbox.shell('echo test', timeout=0) + with pytest.raises(ValueError, match='timeout must be positive or None'): + await mock_docker_sandbox.shell('echo test', timeout=-1) + async def test_docker_execute_with_env(self, mock_docker_sandbox: Any) -> None: """DockerEnvironment.execute passes env vars.""" result = await mock_docker_sandbox.shell('echo test', env={'KEY': 'value'}) From 46c57bf2da681259fcd06bbfad500f903b603d4a Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 15:39:48 +0000 Subject: [PATCH 47/49] removing truncation? We don't need it in the execution env --- pydantic_ai_slim/pydantic_ai/environments/_base.py | 3 --- pydantic_ai_slim/pydantic_ai/environments/docker.py | 7 ++----- pydantic_ai_slim/pydantic_ai/environments/local.py | 5 ----- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index bca61a891a..1ec71e540b 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -40,9 +40,6 @@ class ExecutionResult: exit_code: int """The exit code of the command.""" - truncated: bool = False - """Whether the output was truncated due to length limits.""" - class ExecutionProcess(ABC): """Handle to a running process with bidirectional streaming I/O. 
diff --git a/pydantic_ai_slim/pydantic_ai/environments/docker.py b/pydantic_ai_slim/pydantic_ai/environments/docker.py index 7646e31642..79e892f03a 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/docker.py +++ b/pydantic_ai_slim/pydantic_ai/environments/docker.py @@ -19,7 +19,6 @@ from ._base import ( IMAGE_EXTENSIONS, - MAX_OUTPUT_CHARS, EnvToolName, ExecutionEnvironment, ExecutionProcess, @@ -68,6 +67,7 @@ def _put_file(container: Container, path: str, data: bytes) -> None: info.size = len(data) tar.addfile(info, io.BytesIO(data)) f.seek(0) + # Unfortunately no types on docker put_archive container.put_archive(parent, f) # pyright: ignore[reportUnknownMemberType] @@ -503,13 +503,10 @@ def _exec() -> tuple[int, bytes]: exit_code, output_bytes = await anyio.to_thread.run_sync(_exec) output = output_bytes.decode('utf-8', errors='replace') - truncated = len(output) > MAX_OUTPUT_CHARS - if truncated: - output = output[:MAX_OUTPUT_CHARS] # timeout command returns 124 on timeout if exit_code == 124 and timeout is not None: output += '\n[Command timed out]' - return ExecutionResult(output=output, exit_code=exit_code, truncated=truncated) + return ExecutionResult(output=output, exit_code=exit_code) async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> str | bytes: ext = posixpath.splitext(path)[1].lower() diff --git a/pydantic_ai_slim/pydantic_ai/environments/local.py b/pydantic_ai_slim/pydantic_ai/environments/local.py index 9a24cbbf08..5ba2569686 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/local.py +++ b/pydantic_ai_slim/pydantic_ai/environments/local.py @@ -17,7 +17,6 @@ from ._base import ( IMAGE_EXTENSIONS, - MAX_OUTPUT_CHARS, EnvToolName, ExecutionEnvironment, ExecutionProcess, @@ -217,13 +216,9 @@ async def shell( _close_subprocess_transport(proc) stdout = b''.join(chunks) output = stdout.decode('utf-8', errors='replace') - truncated = len(output) > MAX_OUTPUT_CHARS - if truncated: - output = output[:MAX_OUTPUT_CHARS] 
return ExecutionResult( output=output, exit_code=proc.returncode if proc.returncode is not None else 0, - truncated=truncated, ) async def read_file(self, path: str, *, offset: int = 0, limit: int = 2000) -> str | bytes: From bff327147772026d88dd9eb0fff4c0defb5a95d3 Mon Sep 17 00:00:00 2001 From: Aditya Vardhan Date: Fri, 27 Feb 2026 15:52:21 +0000 Subject: [PATCH 48/49] removing truncation tests --- .../pydantic_ai/environments/_base.py | 2 -- .../toolsets/execution_environment.py | 2 -- tests/test_environments.py | 30 ++----------------- 3 files changed, 3 insertions(+), 31 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/environments/_base.py b/pydantic_ai_slim/pydantic_ai/environments/_base.py index 1ec71e540b..b5c8cf8948 100644 --- a/pydantic_ai_slim/pydantic_ai/environments/_base.py +++ b/pydantic_ai_slim/pydantic_ai/environments/_base.py @@ -135,8 +135,6 @@ async def __aexit__(self, *args: Any) -> None: '.svg': 'image/svg+xml', } -MAX_OUTPUT_CHARS = 100_000 - # --- ExecutionEnvironment --- diff --git a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py index ec498fd2cf..788fcedd41 100644 --- a/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py +++ b/pydantic_ai_slim/pydantic_ai/toolsets/execution_environment.py @@ -166,8 +166,6 @@ async def shell(command: str, timeout: int = 120) -> str: parts: list[str] = [] if result.output: parts.append(result.output) - if result.truncated: - parts.append('[output truncated]') parts.append(f'Exit code: {result.exit_code}') return '\n'.join(parts) diff --git a/tests/test_environments.py b/tests/test_environments.py index 5d863ad90f..26e9e27351 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -68,12 +68,6 @@ def test_execute_result(): result = ExecutionResult(output='hello\n', exit_code=0) assert result.output == 'hello\n' assert result.exit_code == 0 - assert result.truncated is False - - -def 
test_execute_result_truncated(): - result = ExecutionResult(output='data', exit_code=1, truncated=True) - assert result.truncated is True # --- LocalEnvironment: execute --- @@ -998,22 +992,6 @@ async def test_memory_edit_binary(): # --- ExecutionEnvironmentToolset: additional coverage --- -async def test_toolset_bash_truncated(tmp_path: Path): - """bash tool truncation message when output exceeds limit.""" - env = LocalEnvironment(tmp_path) - toolset = ExecutionEnvironmentToolset(env) - ctx = build_run_context(None) - manager = await ToolManager[None](toolset).for_run_step(ctx) - - async with env: - # Generate output longer than MAX_OUTPUT_CHARS (100_000) - result = await manager.handle_call( - ToolCallPart(tool_name='shell', args={'command': 'python3 -c "print(\'x\' * 200000)"'}) - ) - assert '[output truncated]' in str(result) - assert 'Exit code: 0' in str(result) - - async def test_toolset_image_too_large(tmp_path: Path): """read_file on an image that's too large returns error string.""" env = LocalEnvironment(tmp_path) @@ -1919,8 +1897,7 @@ def big_output(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: mock_container.exec_run = big_output # type: ignore[assignment] result = await mock_docker_sandbox.shell('echo big') - assert result.truncated is True - assert len(result.output) == 100_000 + assert len(result.output) == snapshot() async def test_docker_execute_timeout_exit_code( self, mock_docker_sandbox: Any, mock_container: MockContainer @@ -2191,9 +2168,8 @@ async def test_local_execute_output_truncation(tmp_path: Path): script = tmp_path / 'big.py' script.write_text("print('x' * 200000)") env = LocalEnvironment(tmp_path) - result = await env.shell(f'python {script}') - assert result.truncated is True - assert len(result.output) == 100_000 + result = await env.shell(f'python {scr200001ipt}') + assert len(result.output) == snapshot() # --- Additional coverage: memory.py --- From 0d2b2e9f459552e530e84e7446152bd710545c8c Mon Sep 17 00:00:00 2001 From: Aditya 
Vardhan Date: Fri, 27 Feb 2026 15:53:05 +0000 Subject: [PATCH 49/49] removing truncation tests --- tests/test_environments.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_environments.py b/tests/test_environments.py index 26e9e27351..aad91adae6 100644 --- a/tests/test_environments.py +++ b/tests/test_environments.py @@ -1897,7 +1897,7 @@ def big_output(cmd: Any, **kwargs: Any) -> tuple[int, bytes]: mock_container.exec_run = big_output # type: ignore[assignment] result = await mock_docker_sandbox.shell('echo big') - assert len(result.output) == snapshot() + assert len(result.output) == snapshot(200000) async def test_docker_execute_timeout_exit_code( self, mock_docker_sandbox: Any, mock_container: MockContainer @@ -2168,8 +2168,8 @@ async def test_local_execute_output_truncation(tmp_path: Path): script = tmp_path / 'big.py' script.write_text("print('x' * 200000)") env = LocalEnvironment(tmp_path) - result = await env.shell(f'python {scr200001ipt}') - assert len(result.output) == snapshot() + result = await env.shell(f'python {script}') + assert len(result.output) == snapshot(200001) # --- Additional coverage: memory.py ---