From 79cd3b3dd0d584e6a035ed88e1e094c5b9a7f8f0 Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 14 Nov 2025 23:21:47 -0500 Subject: [PATCH 1/2] Add devcontainer setup for AI coding agents - Dockerfile with Python 3.12, uv, pre-commit, deno - docker-compose with optional services (Ollama, PostgreSQL, pgvector) - docs with setup instructions - platform compatibility (x86_64) to support all dependencies (tested on an M4 chip) --- .devcontainer/.env.example | 159 +++++++++ .devcontainer/AGENTS.md | 190 ++++++++++ .devcontainer/CLAUDE.md | 190 ++++++++++ .devcontainer/Dockerfile | 65 ++++ .devcontainer/README.md | 529 ++++++++++++++++++++++++++++ .devcontainer/devcontainer.json | 123 +++++++ .devcontainer/docker-compose.yml | 120 +++++++ .devcontainer/install.sh | 97 +++++ .devcontainer/mcp-proxy-config.json | 30 ++ 9 files changed, 1503 insertions(+) create mode 100644 .devcontainer/.env.example create mode 100644 .devcontainer/AGENTS.md create mode 100644 .devcontainer/CLAUDE.md create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/README.md create mode 100644 .devcontainer/devcontainer.json create mode 100644 .devcontainer/docker-compose.yml create mode 100755 .devcontainer/install.sh create mode 100644 .devcontainer/mcp-proxy-config.json diff --git a/.devcontainer/.env.example b/.devcontainer/.env.example new file mode 100644 index 0000000000..1e2268ae18 --- /dev/null +++ b/.devcontainer/.env.example @@ -0,0 +1,159 @@ +# Pydantic AI DevContainer Environment Variables +# Copy this file to .env and fill in your actual values + +# ============================================================================ +# MODEL PROVIDER API KEYS +# ============================================================================ + +# OpenAI (Required for: OpenAI models, OpenAI-compatible providers) +# Get your key at: https://platform.openai.com/api-keys +OPENAI_API_KEY= + +# Anthropic (Required for: Claude 
models) +# Get your key at: https://console.anthropic.com/settings/keys +ANTHROPIC_API_KEY= + +# Google Generative AI (Required for: Gemini models via Google AI Studio) +# Get your key at: https://aistudio.google.com/apikey +GEMINI_API_KEY= + +# Google Cloud (Required for: Gemini models via Vertex AI) +# Service account JSON content (not a file path) +# Get it from: https://console.cloud.google.com/iam-admin/serviceaccounts +GOOGLE_SERVICE_ACCOUNT_CONTENT= + +# Groq (Required for: Groq models) +# Get your key at: https://console.groq.com/keys +GROQ_API_KEY= + +# Mistral AI (Required for: Mistral models) +# Get your key at: https://console.mistral.ai/api-keys +MISTRAL_API_KEY= + +# Cohere (Required for: Cohere models) +# Get your key at: https://dashboard.cohere.com/api-keys +CO_API_KEY= + +# AWS Bedrock (Required for: AWS Bedrock models) +# Configure via AWS CLI or set these: +# AWS_ACCESS_KEY_ID= +# AWS_SECRET_ACCESS_KEY= +# AWS_REGION=us-east-1 + +# ============================================================================ +# ADDITIONAL MODEL PROVIDERS (OpenAI-compatible) +# ============================================================================ + +# DeepSeek (OpenAI-compatible) +# Get your key at: https://platform.deepseek.com/api_keys +DEEPSEEK_API_KEY= + +# xAI Grok (OpenAI-compatible) +# Get your key at: https://console.x.ai/ +GROK_API_KEY= + +# OpenRouter (Aggregates multiple providers) +# Get your key at: https://openrouter.ai/settings/keys +OPENROUTER_API_KEY= + +# Vercel AI Gateway +# Configure at: https://vercel.com/docs/ai-gateway +VERCEL_AI_GATEWAY_API_KEY= + +# Fireworks AI (OpenAI-compatible) +# Get your key at: https://fireworks.ai/api-keys +FIREWORKS_API_KEY= + +# Together AI (OpenAI-compatible) +# Get your key at: https://api.together.ai/settings/api-keys +TOGETHER_API_KEY= + +# Cerebras (OpenAI-compatible) +# Get your key at: https://cloud.cerebras.ai/ +CEREBRAS_API_KEY= + +# Nebius AI (OpenAI-compatible) +# Get your key at: 
https://studio.nebius.ai/ +NEBIUS_API_KEY= + +# OVHcloud AI Endpoints (OpenAI-compatible) +# Get your key at: https://endpoints.ai.cloud.ovh.net/ +OVHCLOUD_API_KEY= + +# MoonshotAI (OpenAI-compatible) +# Get your key at: https://platform.moonshot.cn/ +MOONSHOTAI_API_KEY= + +# Heroku Inference (OpenAI-compatible) +# Get your key at: https://www.heroku.com/ai +HEROKU_INFERENCE_KEY= + +# ============================================================================ +# LOCAL MODEL PROVIDERS +# ============================================================================ + +# Ollama (Optional - for local models) +# If running Ollama locally or via docker-compose, set the base URL +# Default when using docker-compose ollama service: +# OLLAMA_BASE_URL=http://localhost:11434/v1/ +# OLLAMA_API_KEY=placeholder # Not needed for local, but some tools require it + +# ============================================================================ +# OBSERVABILITY & MONITORING +# ============================================================================ + +# Logfire (Optional - for structured logging and tracing) +# Get your token at: https://logfire.pydantic.dev/ +# LOGFIRE_TOKEN= +# LOGFIRE_SERVICE_NAME=pydantic-ai-dev + +# ============================================================================ +# SEARCH PROVIDERS (for tool integrations) +# ============================================================================ + +# Brave Search (Optional - for web search tools) +# Get your key at: https://brave.com/search/api/ +# BRAVE_API_KEY= + +# Tavily Search (Optional - for web search tools) +# Get your key at: https://tavily.com/ +# TAVILY_API_KEY= + +# ============================================================================ +# MODEL CONTEXT PROTOCOL (MCP) +# ============================================================================ + +# GitHub Personal Access Token (Optional - for MCP GitHub server) +# Create at: https://github.com/settings/tokens +# Needs: repo, read:org 
scopes +# GITHUB_PERSONAL_ACCESS_TOKEN= + +# ============================================================================ +# DATABASE CONNECTIONS (for examples) +# ============================================================================ + +# PostgreSQL (Optional - for SQL/RAG examples) +# Default when using docker-compose postgres service: +# DATABASE_URL=postgresql://postgres:postgres@localhost:54320/postgres + +# PostgreSQL with pgvector (Optional - for RAG examples) +# Default when using docker-compose pgvector service: +# PGVECTOR_DATABASE_URL=postgresql://postgres:postgres@localhost:54321/postgres + +# ============================================================================ +# TESTING FLAGS +# ============================================================================ + +# Enable live API testing (Optional - USE WITH CAUTION - incurs API costs!) +# Set to exact value below to enable live tests that hit real APIs +# PYDANTIC_AI_LIVE_TEST_DANGEROUS=CHARGE-ME! + +# ============================================================================ +# NOTES +# ============================================================================ +# +# - Most API keys are OPTIONAL - only set the ones you plan to use +# - For testing, use test models or Ollama to avoid API costs +# - Never commit this file with real API keys +# - Add .env to .gitignore (already done in this project) +# - See README.md for detailed setup instructions per provider diff --git a/.devcontainer/AGENTS.md b/.devcontainer/AGENTS.md new file mode 100644 index 0000000000..5d80b76a3b --- /dev/null +++ b/.devcontainer/AGENTS.md @@ -0,0 +1,190 @@ +# DevContainer Maintenance Guide + +## About This Codebase + +- **Pydantic AI**: Agent framework for building LLM-powered applications with Pydantic +- **Workspace structure**: uv monorepo with multiple packages + - `pydantic-ai-slim`: Core framework (minimal dependencies) + - `pydantic-evals`: Evaluation framework + - `pydantic-graph`: Graph execution engine 
+ - `examples/`: Example applications + - `clai/`: CLI tool +- **Primary users**: Contributors, AI coding agents (Claude Code, Cursor), PR reviewers + +## DevContainer Purpose + +- Provides isolated, reproducible development environment +- Matches exact dependencies and tools across all developers and AI agents +- Prevents "works on my machine" issues +- Ensures AI agents have proper access to testing/building tools +- Security isolation for AI agents + +## Platform Configuration + +- **Default platform**: `linux/amd64` (x86_64) +- **Why not ARM64**: Some Python packages (e.g., mlx) lack Linux ARM64 wheels +- **Apple Silicon**: Uses Rosetta/QEMU emulation automatically (slightly slower but compatible) +- **Change if needed**: Edit `docker-compose.yml` platform setting + +## Installation Modes + +### Standard Mode (Default) +- Installs: Cloud API providers + dev tools +- Excludes: PyTorch, transformers, vLLM, outlines ML extras +- Use case: 95% of development (PR testing, features, bug fixes) +- Why: Saves significant install time and disk space +- Command: Uses explicit `--extra` flags in `install.sh` + +### Full Mode +- Installs: Everything including ML frameworks +- Use case: Working on outlines integration, local model features +- Command: `--all-extras --all-packages` + +### Mode Selection +- Interactive (VSCode): User prompted to choose +- Non-interactive (agents/CI): Defaults to Standard +- Override: Set `INSTALL_MODE=standard|full` environment variable + +## Key Files + +### `.devcontainer/devcontainer.json` +- VSCode configuration for the devcontainer +- Editor settings, extensions, port forwarding +- Lifecycle commands (`postCreateCommand`, `postStartCommand`) +- Environment variables (`UV_LINK_MODE`, `UV_PROJECT_ENVIRONMENT`, etc.) 
+- Git identity for AI commits + +### `.devcontainer/Dockerfile` +- Base image: `mcr.microsoft.com/devcontainers/base:debian-12` +- System dependencies for Python builds +- Installs: uv, deno, pre-commit, Python 3.12 +- Runs as non-root user `vscode` + +### `.devcontainer/docker-compose.yml` +- Service orchestration +- Platform specification (`linux/amd64`) +- Optional services (commented out): Ollama, PostgreSQL, pgvector, MCP proxy +- Volume management for persistence + +### `.devcontainer/install.sh` +- Interactive installation script +- Detects interactive vs non-interactive mode +- Implements Standard vs Full installation logic +- Installs pre-commit hooks +- Called by `postCreateCommand` in devcontainer.json + +## Environment Variables + +### Critical Variables (devcontainer.json) +- `UV_PROJECT_ENVIRONMENT=/workspace/.venv`: Virtual environment location +- `UV_LINK_MODE=copy`: Suppress hardlink warnings in Docker volumes +- `PYTHONUNBUFFERED=1`: Ensure Python output appears immediately +- `COLUMNS=150`: Terminal width for better output formatting +- `GIT_AUTHOR_*`, `GIT_COMMITTER_*`: Git identity for AI commits + +### Optional Variables +- `INSTALL_MODE=standard|full`: Override installation mode +- API keys: Should be set in `.devcontainer/.env` (not committed) + +## Dependencies and Extras + +### Always Installed (Both Modes) +- Core: pydantic, httpx, opentelemetry-api +- Cloud APIs: openai, anthropic, google, groq, mistral, cohere, bedrock, huggingface +- Dev tools: cli, mcp, fastmcp, logfire, retries, temporal, ui, ag-ui, evals +- Build tools: lint group, docs group (ruff, mypy, pyright, mkdocs) + +### Only in Full Mode +- `outlines-transformers`: PyTorch + Transformers library +- `outlines-vllm-offline`: vLLM inference engine +- `outlines-sglang`: SGLang framework +- `outlines-mlxlm`: Apple MLX framework +- `outlines-llamacpp`: LlamaCPP bindings + +## Common Maintenance Tasks + +### Adding New System Dependencies +- Edit `Dockerfile`: Add to `apt-get 
install` command +- Rebuild container required + +### Adding Python Packages +- Use `uv add package-name` (not manual pyproject.toml edits) +- For new extras: Add to `pydantic_ai_slim/pyproject.toml` optional-dependencies +- Update `install.sh` if extra should be in Standard mode + +### Adding VSCode Extensions +- Edit `devcontainer.json`: Add to `customizations.vscode.extensions` array +- Rebuild container required + +### Updating Base Image/Tools +- `Dockerfile`: Change base image tag +- Update uv/deno install commands if needed +- Test with both Standard and Full modes + +### Adding Optional Services +- Uncomment service in `docker-compose.yml` +- Uncomment corresponding volume if needed +- Document in README.md optional services section + +## Troubleshooting + +### Container Build Fails +- Check Docker daemon is running +- Check internet connectivity +- Try: `docker system prune -a` to clean cache +- Check Dockerfile for syntax errors: `docker build -f .devcontainer/Dockerfile .` + +### Installation Script Fails +- Check `install.sh` syntax: `bash -n .devcontainer/install.sh` +- Run manually in container to see detailed errors +- Check uv lockfile is up to date: `uv lock` + +### Performance Issues +- Verify Docker resources (4+ GB RAM recommended) +- Check platform setting (amd64 vs arm64) +- Volume cache consistency setting in docker-compose.yml + +### UV Warnings +- Hardlink warning: Ensure `UV_LINK_MODE=copy` is set +- Lockfile conflicts: Run `uv lock` to regenerate + +## Best Practices + +### When Changing install.sh +- Test both Standard and Full modes +- Test interactive and non-interactive flows +- Verify syntax with `bash -n install.sh` +- Update README.md to match + +### When Changing Dependencies +- Keep Standard mode lean (exclude heavy ML frameworks) +- Update install.sh if adding new extras +- Document in README.md what's included/excluded +- Test install time impact + +### When Updating Documentation +- Keep README.md user-facing and comprehensive 
+- Keep CLAUDE.md maintainer-focused and concise +- No time estimates (machine-dependent) +- Link to official docs where applicable + +## Git Configuration + +- Credentials forwarded automatically by VSCode (no manual setup needed) +- Git identity set via environment variables (not .gitconfig file) +- Safe directory configured in `postStartCommand` +- AI commits use identity from `GIT_AUTHOR_*` variables + +## Testing the Setup + +### Manual Test +1. Make changes to devcontainer files +2. Rebuild container: "Dev Containers: Rebuild Container" +3. Test Standard mode installation +4. Test Full mode: `INSTALL_MODE=full` or run `make install` +5. Verify: `uv run pytest tests/test_agent.py::test_simple_sync` + +### CI Considerations +- Container should work in non-interactive mode +- Default Standard mode should cover 95% of test suite +- Full mode needed only for outlines/ML framework tests diff --git a/.devcontainer/CLAUDE.md b/.devcontainer/CLAUDE.md new file mode 100644 index 0000000000..5d80b76a3b --- /dev/null +++ b/.devcontainer/CLAUDE.md @@ -0,0 +1,190 @@ +# DevContainer Maintenance Guide + +## About This Codebase + +- **Pydantic AI**: Agent framework for building LLM-powered applications with Pydantic +- **Workspace structure**: uv monorepo with multiple packages + - `pydantic-ai-slim`: Core framework (minimal dependencies) + - `pydantic-evals`: Evaluation framework + - `pydantic-graph`: Graph execution engine + - `examples/`: Example applications + - `clai/`: CLI tool +- **Primary users**: Contributors, AI coding agents (Claude Code, Cursor), PR reviewers + +## DevContainer Purpose + +- Provides isolated, reproducible development environment +- Matches exact dependencies and tools across all developers and AI agents +- Prevents "works on my machine" issues +- Ensures AI agents have proper access to testing/building tools +- Security isolation for AI agents + +## Platform Configuration + +- **Default platform**: `linux/amd64` (x86_64) +- **Why not ARM64**: Some 
Python packages (e.g., mlx) lack Linux ARM64 wheels +- **Apple Silicon**: Uses Rosetta/QEMU emulation automatically (slightly slower but compatible) +- **Change if needed**: Edit `docker-compose.yml` platform setting + +## Installation Modes + +### Standard Mode (Default) +- Installs: Cloud API providers + dev tools +- Excludes: PyTorch, transformers, vLLM, outlines ML extras +- Use case: 95% of development (PR testing, features, bug fixes) +- Why: Saves significant install time and disk space +- Command: Uses explicit `--extra` flags in `install.sh` + +### Full Mode +- Installs: Everything including ML frameworks +- Use case: Working on outlines integration, local model features +- Command: `--all-extras --all-packages` + +### Mode Selection +- Interactive (VSCode): User prompted to choose +- Non-interactive (agents/CI): Defaults to Standard +- Override: Set `INSTALL_MODE=standard|full` environment variable + +## Key Files + +### `.devcontainer/devcontainer.json` +- VSCode configuration for the devcontainer +- Editor settings, extensions, port forwarding +- Lifecycle commands (`postCreateCommand`, `postStartCommand`) +- Environment variables (`UV_LINK_MODE`, `UV_PROJECT_ENVIRONMENT`, etc.) 
+- Git identity for AI commits + +### `.devcontainer/Dockerfile` +- Base image: `mcr.microsoft.com/devcontainers/base:debian-12` +- System dependencies for Python builds +- Installs: uv, deno, pre-commit, Python 3.12 +- Runs as non-root user `vscode` + +### `.devcontainer/docker-compose.yml` +- Service orchestration +- Platform specification (`linux/amd64`) +- Optional services (commented out): Ollama, PostgreSQL, pgvector, MCP proxy +- Volume management for persistence + +### `.devcontainer/install.sh` +- Interactive installation script +- Detects interactive vs non-interactive mode +- Implements Standard vs Full installation logic +- Installs pre-commit hooks +- Called by `postCreateCommand` in devcontainer.json + +## Environment Variables + +### Critical Variables (devcontainer.json) +- `UV_PROJECT_ENVIRONMENT=/workspace/.venv`: Virtual environment location +- `UV_LINK_MODE=copy`: Suppress hardlink warnings in Docker volumes +- `PYTHONUNBUFFERED=1`: Ensure Python output appears immediately +- `COLUMNS=150`: Terminal width for better output formatting +- `GIT_AUTHOR_*`, `GIT_COMMITTER_*`: Git identity for AI commits + +### Optional Variables +- `INSTALL_MODE=standard|full`: Override installation mode +- API keys: Should be set in `.devcontainer/.env` (not committed) + +## Dependencies and Extras + +### Always Installed (Both Modes) +- Core: pydantic, httpx, opentelemetry-api +- Cloud APIs: openai, anthropic, google, groq, mistral, cohere, bedrock, huggingface +- Dev tools: cli, mcp, fastmcp, logfire, retries, temporal, ui, ag-ui, evals +- Build tools: lint group, docs group (ruff, mypy, pyright, mkdocs) + +### Only in Full Mode +- `outlines-transformers`: PyTorch + Transformers library +- `outlines-vllm-offline`: vLLM inference engine +- `outlines-sglang`: SGLang framework +- `outlines-mlxlm`: Apple MLX framework +- `outlines-llamacpp`: LlamaCPP bindings + +## Common Maintenance Tasks + +### Adding New System Dependencies +- Edit `Dockerfile`: Add to `apt-get 
install` command +- Rebuild container required + +### Adding Python Packages +- Use `uv add package-name` (not manual pyproject.toml edits) +- For new extras: Add to `pydantic_ai_slim/pyproject.toml` optional-dependencies +- Update `install.sh` if extra should be in Standard mode + +### Adding VSCode Extensions +- Edit `devcontainer.json`: Add to `customizations.vscode.extensions` array +- Rebuild container required + +### Updating Base Image/Tools +- `Dockerfile`: Change base image tag +- Update uv/deno install commands if needed +- Test with both Standard and Full modes + +### Adding Optional Services +- Uncomment service in `docker-compose.yml` +- Uncomment corresponding volume if needed +- Document in README.md optional services section + +## Troubleshooting + +### Container Build Fails +- Check Docker daemon is running +- Check internet connectivity +- Try: `docker system prune -a` to clean cache +- Check Dockerfile for syntax errors: `docker build -f .devcontainer/Dockerfile .` + +### Installation Script Fails +- Check `install.sh` syntax: `bash -n .devcontainer/install.sh` +- Run manually in container to see detailed errors +- Check uv lockfile is up to date: `uv lock` + +### Performance Issues +- Verify Docker resources (4+ GB RAM recommended) +- Check platform setting (amd64 vs arm64) +- Volume cache consistency setting in docker-compose.yml + +### UV Warnings +- Hardlink warning: Ensure `UV_LINK_MODE=copy` is set +- Lockfile conflicts: Run `uv lock` to regenerate + +## Best Practices + +### When Changing install.sh +- Test both Standard and Full modes +- Test interactive and non-interactive flows +- Verify syntax with `bash -n install.sh` +- Update README.md to match + +### When Changing Dependencies +- Keep Standard mode lean (exclude heavy ML frameworks) +- Update install.sh if adding new extras +- Document in README.md what's included/excluded +- Test install time impact + +### When Updating Documentation +- Keep README.md user-facing and comprehensive 
+- Keep CLAUDE.md maintainer-focused and concise +- No time estimates (machine-dependent) +- Link to official docs where applicable + +## Git Configuration + +- Credentials forwarded automatically by VSCode (no manual setup needed) +- Git identity set via environment variables (not .gitconfig file) +- Safe directory configured in `postStartCommand` +- AI commits use identity from `GIT_AUTHOR_*` variables + +## Testing the Setup + +### Manual Test +1. Make changes to devcontainer files +2. Rebuild container: "Dev Containers: Rebuild Container" +3. Test Standard mode installation +4. Test Full mode: `INSTALL_MODE=full` or run `make install` +5. Verify: `uv run pytest tests/test_agent.py::test_simple_sync` + +### CI Considerations +- Container should work in non-interactive mode +- Default Standard mode should cover 95% of test suite +- Full mode needed only for outlines/ML framework tests diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000000..4218f33c8e --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,65 @@ +# Pydantic AI Development Container +# Optimized for AI coding agents (Claude Code, Cursor) + +FROM mcr.microsoft.com/devcontainers/base:debian-12 + +# Avoid warnings by switching to noninteractive +ENV DEBIAN_FRONTEND=noninteractive + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + git \ + curl \ + make \ + build-essential \ + ca-certificates \ + wget \ + unzip \ + # For Python builds + libbz2-dev \ + libffi-dev \ + liblzma-dev \ + libncurses5-dev \ + libreadline-dev \ + libsqlite3-dev \ + libssl-dev \ + tk-dev \ + zlib1g-dev \ + && rm -rf /var/lib/apt/lists/* + +# Switch to vscode user for non-root operations +USER vscode + +# Install uv and deno +RUN curl -LsSf https://astral.sh/uv/install.sh | sh && \ + curl -fsSL https://deno.land/install.sh | sh + +# Set all PATH and environment variables together +ENV PATH="/home/vscode/.local/bin:/home/vscode/.deno/bin:${PATH}" +ENV 
DENO_INSTALL="/home/vscode/.deno" +ENV UV_PYTHON=3.12 + +# Install Python tools (now that PATH is set) +RUN uv tool install pre-commit && \ + uv python install 3.12 + +# Pre-create cache directories with correct permissions +RUN mkdir -p /home/vscode/.cache/uv /home/vscode/.cache/pre-commit && \ + chown -R vscode:vscode /home/vscode/.cache + +# Set working directory +WORKDIR /workspace + +# Pre-create .venv directory with correct permissions +RUN mkdir -p /workspace/.venv && chown vscode:vscode /workspace/.venv + +# Set environment variables for optimal development +ENV COLUMNS=150 +ENV PYTHONUNBUFFERED=1 +ENV UV_PROJECT_ENVIRONMENT=/workspace/.venv + +# Switch back to noninteractive +ENV DEBIAN_FRONTEND=dialog + +# Default command +CMD ["/bin/bash"] diff --git a/.devcontainer/README.md b/.devcontainer/README.md new file mode 100644 index 0000000000..20aa1b8d02 --- /dev/null +++ b/.devcontainer/README.md @@ -0,0 +1,529 @@ +# Pydantic AI DevContainer + +This directory contains a complete development container configuration optimized for AI coding agents like **Claude Code** and **Cursor**. + +## Why Use a DevContainer? 
+ +Running AI coding agents inside containers is [Anthropic's official best practice](https://docs.anthropic.com/en/docs/claude-code) because it provides: + +- **Security isolation**: Protects your host system from accidental changes +- **Environment fidelity**: Claude experiences the exact same Python interpreter, dependencies, and tools as production +- **Reproducibility**: Every developer and AI agent works in an identical environment +- **Clean separation**: Keeps project dependencies isolated from your system + +## What's Included + +This devcontainer setup includes: + +- **Python 3.12** (the project's default version) +- **uv** - Fast Python package manager +- **pre-commit** - Git hooks for code quality +- **deno** - JavaScript runtime for documentation tools +- **All development dependencies** pre-installed via `make install` +- **All model provider SDKs** (OpenAI, Anthropic, Google, Groq, Mistral, Cohere, Bedrock) +- **VS Code extensions** for Python, Ruff, MyPy, Pyright, GitLens, and more +- **MCP proxy support** (optional) for Model Context Protocol integration + +## Platform Compatibility + +### ARM64 / Apple Silicon + +The devcontainer is configured to use the **`linux/amd64` (x86_64) platform** by default for maximum compatibility with Full mode (which includes ML frameworks like `mlx`, PyTorch, transformers). + +**On Apple Silicon Macs**: Docker automatically uses Rosetta/QEMU emulation to run x86_64 containers. This works seamlessly but may be slightly slower than native ARM64. + +**For better performance on Apple Silicon (Standard mode users):** +1. Edit `docker-compose.yml` +2. Change `platform: linux/amd64` to `platform: linux/arm64` +3. Use Standard mode installation (default) +4. ⚠️ **Note**: If you later need Full mode, you must switch back to `linux/amd64` due to ML framework dependencies + +**Full mode requires amd64** because packages like `mlx` (Apple's ML framework) don't have Linux ARM64 wheels. Standard mode has no such restrictions. 
+ +### Why x86_64 by Default? + +Using x86_64 ensures compatibility with Full mode (ML frameworks) out of the box. On Apple Silicon, this uses emulation which is slightly slower but works for both Standard and Full modes. + +## Installation Modes + +The devcontainer supports two installation modes to balance speed and functionality: + +### Standard Mode (Default, Recommended) +- **Includes**: Cloud API providers + all development tools +- **Excludes**: Heavy ML frameworks (PyTorch, transformers, vLLM, outlines extras) +- **Use case**: PR testing, bug fixes, feature development (95% of users) +- **What's installed**: + - All cloud model providers (OpenAI, Anthropic, Google, Groq, Mistral, Cohere, Bedrock, HuggingFace) + - Development tools (CLI, MCP, FastMCP) + - Optional integrations (Logfire, retries, Temporal, UI, AG-UI, evals) + - Testing tools, linters, type checkers + +### Full Mode (ML Development) +- **Includes**: Everything including heavy ML frameworks for local model inference +- **Use case**: Working on outlines integration, local model features, ML framework development +- **What's installed**: Everything in Standard + PyTorch, transformers, vLLM, SGLang, MLX, LlamaCPP, and all workspace packages + +### How Installation Works + +**Interactive Mode** (VS Code, local development): +- When you first open the devcontainer, you'll be prompted to choose Standard or Full +- Your choice applies to the initial setup only +- You can always install ML frameworks later by running `make install` + +**Non-Interactive Mode** (AI agents, CI, remote servers): +- Automatically uses **Standard mode** (excludes heavy ML frameworks) +- Override by setting environment variable: `INSTALL_MODE=full` + +**Switching Modes Later**: +```bash +# Install everything including ML frameworks +make install + +# Or manually install all extras for pydantic-ai-slim only +uv sync --frozen --package pydantic-ai-slim --all-extras --group lint --group docs + +# Or install everything across all 
packages +uv sync --frozen --all-extras --all-packages --group lint --group docs +``` + +## Getting Started + +### Using VS Code + +1. Install [Docker Desktop](https://www.docker.com/products/docker-desktop/) +2. Install the [Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) in VS Code +3. Open this repository in VS Code +4. Click the prompt to "Reopen in Container" (or press `F1` → "Dev Containers: Reopen in Container") +5. Wait for the container to build +6. **Choose installation mode** when prompted (Standard recommended for most users) +7. Once built, the environment is ready! + +### Using Claude Code + +Claude Code automatically detects and uses devcontainers when configured. Simply: + +1. Ensure Docker is running +2. Open the project - Claude Code will prompt to use the devcontainer +3. Accept the prompt, and Claude will operate inside the container +4. **Installation mode**: Automatically uses **Standard mode** (excludes ML frameworks, agents are non-interactive) + - To use Full mode, set `INSTALL_MODE=full` in the container's environment variables + +This ensures Claude has access to the same tools and environment as your tests. + +### Using Cursor + +Similar to Claude Code: + +1. Ensure Docker is running +2. Open the project in Cursor +3. When prompted, select "Reopen in Container" +4. **Installation mode**: Automatically uses **Standard mode** (excludes ML frameworks, agents are non-interactive) + - To use Full mode, set `INSTALL_MODE=full` in the container's environment variables +5. Cursor's AI will now operate within the containerized environment + +## Setting Up API Keys + +Most model providers require API keys. The devcontainer includes a comprehensive `.env.example` file documenting all supported providers. + +### Quick Setup + +1. **Copy the example file**: + ```bash + cp .devcontainer/.env.example .devcontainer/.env + ``` + +2. 
**Add your API keys** to `.devcontainer/.env`: + ```bash + # Required for testing with specific models + OPENAI_API_KEY=sk-... + ANTHROPIC_API_KEY=sk-ant-... + GEMINI_API_KEY=AI... + # Add others as needed + ``` + +3. **Update docker-compose** to load the `.env` file (add to the `dev` service): + ```yaml + dev: + env_file: + - .env + ``` + +4. **Rebuild the container**: `Dev Containers: Rebuild Container` + +### Supported Providers + +See `.devcontainer/.env.example` for the complete list of API keys for: +- **Major providers**: OpenAI, Anthropic (Claude), Google (Gemini), Groq, Mistral, Cohere +- **AWS Bedrock**: Configure via AWS credentials +- **OpenAI-compatible**: DeepSeek, Grok, OpenRouter, Fireworks, Together, and more +- **Search tools**: Brave Search, Tavily (optional) +- **Observability**: Logfire (optional) + +### Testing Without API Keys + +You can develop and test without paid API keys using: +- **Ollama** (see "Using Ollama Locally" below) - Free local models +- **Test models** - The test suite includes mock models +- **VCR cassettes** - Pre-recorded API interactions in `tests/cassettes/` + +## Available Commands + +Once inside the container, all standard project commands work: + +```bash +# Run all checks (format, lint, typecheck, tests) +make + +# Run tests only +make test + +# Run tests with coverage report +make testcov + +# Format code +make format + +# Lint code +make lint + +# Type check with Pyright +make typecheck + +# Build and serve documentation locally +make docs-serve # Available at http://localhost:8000 +``` + +## Container Architecture + +### File Structure + +``` +.devcontainer/ +├── Dockerfile # Container image definition +├── devcontainer.json # VS Code configuration +├── docker-compose.yml # Service orchestration +├── mcp-proxy-config.json # MCP server proxy config (optional) +└── README.md # This file +``` + +### Volumes + +The setup uses several volumes for optimal performance: + +- **Workspace**: Your code is mounted at 
`/workspace` +- **Virtual environment**: `.venv/` persists across container restarts +- **Cache directories**: `uv` and `pre-commit` caches are preserved for faster operations + +### Networking + +The container uses `host` networking mode for simplicity. This means: + +- The docs server at `localhost:8000` is directly accessible +- No port mapping configuration needed +- Direct access to any services you run + +## MCP Integration (Advanced) + +The devcontainer includes optional MCP (Model Context Protocol) proxy support. This allows Claude Code to communicate with stdio-based MCP servers from inside the container. + +### Enabling MCP Proxy + +1. Edit `docker-compose.yml` and uncomment the `mcp-proxy` service +2. Edit `mcp-proxy-config.json` to configure your MCP servers: + - Set `disabled: false` for servers you want to enable + - Add environment variables for API keys +3. Rebuild the container: `Dev Containers: Rebuild Container` +4. The proxy will be available at `http://localhost:3000` + +### Configuring MCP Servers + +Example configuration in `mcp-proxy-config.json`: + +```json +{ + "mcpServers": { + "github": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-github"], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "your-token-here" + }, + "disabled": false + } + } +} +``` + +**Security Note**: Never commit API keys. Use environment variables or Docker secrets instead. + +## Using Ollama Locally + +Ollama allows you to run open-source LLMs locally without API keys or usage costs. The devcontainer includes an optional Ollama service. + +### Enabling Ollama + +1. **Edit `docker-compose.yml`** and uncomment the `ollama` service section +2. **Uncomment the volume** in the `volumes` section: `ollama-models:` +3. 
**Rebuild the container**: `Dev Containers: Rebuild Container`
+
+### Pulling Models
+
+Once Ollama is running, pull models:
+
+```bash
+# Pull a small, fast model (used in CI tests)
+docker exec pydantic-ai-ollama ollama pull qwen2:0.5b
+
+# Pull other popular models
+docker exec pydantic-ai-ollama ollama pull llama3.2:3b
+docker exec pydantic-ai-ollama ollama pull phi3:mini
+```
+
+### Using Ollama in Code
+
+Ollama uses the OpenAI-compatible API:
+
+```python
+from pydantic_ai import Agent
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.ollama import OllamaProvider
+
+# Using the docker-compose ollama service
+model = OpenAIChatModel(
+    'qwen2:0.5b',
+    provider=OllamaProvider(base_url='http://localhost:11434/v1/'),
+)
+agent = Agent(model)
+```
+
+Or set the environment variable:
+```bash
+export OLLAMA_BASE_URL=http://localhost:11434/v1/
+```
+
+### Checking Available Models
+
+```bash
+docker exec pydantic-ai-ollama ollama list
+```
+
+## Running Examples with Databases
+
+Some examples in `examples/` require PostgreSQL. The devcontainer includes optional database services.
+
+### SQL Generation Example (`examples/sql_gen.py`)
+
+1. **Edit `docker-compose.yml`** and uncomment the `postgres` service section
+2. **Uncomment the volume**: `postgres-data:`
+3. **Rebuild the container**: `Dev Containers: Rebuild Container`
+4. **Run the example**:
+   ```bash
+   # The example expects PostgreSQL at localhost:54320
+   cd examples
+   uv run pydantic_ai_examples/sql_gen.py
+   ```
+
+### RAG Example (`examples/rag.py`)
+
+This example requires PostgreSQL with the pgvector extension:
+
+1. **Edit `docker-compose.yml`** and uncomment the `pgvector` service section
+2. **Uncomment the volume**: `pgvector-data:`
+3. **Rebuild the container**: `Dev Containers: Rebuild Container`
+4. 
**Run the example**: + ```bash + # The example expects pgvector at localhost:54321 + cd examples + uv run pydantic_ai_examples/rag.py + ``` + +### Connection Strings + +The default credentials for both services are: +``` +User: postgres +Password: postgres +Database: postgres +``` + +- **Standard PostgreSQL**: `postgresql://postgres:postgres@localhost:54320/postgres` +- **PostgreSQL + pgvector**: `postgresql://postgres:postgres@localhost:54321/postgres` + +## Optional Services Summary + +The devcontainer supports these optional services (all commented out by default): + +| Service | Port | Use Case | Enable By | +|---------|------|----------|-----------| +| **Ollama** | 11434 | Local LLM testing | Uncomment in `docker-compose.yml` | +| **PostgreSQL** | 54320 | SQL generation example | Uncomment in `docker-compose.yml` | +| **pgvector** | 54321 | RAG example | Uncomment in `docker-compose.yml` | +| **MCP Proxy** | 3000 | MCP server integration | Uncomment in `docker-compose.yml` | + +**Why commented out?** To keep the default setup minimal and fast. Enable only what you need. 
+ +## Git Configuration + +### How Git Works in the DevContainer + +This devcontainer follows **2025 best practices** for git configuration with AI coding agents: + +**Automatic Credential Forwarding** (No manual setup needed) +- VS Code automatically forwards your SSH agent and git credentials to the container +- No need to mount `.gitconfig` or `.ssh` directories +- Works seamlessly with SSH keys, Personal Access Tokens, and credential helpers +- **Source**: [VS Code: Sharing Git credentials](https://code.visualstudio.com/remote/advancedcontainers/sharing-git-credentials) + +**Git Identity via Environment Variables** +- The container uses environment variables for git commits made by AI agents +- Default identity: `AI Agent ` +- Customize in `.devcontainer/devcontainer.json` under `remoteEnv`: + ```json + "GIT_AUTHOR_NAME": "Your Preferred Name", + "GIT_AUTHOR_EMAIL": "your@email.com", + "GIT_COMMITTER_NAME": "Your Preferred Name", + "GIT_COMMITTER_EMAIL": "your@email.com" + ``` +- **Source**: [Git Environment Variables](https://git-scm.com/book/en/v2/Git-Internals-Environment-Variables) + +**Safe Directory Configuration** +- The `postStartCommand` automatically trusts the workspace directory +- This resolves git's "dubious ownership" security check (CVE-2022-24765) +- **Source**: [Avoiding Dubious Ownership in Dev Containers](https://www.kenmuse.com/blog/avoiding-dubious-ownership-in-dev-containers/) + +**Why This Approach?** +- **More secure**: Host's gitconfig remains untouched by the container +- **Cleaner**: No file mounts needed - VS Code handles everything +- **Flexible**: Easy to customize AI commit identity via environment variables +- **Modern**: Follows 2025 devcontainer best practices + +## Customization + +### Adding Python Packages + +Install packages using `uv`: + +```bash +# Add a new dependency +uv add package-name + +# Add a dev dependency +uv add --dev package-name + +# Sync with lockfile +uv sync +``` + +### Adding VS Code Extensions + +Edit 
`devcontainer.json` and add extension IDs to the `extensions` array: + +```json +"extensions": [ + "ms-python.python", + "your-extension-id" +] +``` + +### Changing Python Version + +To use a different Python version: + +1. Edit `Dockerfile`: Change `ENV UV_PYTHON=3.12` to your desired version +2. Rebuild: `Dev Containers: Rebuild Container` + +## Troubleshooting + +### ARM64 Platform Compatibility Error + +**Issue**: Full mode installation fails with error about `mlx` or other ML packages not having wheels for Linux ARM64 + +**Solution**: This only affects Full mode (ML frameworks). If you see this error: +1. Verify `docker-compose.yml` has `platform: linux/amd64` under the `dev` service's `build` section +2. Rebuild: `Dev Containers: Rebuild Container` +3. On Apple Silicon, Docker will use Rosetta emulation automatically +4. Alternatively, use Standard mode which has no ARM64 restrictions + +### Container Build Fails + +**Issue**: Docker build fails with network errors + +**Solution**: Check your internet connection and Docker proxy settings. Try: +```bash +docker system prune -a # Clean Docker cache +``` + +### `make install` Fails + +**Issue**: Post-create command fails during container creation + +**Solution**: +1. Open a terminal in the container +2. Run `make install` manually to see detailed error messages +3. Check that `uv`, `pre-commit`, and `deno` are installed: `which uv pre-commit deno` + +### Tests Fail with Import Errors + +**Issue**: Tests can't find installed packages + +**Solution**: Ensure the virtual environment is activated: +```bash +source .venv/bin/activate +python -c "import sys; print(sys.prefix)" # Should show /workspace/.venv +``` + +### Git Operations Fail + +**Issue**: Git commands show "permission denied" or "unsafe repository" + +**Solution**: The devcontainer automatically runs `git config --global --add safe.directory /workspace` on start. 
If issues persist:
+```bash
+git config --global --add safe.directory /workspace
+```
+
+### Port 8000 Already in Use
+
+**Issue**: Can't access docs at `localhost:8000`
+
+**Solution**: Check if another service is using port 8000:
+```bash
+# On host machine
+lsof -i :8000
+# Kill the process or use a different port in mkdocs serve
+```
+
+### Slow Performance
+
+**Issue**: Container operations are slow
+
+**Solution**:
+- Ensure Docker has adequate resources (4+ GB RAM recommended)
+- Use Docker Desktop's built-in resource settings
+- Consider using WSL2 backend on Windows for better performance
+
+## Performance Tips
+
+1. **Persistent volumes**: The `.venv` volume persists across rebuilds, making subsequent starts much faster
+2. **Cached mounts**: Workspace is mounted with `cached` consistency for better I/O performance
+3. **Prune regularly**: Run `docker system prune` periodically to free disk space
+
+## Security Considerations
+
+- The container runs as non-root user `vscode` (UID 1000)
+- Git credentials and SSH keys are forwarded by VS Code, not mounted into the container (see "Git Configuration" above)
+- MCP proxy should only expose necessary servers
+- Never commit secrets or API keys to configuration files
+
+## Additional Resources
+
+- [Pydantic AI Contributing Guide](../docs/contributing.md)
+- [Dev Containers Documentation](https://containers.dev/)
+- [Claude Code Documentation](https://docs.anthropic.com/en/docs/claude-code)
+- [uv Documentation](https://docs.astral.sh/uv/)
+
+## Support
+
+If you encounter issues with the devcontainer setup:
+
+1. Check this README's troubleshooting section
+2. Review container logs: `docker logs <container-name>`
+3. Rebuild the container: `Dev Containers: Rebuild Container`
+4. 
Open an issue in the [Pydantic AI repository](https://github.com/pydantic/pydantic-ai/issues) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..a45061001e --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,123 @@ +{ + "name": "Pydantic AI Development", + "dockerComposeFile": "docker-compose.yml", + "service": "dev", + "workspaceFolder": "/workspace", + // Configure tool-specific properties + "customizations": { + "vscode": { + "settings": { + // Python settings + "python.defaultInterpreterPath": "/workspace/.venv/bin/python", + "python.terminal.activateEnvironment": true, + "python.analysis.typeCheckingMode": "strict", + "python.analysis.autoImportCompletions": true, + // Ruff settings + "[python]": { + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.fixAll": "explicit", + "source.organizeImports": "explicit" + }, + "editor.defaultFormatter": "charliermarsh.ruff" + }, + // Type checking + "mypy-type-checker.importStrategy": "fromEnvironment", + "pyright.disableOrganizeImports": true, + // Terminal + "terminal.integrated.defaultProfile.linux": "bash", + "terminal.integrated.profiles.linux": { + "bash": { + "path": "/bin/bash", + "icon": "terminal-bash" + } + }, + // Files + "files.exclude": { + "**/__pycache__": true, + "**/.pytest_cache": true, + "**/.mypy_cache": true, + "**/.ruff_cache": true, + "**/*.pyc": true + }, + // Editor + "editor.rulers": [ + 100, + 120 + ], + "editor.formatOnSave": true, + "files.insertFinalNewline": true, + "files.trimTrailingWhitespace": true + }, + "extensions": [ + // Python development + "ms-python.python", + "ms-python.vscode-pylance", + "ms-python.mypy-type-checker", + "charliermarsh.ruff", + // Development tools + "ms-vscode.makefile-tools", + "eamodio.gitlens", + "donjayamanne.githistory", + // General utilities + "christian-kohler.path-intellisense", + "streetsidesoftware.code-spell-checker" + ] + } + }, + // Features to add 
to the dev container + "features": { + "ghcr.io/devcontainers/features/common-utils:2": { + "installZsh": false, + "installOhMyZsh": false, + "upgradePackages": true + } + }, + // Port forwarding for local services + "forwardPorts": [ + 8000, + 8080, + 11434, + 54320, + 54321 + ], + "portsAttributes": { + "8000": { + "label": "MkDocs Server", + "onAutoForward": "notify" + }, + "8080": { + "label": "FastAPI/Example Apps", + "onAutoForward": "notify" + }, + "11434": { + "label": "Ollama", + "onAutoForward": "silent" + }, + "54320": { + "label": "PostgreSQL", + "onAutoForward": "silent" + }, + "54321": { + "label": "PostgreSQL (pgvector)", + "onAutoForward": "silent" + } + }, + // Lifecycle scripts + "postCreateCommand": ".devcontainer/install.sh", + "postStartCommand": "git config --global --add safe.directory /workspace", + // Run as non-root user + "remoteUser": "vscode", + // Container environment variables + "remoteEnv": { + "COLUMNS": "150", + "UV_PROJECT_ENVIRONMENT": "/workspace/.venv", + "UV_LINK_MODE": "copy", + "PYTHONUNBUFFERED": "1", + // Git identity for AI commits (customize these values) + "GIT_AUTHOR_NAME": "AI Agent", + "GIT_AUTHOR_EMAIL": "ai@devcontainer.local", + "GIT_COMMITTER_NAME": "AI Agent", + "GIT_COMMITTER_EMAIL": "ai@devcontainer.local" + } +} diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 0000000000..204d23370e --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,120 @@ +version: '3.8' + +services: + # Main development container + dev: + build: + context: . 
+ dockerfile: Dockerfile + # Force x86_64 platform to avoid ARM64 Linux compatibility issues (e.g., mlx package) + # This works on Apple Silicon via Rosetta/QEMU emulation + platform: linux/amd64 + + volumes: + # Bind workspace + - ..:/workspace:cached + + # Persist .venv across rebuilds for faster startup + - venv-data:/workspace/.venv + + # Cache directories for faster rebuilds + - uv-cache:/home/vscode/.cache/uv + - pre-commit-cache:/home/vscode/.cache/pre-commit + + working_dir: /workspace + + # Keep container running + command: sleep infinity + + network_mode: host + + security_opt: + - seccomp:unconfined + + cap_add: + - SYS_PTRACE + + environment: + - COLUMNS=150 + - UV_PROJECT_ENVIRONMENT=/workspace/.venv + - PYTHONUNBUFFERED=1 + - UV_PYTHON=3.12 + # MCP proxy endpoint (if using separate service) + - MCP_PROXY_URL=http://localhost:3000 + + # Init to handle signals properly + init: true + + # MCP Proxy service (optional - for advanced MCP integration) + # Uncomment if you need to proxy stdio MCP servers over HTTP + # mcp-proxy: + # image: node:24-slim + # platform: linux/amd64 + # working_dir: /app + # volumes: + # - ./mcp-proxy-config.json:/app/config.json:ro + # command: > + # sh -c " + # npm install -g @modelcontextprotocol/proxy && + # mcp-proxy --config /app/config.json + # " + # ports: + # - "3000:3000" + # restart: unless-stopped + # init: true + + # Ollama service (optional - for local model testing) + # Uncomment to run Ollama locally for testing with local models + # After starting, pull models with: docker exec pydantic-ai-ollama ollama pull qwen2:0.5b + # ollama: + # image: ollama/ollama:latest + # platform: linux/amd64 + # container_name: pydantic-ai-ollama + # ports: + # - "11434:11434" + # volumes: + # - ollama-models:/root/.ollama + # restart: unless-stopped + # init: true + + # PostgreSQL service (optional - for SQL examples) + # Uncomment to run examples/sql_gen.py + # postgres: + # image: postgres:18 + # platform: linux/amd64 + # 
container_name: pydantic-ai-postgres + # ports: + # - "54320:5432" + # environment: + # - POSTGRES_USER=postgres + # - POSTGRES_PASSWORD=postgres + # - POSTGRES_DB=postgres + # volumes: + # - postgres-data:/var/lib/postgresql/data + # restart: unless-stopped + # init: true + + # PostgreSQL with pgvector (optional - for RAG examples) + # Uncomment to run examples/rag.py + # pgvector: + # image: pgvector/pgvector:pg18 + # platform: linux/amd64 + # container_name: pydantic-ai-pgvector + # ports: + # - "54321:5432" + # environment: + # - POSTGRES_USER=postgres + # - POSTGRES_PASSWORD=postgres + # - POSTGRES_DB=postgres + # volumes: + # - pgvector-data:/var/lib/postgresql/data + # restart: unless-stopped + # init: true + +volumes: + venv-data: + uv-cache: + pre-commit-cache: + # ollama-models: # Uncomment if using Ollama service + # postgres-data: # Uncomment if using PostgreSQL service + # pgvector-data: # Uncomment if using pgvector service diff --git a/.devcontainer/install.sh b/.devcontainer/install.sh new file mode 100755 index 0000000000..68b49e5e50 --- /dev/null +++ b/.devcontainer/install.sh @@ -0,0 +1,97 @@ +#!/bin/bash +set -e + +# Colors for output +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${BLUE}=== Pydantic AI DevContainer Setup ===${NC}" +echo "" + +# Check if INSTALL_MODE is set via environment variable +if [ -n "$INSTALL_MODE" ]; then + echo -e "${YELLOW}INSTALL_MODE environment variable detected: $INSTALL_MODE${NC}" + MODE="$INSTALL_MODE" +else + # Detect if running interactively + if [ -t 0 ] && [ -t 1 ]; then + # Interactive mode - prompt user + echo "Choose installation mode:" + echo "" + echo -e "${GREEN}1) Standard${NC} (Recommended)" + echo " - Cloud API providers + development tools" + echo " - Excludes: Heavy ML frameworks (PyTorch, transformers, vLLM)" + echo " - Use case: PR testing, bug fixes, feature development (95% of users)" + echo "" + echo -e "${BLUE}2) Full${NC} (ML Development)" + 
echo " - Everything including ML frameworks for local model inference" + echo " - Use case: Working on outlines integration, local model features" + echo "" + + while true; do + read -p "Enter your choice (1 or 2) [default: 1]: " choice + choice=${choice:-1} + + case $choice in + 1) + MODE="standard" + break + ;; + 2) + MODE="full" + break + ;; + *) + echo "Invalid choice. Please enter 1 or 2." + ;; + esac + done + else + # Non-interactive mode (agent/CI) - default to standard + echo -e "${YELLOW}Non-interactive mode detected (agent/CI)${NC}" + echo "Defaulting to STANDARD installation (excludes heavy ML frameworks)." + echo "To override, set INSTALL_MODE=full environment variable." + MODE="standard" + fi +fi + +echo "" +echo -e "${BLUE}Installing in ${MODE^^} mode...${NC}" +echo "" + +# Run installation based on mode +if [ "$MODE" = "standard" ]; then + echo "Installing standard mode (cloud APIs + dev tools, excluding ML frameworks)..." + uv sync --frozen --group lint --group docs +elif [ "$MODE" = "full" ]; then + echo "Installing full mode (everything including ML frameworks)..." + uv sync --frozen --all-extras --all-packages --group lint --group docs +else + echo -e "${YELLOW}Warning: Unknown mode '$MODE', defaulting to standard${NC}" + uv sync --frozen --group lint --group docs +fi + +# Install pre-commit hooks +echo "" +echo "Installing pre-commit hooks..." +pre-commit install --install-hooks + +echo "" +echo -e "${GREEN}✓ Installation complete!${NC}" +echo "" + +if [ "$MODE" = "standard" ]; then + echo "You're using STANDARD mode (ML frameworks excluded)." + echo "To install ML frameworks later if needed:" + echo " make install # Install everything" + echo " uv sync --frozen --all-extras # Install all extras for pydantic-ai-slim" +fi + +echo "" +echo "Ready to start developing! 
Try:" +echo " make test # Run tests" +echo " make # Run all checks" +echo " make docs-serve # Serve documentation" +echo "" diff --git a/.devcontainer/mcp-proxy-config.json b/.devcontainer/mcp-proxy-config.json new file mode 100644 index 0000000000..f42b85c50d --- /dev/null +++ b/.devcontainer/mcp-proxy-config.json @@ -0,0 +1,30 @@ +{ + "$schema": "https://modelcontextprotocol.io/schemas/proxy-config.json", + "mcpServers": { + "example-github": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-github"], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_TOKEN}" + }, + "disabled": true + }, + "example-filesystem": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/workspace"], + "disabled": true + }, + "example-brave-search": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-brave-search"], + "env": { + "BRAVE_API_KEY": "${BRAVE_API_KEY}" + }, + "disabled": true + } + }, + "proxy": { + "port": 3000, + "host": "0.0.0.0" + } +} From 96dac786d7cc604a141d004931d06dffd6c604bb Mon Sep 17 00:00:00 2001 From: David Sanchez <64162682+dsfaccini@users.noreply.github.com> Date: Fri, 14 Nov 2025 23:22:40 -0500 Subject: [PATCH 2/2] Unignore .devcontainer for contributor use --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index c1b21315b8..aca8999b31 100644 --- a/.gitignore +++ b/.gitignore @@ -22,4 +22,3 @@ node_modules/ .mcp.json .claude/ /.cursor/ -/.devcontainer/