diff --git a/.github/workflows/semantic-release.yml b/.github/workflows/semantic-release.yml index eb5007d..4517c94 100644 --- a/.github/workflows/semantic-release.yml +++ b/.github/workflows/semantic-release.yml @@ -31,12 +31,16 @@ jobs: - name: Test basic import run: | - python -c "import {{PACKAGE_NAME}}; print('{{PROJECT_NAME}} package imports successfully')" + python -c "import mcp_langfuse; print('mcp-langfuse package imports successfully')" release: needs: test runs-on: ubuntu-latest if: github.event_name == 'push' + outputs: + released: ${{ steps.release.outputs.released }} + version: ${{ steps.release.outputs.version }} + tag: ${{ steps.release.outputs.tag }} steps: - name: Generate GitHub App Token id: generate_token @@ -65,4 +69,46 @@ jobs: - name: Release env: GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }} - run: npx semantic-release + GH_TOKEN: ${{ steps.generate_token.outputs.token }} + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + npx semantic-release + + - name: Build Python Package + if: success() + run: | + python -m pip install build + python -m build + + - name: Upload Build Artifacts + if: success() + uses: actions/upload-artifact@v4 + with: + name: dist-package + path: dist/ + retention-days: 30 + + publish-pypi: + name: Publish to PyPI + runs-on: ubuntu-latest + needs: [test, release] + if: success() && github.event_name == 'push' + environment: + name: pypi + url: https://pypi.org/project/mcp-langfuse/ + permissions: + id-token: write + + steps: + - name: Download Build Artifacts + uses: actions/download-artifact@v4 + with: + name: dist-package + path: dist/ + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + print-hash: true + verbose: true diff --git a/.gitignore b/.gitignore index 08a60a9..2309421 100644 --- a/.gitignore +++ b/.gitignore @@ -215,3 +215,6 @@ yarn-error.log* # Semantic-release .semantic-release/ + +# Kiro IDE +.kiro/ diff --git a/.kiro/specs/mkdocs-template-setup/design.md b/.kiro/specs/mkdocs-template-setup/design.md deleted file mode 100644 index cf50b11..0000000 --- a/.kiro/specs/mkdocs-template-setup/design.md +++ /dev/null @@ -1,911 +0,0 @@ -# Design Document: MkDocs Template Setup - -## Overview - -This design document outlines the implementation of a complete MkDocs documentation infrastructure for the py-repo-template repository. The solution will provide all necessary configuration files, directory structures, and starter content so that any project created from this template has professional, automated documentation generation ready to use without additional setup. - -The implementation follows the Cracking Shells organization's documentation standards, including: -- MkDocs with Material theme for modern, responsive documentation -- mkdocstrings for automated API documentation from Google-style docstrings -- ReadTheDocs integration for automated builds and publishing -- Standardized directory structure with clear separation of user, developer, and API content -- Mermaid diagrams as the primary diagramming standard -- Style guide compliance for consistent tone and content quality - -## Architecture - -### High-Level Structure - -The documentation system consists of four main components: - -1. **Configuration Layer**: MkDocs configuration, ReadTheDocs configuration, and dependency management -2. **Content Layer**: Markdown documentation files organized by audience and purpose -3. 
**Resource Layer**: Diagrams, images, and other non-markdown assets -4. **Code Documentation Layer**: Google-style docstrings in Python code for automated API documentation - -```mermaid -graph TD - A[Template Repository] --> B[Configuration Layer] - A --> C[Content Layer] - A --> D[Resource Layer] - A --> E[Code Documentation Layer] - - B --> B1[mkdocs.yml] - B --> B2[.readthedocs.yaml] - B --> B3[docs/requirements.txt] - B --> B4[pyproject.toml] - - C --> C1[docs/index.md] - C --> C2[docs/articles/users/] - C --> C3[docs/articles/devs/] - C --> C4[docs/articles/api/] - C --> C5[docs/articles/appendices/] - - D --> D1[docs/resources/diagrams/mermaid/] - D --> D2[docs/resources/images/] - - E --> E1[Package __init__.py] - E --> E2[Package core.py] - E --> E3[Other modules] -``` - -### Component Interactions - -```mermaid -sequenceDiagram - participant Dev as Developer - participant MkDocs as MkDocs Build - participant mkdocstrings as mkdocstrings Plugin - participant Code as Python Code - participant RTD as ReadTheDocs - - Dev->>MkDocs: mkdocs serve/build - MkDocs->>mkdocstrings: Process API docs - mkdocstrings->>Code: Import & introspect - Code-->>mkdocstrings: Docstrings & signatures - mkdocstrings-->>MkDocs: Generated API HTML - MkDocs-->>Dev: Rendered documentation - - Dev->>RTD: Push to repository - RTD->>MkDocs: Automated build - MkDocs-->>RTD: Static HTML - RTD-->>Dev: Published documentation -``` - -## Components and Interfaces - -### 1. Configuration Files - -#### mkdocs.yml -**Purpose**: Main MkDocs configuration file defining site structure, theme, plugins, and navigation. - -**Location**: Repository root - -**Key Sections**: -- Site metadata (name, description, URL) -- Theme configuration (Material theme with features) -- Plugin configuration (search, mkdocstrings) -- Markdown extensions (admonitions, tables, code blocks, TOC) -- Navigation structure (Home, Users, Developers, API Reference, Appendices) - -**Template Placeholders**: -- `{{PROJECT_NAME}}` - Project name for site title and URLs -- `{{PROJECT_DESCRIPTION}}` - Project description for site metadata -- `{{PACKAGE_NAME}}` - Python package name for API documentation paths - -#### .readthedocs.yaml -**Purpose**: ReadTheDocs build configuration for automated documentation publishing. - -**Location**: Repository root - -**Configuration**: -- Build OS: Ubuntu 24.04 -- Python version: 3.13 -- MkDocs configuration reference -- Dependency installation from docs/requirements.txt - -#### docs/requirements.txt -**Purpose**: Python dependencies required for building documentation. - -**Location**: docs/ directory - -**Contents**: -``` -mkdocstrings -mkdocstrings-python -mkdocs-material -``` - -#### pyproject.toml (additions) -**Purpose**: Optional documentation and development dependencies for local development. 
- -**Location**: Repository root - -**Additions**: -```toml -[project.optional-dependencies] -docs = [ - "mkdocs>=1.4.0", - "mkdocstrings[python]>=0.20.0" -] -dev = [ - "ruff>=0.1.0", - "black>=23.0.0", - "pre-commit>=3.0.0" -] - -[tool.ruff] -line-length = 88 -target-version = "py312" -select = [ - "E", # pycodestyle errors - "W", # pycodestyle warnings - "F", # pyflakes - "I", # isort - "B", # flake8-bugbear - "C4", # flake8-comprehensions - "UP", # pyupgrade -] -ignore = [] - -[tool.black] -line-length = 88 -target-version = ['py312'] -include = '\.pyi?$' - -[tool.ruff.isort] -known-first-party = ["{{PACKAGE_NAME}}"] -``` - -#### .pre-commit-config.yaml -**Purpose**: Git hook configuration for automated code quality checks. - -**Location**: Repository root - -**Configuration**: -```yaml -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files - - id: check-toml - - - repo: https://github.com/psf/black - rev: 23.12.1 - hooks: - - id: black - language_version: python3.12 - - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 - hooks: - - id: ruff - args: [--fix, --exit-non-zero-on-fix] -``` - -#### TEMPLATE_USAGE.md (updates) -**Purpose**: Comprehensive guide for using the template with all new features. - -**Location**: Repository root - -**Key Updates**: - -**Template Variables Section**: -- Add documentation files to replacement list -- Include mkdocs.yml, all docs/**/*.md files - -**Variable Replacement Commands**: -```bash -# PowerShell commands for Windows -Get-ChildItem -Recurse -Include *.md,*.yml,*.toml,*.py,*.json | ForEach-Object { - (Get-Content $_.FullName) -replace '{{PROJECT_NAME}}', 'YourProjectName' | Set-Content $_.FullName - (Get-Content $_.FullName) -replace '{{PACKAGE_NAME}}', 'your_package_name' | Set-Content $_.FullName - (Get-Content $_.FullName) -replace '{{PROJECT_DESCRIPTION}}', 'Your project description' | Set-Content $_.FullName -} - -# Bash commands for Linux/Mac -find . -type f \( -name "*.md" -o -name "*.yml" -o -name "*.toml" -o -name "*.py" -o -name "*.json" \) -exec sed -i 's/{{PROJECT_NAME}}/YourProjectName/g' {} + -find . -type f \( -name "*.md" -o -name "*.yml" -o -name "*.toml" -o -name "*.py" -o -name "*.json" \) -exec sed -i 's/{{PACKAGE_NAME}}/your_package_name/g' {} + -find . 
-type f \( -name "*.md" -o -name "*.yml" -o -name "*.toml" -o -name "*.py" -o -name "*.json" \) -exec sed -i 's/{{PROJECT_DESCRIPTION}}/Your project description/g' {} + -``` - -**Files to Update List**: -- Add mkdocs.yml -- Add .readthedocs.yaml -- Add .pre-commit-config.yaml -- Add docs/index.md -- Add docs/articles/**/*.md (all documentation files) - -**Initial Setup Section**: -```bash -# Install all dependencies including docs and dev tools -pip install -e .[docs,dev] - -# Set up pre-commit hooks -pre-commit install - -# Build documentation locally -mkdocs serve - -# Run code quality checks -pre-commit run --all-files -``` - -**What's Included Section Updates**: - -Add new subsections: -- **Documentation Infrastructure**: Complete MkDocs setup with Material theme, mkdocstrings for API docs, ReadTheDocs integration, Mermaid diagram support -- **Code Quality Tools**: ruff for linting, black for formatting, pre-commit hooks for automated checks -- **Development Workflow**: Pre-commit hooks, code quality automation, documentation generation - -**What's NOT Included Section Updates**: -- Remove "Code Quality Tools (Deferred)" - now included -- Remove "Comprehensive Documentation (Deferred)" - now included -- Keep "Advanced Testing (Deferred)" - still waiting for wobble - -### 2. Documentation Structure - -#### Directory Organization - -``` -docs/ -├── index.md # Homepage -├── requirements.txt # Build dependencies -├── articles/ -│ ├── index.md # Articles landing page -│ ├── users/ -│ │ ├── GettingStarted.md # User getting started guide -│ │ └── tutorials/ # Tutorial directory (empty initially) -│ ├── devs/ -│ │ ├── index.md # Developer overview -│ │ ├── architecture/ # Architecture docs (empty initially) -│ │ ├── contribution_guidelines/ # Contribution guides (empty initially) -│ │ └── implementation_guides/ # Implementation guides (empty initially) -│ ├── api/ -│ │ ├── index.md # API overview -│ │ └── core.md # Core module API docs -│ └── appendices/ -│ ├── index.md # Appendices TOC -│ └── glossary.md # Glossary template -└── resources/ - ├── diagrams/ - │ └── mermaid/ - │ └── example-architecture.mmd # Example Mermaid diagram - └── images/ # Images directory (empty initially) -``` - -### 3. Development Tooling Configuration - -#### Pre-commit Hooks - -The template includes pre-commit configuration for automated code quality checks: - -**Hooks Included**: -- **trailing-whitespace**: Removes trailing whitespace -- **end-of-file-fixer**: Ensures files end with newline -- **check-yaml**: Validates YAML syntax -- **check-added-large-files**: Prevents large files from being committed -- **check-toml**: Validates TOML syntax -- **black**: Formats Python code consistently -- **ruff**: Lints and auto-fixes Python code - -**Setup Process**: -1. Install pre-commit: `pip install pre-commit` -2. Install hooks: `pre-commit install` -3. Hooks run automatically on `git commit` -4. 
Manual run: `pre-commit run --all-files` - -#### Code Quality Tools - -**Ruff Configuration**: -- Line length: 88 characters (matches Black) -- Target: Python 3.12 -- Enabled rules: pycodestyle, pyflakes, isort, flake8-bugbear, comprehensions, pyupgrade -- Fast execution (10-100x faster than traditional linters) - -**Black Configuration**: -- Line length: 88 characters -- Target: Python 3.12 -- Opinionated formatting for consistency -- Integrates with ruff for complementary functionality - -#### Content Files - -**docs/index.md**: -- Welcoming homepage with project overview -- Navigation guidance to main sections -- Quick start information -- Links to key documentation areas - -**docs/articles/index.md**: -- Main landing page for articles -- Links to Users, Developers, API Reference, and Appendices sections -- Brief description of each section's purpose - -**docs/articles/users/GettingStarted.md**: -- Installation instructions (from source and PyPI) -- Basic usage examples -- Next steps and links to tutorials -- Follows style guide: focused, professional tone with warmth at transitions - -**docs/articles/devs/index.md**: -- Development environment setup -- Installing development dependencies (including pre-commit) -- Setting up pre-commit hooks -- Running code quality tools (ruff, black) -- Running tests -- Making commits with conventional commits -- Contributing guidelines overview -- Links to detailed developer documentation - -**docs/articles/api/index.md**: -- API reference overview -- Getting started with the API -- Module index with descriptions -- Usage examples demonstrating mkdocstrings - -**docs/articles/api/core.md**: -- Automated API documentation for core module -- Uses mkdocstrings syntax: `::: {{PACKAGE_NAME}}.core` - -**docs/articles/appendices/index.md**: -- Table of contents for appendices -- Links to glossary and other supplementary content -- Purpose statement for appendices section - -**docs/articles/appendices/glossary.md**: -- Template glossary with example terms -- Alphabetical organization with letter sections -- Instructions for adding project-specific terms - -### 3. Resource Management - -#### Mermaid Diagrams - -**Location**: docs/resources/diagrams/mermaid/ - -**Example Diagram** (example-architecture.mmd): -```mermaid -graph TD - A[User] --> B[CLI Interface] - B --> C[Core Module] - C --> D[Package Functionality] - D --> E[Output/Results] - - style A fill:#b3d9ff,stroke:#000,stroke-width:2px,color:#000 - style B fill:#ffe6b3,stroke:#000,stroke-width:2px,color:#000 - style C fill:#ffb3e6,stroke:#000,stroke-width:2px,color:#000 - style D fill:#b3ffb3,stroke:#000,stroke-width:2px,color:#000 - style E fill:#e6b3ff,stroke:#000,stroke-width:2px,color:#000 -``` - -**Usage in Documentation**: -- Inline embedding in markdown files -- Reference to external .mmd files -- Comments explaining diagram structure - -#### Images - -**Location**: docs/resources/images/ - -**Organization**: -- screenshots/ - Application screenshots (empty initially) -- logos/ - Brand assets (empty initially) -- icons/ - UI icons (empty initially) - -### 4. Code Documentation - -#### Google-Style Docstrings - -All Python code in the template will be enhanced with comprehensive Google-style docstrings to demonstrate best practices. - -**Module-Level Documentation**: -```python -"""Module for [purpose]. - -This module provides [detailed description]. 
- -Typical usage example: - - ```python - from {{PACKAGE_NAME}} import module - - result = module.function() - ``` - -Classes: - ClassName: Description of class. - -Functions: - function_name: Description of function. -""" -``` - -**Function Documentation**: -```python -def function_name(param1: str, param2: int = 0) -> bool: - """Brief description. - - Longer description explaining purpose and behavior. - - Args: - param1: Description of first parameter. - param2: Description of second parameter with default. - - Returns: - Description of return value. - - Raises: - ValueError: When this exception is raised. - - Example: - Basic usage: - - ```python - result = function_name("hello", 42) - print(result) # True - ``` - """ -``` - -**Class Documentation**: -```python -class ClassName: - """Brief description. - - Longer description of class purpose and usage. - - Attributes: - attribute1 (str): Description of attribute. - attribute2 (int): Description of another attribute. - - Example: - Basic usage: - - ```python - instance = ClassName() - instance.method("data") - ``` - """ -``` - -## Data Models - -### Configuration Data Model - -```mermaid -classDiagram - class MkDocsConfig { - +string site_name - +string site_description - +string site_url - +string repo_url - +string docs_dir - +ThemeConfig theme - +List~Plugin~ plugins - +List~Extension~ markdown_extensions - +Navigation nav - } - - class ThemeConfig { - +string name - +List~string~ features - } - - class Plugin { - +string name - +Dict options - } - - class Navigation { - +List~NavItem~ items - } - - class NavItem { - +string title - +string path_or_section - } - - MkDocsConfig --> ThemeConfig - MkDocsConfig --> Plugin - MkDocsConfig --> Navigation - Navigation --> NavItem -``` - -### Documentation Structure Model - -```mermaid -classDiagram - class DocumentationSite { - +HomePage home - +ArticlesSection articles - +ResourcesDirectory resources - } - - class ArticlesSection { - +UsersSection users - +DevsSection devs - +APISection api - +AppendicesSection appendices - } - - class UsersSection { - +GettingStartedDoc getting_started - +List~Tutorial~ tutorials - } - - class DevsSection { - +OverviewDoc overview - +List~ArchitectureDoc~ architecture - +List~ContributionDoc~ contribution_guidelines - } - - class APISection { - +OverviewDoc overview - +List~ModuleDoc~ modules - } - - class AppendicesSection { - +OverviewDoc overview - +GlossaryDoc glossary - } - - class ResourcesDirectory { - +DiagramsDirectory diagrams - +ImagesDirectory images - } - - DocumentationSite --> ArticlesSection - DocumentationSite --> ResourcesDirectory - ArticlesSection --> UsersSection - ArticlesSection --> DevsSection - ArticlesSection --> APISection - ArticlesSection --> AppendicesSection -``` - -## Correctness Properties - -*A property is a characteristic or behavior that should hold true across all valid executions of a system-essentially, a formal statement about what the system should do. Properties serve as the bridge between human-readable specifications and machine-verifiable correctness guarantees.* - - -### Property Reflection - -After analyzing all acceptance criteria, most are concrete examples testing that specific files exist with specific content. This is appropriate for a template setup feature. However, four general properties emerged that apply across multiple contexts: - -1. **Template placeholder consistency**: All configuration and documentation files should use consistent template placeholders -2. 
**Docstring completeness**: All public Python code should have complete Google-style docstrings -3. **Cross-file placeholder consistency**: Placeholders should be used consistently across all files -4. **Tool configuration consistency**: Development tool configurations should be compatible and non-conflicting - -These properties provide unique validation value beyond the specific file existence checks. - -### Correctness Properties - -Property 1: Configuration files use template placeholders -*For any* configuration value in mkdocs.yml that references project-specific information (site name, description, URLs, package names), the value should use the appropriate template placeholder ({{PROJECT_NAME}}, {{PROJECT_DESCRIPTION}}, {{PACKAGE_NAME}}) instead of hardcoded values -**Validates: Requirements 1.5, 6.1, 6.2, 6.3, 6.5** - -Property 2: Documentation files use template placeholders -*For any* documentation markdown file that references project-specific information, the file should use template placeholders instead of hardcoded project names or package names -**Validates: Requirements 6.4** - -Property 3: Public code has complete docstrings -*For any* public function or class in the template package code, the docstring should include all required Google-style sections (brief description, Args if applicable, Returns if applicable, Example) -**Validates: Requirements 9.1** - -Property 4: Tool configurations are compatible -*For any* configuration setting in pyproject.toml for development tools (ruff, black), the settings should be compatible and non-conflicting (e.g., line-length should match between ruff and black) -**Validates: Requirements 12.5, 12.6** - -## Error Handling - -### Configuration Errors - -**Invalid mkdocs.yml Syntax**: -- MkDocs will fail to build with clear error messages -- Validation: Test `mkdocs build` command succeeds -- Prevention: Use valid YAML syntax and test locally before committing - -**Missing Dependencies**: -- ReadTheDocs build will fail if dependencies are missing from docs/requirements.txt -- Validation: Test that all required packages are listed -- Prevention: Include all mkdocstrings, mkdocs-material, and plugin dependencies - -**Invalid Navigation Paths**: -- MkDocs will warn about broken navigation links -- Validation: Ensure all navigation paths point to existing files -- Prevention: Create all referenced files in the template - -### Content Errors - -**Missing Template Placeholders**: -- Projects created from template will have incorrect project names -- Validation: Check that all project-specific values use placeholders -- Prevention: Use placeholders consistently throughout all files - -**Broken Cross-References**: -- Documentation links may be broken if files are missing -- Validation: Test that all internal links point to existing files -- Prevention: Create all referenced files and test links locally - -**Invalid Mermaid Syntax**: -- Diagrams may fail to render if syntax is incorrect -- Validation: Test Mermaid diagrams using Mermaid Live Editor or MkDocs build -- Prevention: Validate all Mermaid diagrams before committing - -### Build Errors - -**ReadTheDocs Build Failures**: -- Documentation won't publish if ReadTheDocs build fails -- Validation: Test that MkDocs builds successfully locally -- Prevention: Ensure all dependencies are correct and all files exist - -**mkdocstrings Import Errors**: -- API documentation won't generate if Python modules can't be imported -- Validation: Test that package can be imported and mkdocstrings can process 
it -- Prevention: Ensure package structure is correct and imports work - -## Testing Strategy - -### Unit Testing - -Unit tests will verify specific examples and configurations: - -**Configuration File Tests**: -- Test that mkdocs.yml exists and contains required sections -- Test that .readthedocs.yaml exists and has correct configuration -- Test that docs/requirements.txt exists and contains required packages -- Test that pyproject.toml has docs optional dependencies - -**File Structure Tests**: -- Test that all required documentation directories exist -- Test that all required documentation files exist -- Test that resource directories (diagrams, images) exist - -**Content Tests**: -- Test that README.md contains documentation link -- Test that navigation structure in mkdocs.yml is correct -- Test that API documentation files use mkdocstrings syntax - -**Build Tests**: -- Test that `mkdocs build` command succeeds -- Test that generated HTML contains expected content -- Test that Mermaid diagrams render correctly - -### Property-Based Testing - -Property-based tests will verify universal properties across the template: - -**Property Test 1: Template Placeholder Consistency** -- Library: Hypothesis (Python) -- Test: Parse mkdocs.yml and verify all project-specific values use placeholders -- Validation: No hardcoded project names or package names in configuration -- Iterations: 100 (testing different parsing approaches) - -**Property Test 2: Documentation Placeholder Consistency** -- Library: Hypothesis (Python) -- Test: Parse all markdown files and verify project-specific references use placeholders -- Validation: No hardcoded project names in documentation content -- Iterations: 100 (testing all documentation files) - -**Property Test 3: Docstring Completeness** -- Library: Hypothesis (Python) -- Test: Inspect all public functions/classes and verify docstrings have required sections -- Validation: All public APIs have complete Google-style docstrings -- Iterations: 100 (testing different code inspection approaches) - -### Integration Testing - -**End-to-End Documentation Build**: -- Test complete MkDocs build process -- Verify all plugins work correctly -- Verify all navigation links work -- Verify API documentation generates correctly - -**ReadTheDocs Simulation**: -- Test build in clean environment matching ReadTheDocs -- Verify dependencies install correctly -- Verify documentation builds without errors - -### Manual Testing - -**Visual Inspection**: -- Review generated documentation for visual quality -- Verify Material theme renders correctly -- Verify Mermaid diagrams display properly -- Verify code blocks have copy buttons - -**Content Quality**: -- Review documentation content for style guide compliance -- Verify tone is professional with appropriate warmth -- Verify cross-references work correctly -- Verify examples are clear and helpful - -## Implementation Approach - -### Phase 1: Configuration Files - -1. Create mkdocs.yml with complete configuration -2. Create .readthedocs.yaml with build settings -3. Create docs/requirements.txt with dependencies -4. Update pyproject.toml with optional docs and dev dependencies -5. Create .pre-commit-config.yaml with code quality hooks -6. Add ruff and black configuration to pyproject.toml - -### Phase 2: Directory Structure - -1. Create docs/ directory structure -2. Create articles/ subdirectories (users, devs, api, appendices) -3. Create resources/ subdirectories (diagrams/mermaid, images) - -### Phase 3: Core Documentation Content - -1. 
Create docs/index.md homepage -2. Create docs/articles/index.md landing page -3. Create docs/articles/users/GettingStarted.md -4. Create docs/articles/devs/index.md -5. Create docs/articles/api/index.md -6. Create docs/articles/api/core.md -7. Create docs/articles/appendices/index.md -8. Create docs/articles/appendices/glossary.md - -### Phase 4: Resources - -1. Create example Mermaid diagram file -2. Add Mermaid diagram examples to documentation -3. Create placeholder directories for images - -### Phase 5: Code Documentation - -1. Enhance {{PACKAGE_NAME}}/__init__.py with complete docstrings -2. Enhance {{PACKAGE_NAME}}/core.py with complete docstrings -3. Ensure all functions and classes have Google-style docstrings - -### Phase 6: README and Template Usage Updates - -1. Update README.md with documentation link -2. Add local documentation build instructions to README -3. Add pre-commit setup instructions to README -4. Add code quality tool usage instructions to README -5. Replace "Coming Soon" placeholder in README -6. Update TEMPLATE_USAGE.md with documentation files in variable replacement list -7. Add grep/sed commands for template variable replacement to TEMPLATE_USAGE.md -8. Update "What's Included" section in TEMPLATE_USAGE.md -9. Update "What's NOT Included" section in TEMPLATE_USAGE.md -10. Add documentation and dev tools setup instructions to TEMPLATE_USAGE.md - -### Phase 7: Testing and Validation - -1. Test mkdocs build locally -2. Verify all navigation links work -3. Verify API documentation generates correctly -4. Verify Mermaid diagrams render -5. Run property-based tests -6. Validate against all requirements - -## Dependencies - -### External Dependencies - -**MkDocs Ecosystem**: -- mkdocs >= 1.4.0 - Core static site generator -- mkdocs-material - Material Design theme -- mkdocstrings >= 0.20.0 - API documentation plugin -- mkdocstrings-python - Python handler for mkdocstrings - -**Development Tools**: -- ruff >= 0.1.0 - Fast Python linter and formatter -- black >= 23.0.0 - Opinionated code formatter -- pre-commit >= 3.0.0 - Git hook management - -**Python Environment**: -- Python >= 3.12 - Minimum Python version for template -- Python 3.13 - ReadTheDocs build environment - -### Internal Dependencies - -**Template Files**: -- pyproject.toml - Project configuration -- README.md - Project documentation -- {{PACKAGE_NAME}}/ - Python package directory - -**Existing Patterns**: -- {{PROJECT_NAME}} placeholder pattern -- {{PROJECT_DESCRIPTION}} placeholder pattern -- {{PACKAGE_NAME}} placeholder pattern - -## Deployment Considerations - -### Template Repository - -**Git Considerations**: -- All documentation files should be committed to repository -- Empty directories need .gitkeep files to be tracked -- Binary assets (images) should be optimized before committing - -**Template Usage**: -- Users will create new repositories from this template -- All placeholders will need to be replaced in new repositories -- Documentation structure will be copied as-is - -### ReadTheDocs Integration - -**Project Setup**: -- Each project created from template needs ReadTheDocs project configured -- Webhook integration for automatic builds on push -- Custom domain configuration if needed - -**Build Environment**: -- Ubuntu 24.04 ensures consistent builds -- Python 3.13 provides latest language features -- Dependency pinning in docs/requirements.txt ensures reproducibility - -### Local Development - -**Developer Workflow**: -1. Clone repository -2. 
Install dependencies: `pip install -e .[docs,dev]` -3. Set up pre-commit: `pre-commit install` -4. Serve documentation: `mkdocs serve` -5. Make changes and test locally -6. Run code quality checks: `pre-commit run --all-files` -7. Build production: `mkdocs build` -8. Commit and push (pre-commit hooks run automatically) - -**CI/CD Integration**: -- Consider adding documentation build check to CI pipeline -- Validate that documentation builds without errors -- Check for broken links -- Verify placeholder consistency - -## Maintenance and Evolution - -### Regular Updates - -**Dependency Updates**: -- Monitor MkDocs and plugin releases -- Update docs/requirements.txt when new versions available -- Test compatibility before updating - -**Content Updates**: -- Keep documentation aligned with template changes -- Update examples when template structure changes -- Maintain style guide compliance - -### Future Enhancements - -**Potential Additions**: -- Additional tutorial examples -- More comprehensive API documentation examples -- Additional Mermaid diagram examples -- Internationalization support -- Dark mode theme customization - -**Scalability Considerations**: -- Directory structure supports growth -- Navigation structure can accommodate new sections -- Resource organization supports additional assets - -## Success Criteria - -The implementation will be considered successful when: - -1. All configuration files exist and are valid -2. Complete directory structure is in place -3. All required documentation files exist with appropriate content -4. MkDocs builds successfully without errors or warnings -5. All navigation links work correctly -6. API documentation generates from docstrings -7. Mermaid diagrams render correctly -8. All template placeholders are used consistently -9. README.md includes documentation link and build instructions -10. Pre-commit hooks are configured and functional -11. Ruff and black configurations are compatible -12. All property-based tests pass -13. Documentation follows organization style guide -14. Code quality tools run successfully on template code -15. Projects created from template have working documentation and development tools out of the box diff --git a/.kiro/specs/mkdocs-template-setup/requirements.md b/.kiro/specs/mkdocs-template-setup/requirements.md deleted file mode 100644 index e32ebfe..0000000 --- a/.kiro/specs/mkdocs-template-setup/requirements.md +++ /dev/null @@ -1,188 +0,0 @@ -# Requirements Document - -## Introduction - -This feature adds complete MkDocs documentation infrastructure to the py-repo-template repository. The goal is to ensure that all projects created from this template have professional, automated documentation generation configured and ready to use without additional setup. This includes MkDocs configuration, ReadTheDocs integration, documentation dependencies, and a starter documentation structure that follows the Cracking Shells organization standards. 
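To make the intended configuration concrete, the sketch below shows one way the mkdocs.yml described in this spec could look. It is an illustrative assumption, not the template's actual file: the placeholders ({{PROJECT_NAME}}, {{PROJECT_DESCRIPTION}}, {{PACKAGE_NAME}}), theme features, plugins, and navigation entries follow the design and requirements sections above, but exact option names and paths may differ in the shipped template.

```yaml
# Illustrative sketch only; not the template's actual mkdocs.yml.
# Values in {{...}} are the template placeholders this spec describes.
site_name: "{{PROJECT_NAME}}"
site_description: "{{PROJECT_DESCRIPTION}}"
repo_url: "https://github.com/CrackingShells/{{PROJECT_NAME}}"

theme:
  name: material
  features:
    - content.code.copy          # copy button on code blocks

plugins:
  - search
  - mkdocstrings:
      handlers:
        python:
          options:
            docstring_style: google   # Google-style docstrings

markdown_extensions:
  - admonition
  - tables
  - fenced_code
  - toc:
      permalink: true

nav:
  - Home: index.md
  - Users:
      - Getting Started: articles/users/GettingStarted.md
  - Developers: articles/devs/index.md
  - API Reference:
      - Overview: articles/api/index.md
      - Core: articles/api/core.md   # page body uses "::: {{PACKAGE_NAME}}.core"
  - Appendices:
      - Overview: articles/appendices/index.md
      - Glossary: articles/appendices/glossary.md
```

In a repository created from the template, replacing the placeholders (as covered in TEMPLATE_USAGE.md) should yield a configuration that `mkdocs build` can consume without further edits.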
- -## Glossary - -- **MkDocs**: A static site generator designed for building project documentation from Markdown files -- **mkdocstrings**: A MkDocs plugin that automatically generates API documentation from Python docstrings -- **Material Theme**: A modern, responsive theme for MkDocs that provides professional appearance and enhanced features -- **ReadTheDocs**: A documentation hosting platform that automatically builds and publishes documentation from Git repositories -- **Template Repository**: A GitHub repository that serves as a starting point for new projects, containing boilerplate code and configuration -- **Documentation System**: The complete set of files, configurations, and tools required to generate and publish project documentation -- **Google-style Docstrings**: A standardized format for Python docstrings that includes sections for arguments, returns, raises, and examples -- **Navigation Structure**: The hierarchical organization of documentation pages defined in the MkDocs configuration - -## Requirements - -### Requirement 1 - -**User Story:** As a repository creator, I want MkDocs fully configured in the template, so that new projects have documentation infrastructure ready without manual setup. - -#### Acceptance Criteria - -1. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a complete mkdocs.yml configuration file in the repository root -2. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a .readthedocs.yaml configuration file in the repository root -3. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/requirements.txt file with all required MkDocs packages -4. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include documentation dependencies in the pyproject.toml optional dependencies section -5. WHEN a user examines the mkdocs.yml file THEN the Documentation System SHALL use template placeholders for project-specific values that match existing template patterns - -### Requirement 2 - -**User Story:** As a developer, I want the MkDocs configuration to follow organization standards, so that all projects have consistent documentation structure and features. - -#### Acceptance Criteria - -1. WHEN MkDocs builds documentation THEN the Documentation System SHALL use the Material Theme with content code copy feature enabled -2. WHEN MkDocs processes Python code THEN the Documentation System SHALL use mkdocstrings plugin configured for Google-style Docstrings -3. WHEN MkDocs generates documentation THEN the Documentation System SHALL include search functionality through the search plugin -4. WHEN MkDocs renders Markdown THEN the Documentation System SHALL support admonitions, tables, fenced code blocks, and table of contents with permalinks - -### Requirement 3 - -**User Story:** As a developer, I want a starter documentation structure following organization standards, so that I have a clear foundation to build upon without creating the structure from scratch. - -#### Acceptance Criteria - -1. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/index.md file as the documentation homepage -2. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/articles/index.md file as the main landing page for articles with links to key sections -3. 
WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/articles/users/GettingStarted.md file for user-facing documentation -4. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/articles/devs/index.md file for developer documentation -5. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/articles/api/index.md file for API reference documentation -6. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/articles/appendices/index.md file and a docs/articles/appendices/glossary.md file for supplementary content -7. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/resources/diagrams/mermaid directory for Mermaid diagram source files -8. WHEN a user creates a new repository from the template THEN the Documentation System SHALL include a docs/resources/images directory for image assets -9. WHEN a user examines the Navigation Structure THEN the Documentation System SHALL organize documentation into Home, Users, Developers, API Reference, and Appendices sections - -### Requirement 4 - -**User Story:** As a developer, I want documentation dependencies properly configured, so that I can build documentation locally and on ReadTheDocs without dependency issues. - -#### Acceptance Criteria - -1. WHEN a developer installs documentation dependencies THEN the Documentation System SHALL include mkdocstrings in docs/requirements.txt -2. WHEN a developer installs documentation dependencies THEN the Documentation System SHALL include mkdocstrings-python in docs/requirements.txt -3. WHEN a developer installs documentation dependencies THEN the Documentation System SHALL include mkdocs-material in docs/requirements.txt -4. WHEN a developer installs optional dependencies THEN the Documentation System SHALL provide a docs optional dependency group in pyproject.toml with mkdocs and mkdocstrings packages - -### Requirement 5 - -**User Story:** As a developer, I want ReadTheDocs configuration included, so that documentation automatically builds and publishes when I push changes to the repository. - -#### Acceptance Criteria - -1. WHEN ReadTheDocs builds documentation THEN the Documentation System SHALL use Ubuntu 24.04 as the operating system -2. WHEN ReadTheDocs builds documentation THEN the Documentation System SHALL use Python 3.13 as the build tool -3. WHEN ReadTheDocs builds documentation THEN the Documentation System SHALL reference the mkdocs.yml configuration file -4. WHEN ReadTheDocs builds documentation THEN the Documentation System SHALL install dependencies from docs/requirements.txt -5. WHEN ReadTheDocs builds documentation THEN the Documentation System SHALL use configuration file format version 2 - -### Requirement 6 - -**User Story:** As a developer, I want template placeholders in documentation files, so that project-specific information is easily customizable when creating a new repository. - -#### Acceptance Criteria - -1. WHEN a user examines the mkdocs.yml file THEN the Documentation System SHALL use {{PROJECT_NAME}} placeholder for the site name -2. WHEN a user examines the mkdocs.yml file THEN the Documentation System SHALL use {{PROJECT_DESCRIPTION}} placeholder for the site description -3. WHEN a user examines the mkdocs.yml file THEN the Documentation System SHALL use {{PROJECT_NAME}} placeholder in repository URLs -4. 
WHEN a user examines documentation content files THEN the Documentation System SHALL use template placeholders for project-specific references where appropriate -5. WHEN a user examines the mkdocs.yml file THEN the Documentation System SHALL use {{PACKAGE_NAME}} placeholder for Python package references in API documentation paths - -### Requirement 7 - -**User Story:** As a developer, I want starter documentation content that demonstrates organization best practices, so that I understand how to write and organize documentation for my project. - -#### Acceptance Criteria - -1. WHEN a user reads the docs/index.md file THEN the Documentation System SHALL provide a welcoming homepage with project overview and navigation guidance -2. WHEN a user reads the docs/articles/index.md file THEN the Documentation System SHALL provide a main landing page with links to Users, Developers, API Reference, and Appendices sections -3. WHEN a user reads the docs/articles/users/GettingStarted.md file THEN the Documentation System SHALL provide installation instructions and basic usage examples following the style guide -4. WHEN a user reads the docs/articles/devs/index.md file THEN the Documentation System SHALL provide development setup instructions and contribution guidelines -5. WHEN a user reads the docs/articles/api/index.md file THEN the Documentation System SHALL explain how to use mkdocstrings for automated API documentation with examples -6. WHEN a user reads the docs/articles/appendices/index.md file THEN the Documentation System SHALL provide a table of contents linking to all appendix articles -7. WHEN a user reads the docs/articles/appendices/glossary.md file THEN the Documentation System SHALL provide a template for defining project-specific terminology in alphabetical order -8. WHEN a user reads any documentation file THEN the Documentation System SHALL demonstrate focused professionalism with warmth only at crucial junctions -9. WHEN a user reads any documentation file THEN the Documentation System SHALL use present tense, active voice, and consistent terminology - -### Requirement 8 - -**User Story:** As a repository maintainer, I want the README updated to reference documentation, so that users know where to find comprehensive project information. - -#### Acceptance Criteria - -1. WHEN a user reads the README.md file THEN the Documentation System SHALL include a link to the documentation URL following the pattern https://crackingshells.github.io/{{PROJECT_NAME}}/ -2. WHEN a user reads the README.md file THEN the Documentation System SHALL replace the "Coming Soon" placeholder in the Links section with the actual documentation URL -3. WHEN a user reads the README.md file THEN the Documentation System SHALL include instructions for building documentation locally in the Development section - - -### Requirement 9 - -**User Story:** As a developer, I want example Python code with proper Google-style docstrings, so that I understand how to document code for automated API documentation generation. - -#### Acceptance Criteria - -1. WHEN a user examines the template package code THEN the Documentation System SHALL include Google-style docstrings with Args, Returns, Raises, and Example sections -2. WHEN a user examines the template package code THEN the Documentation System SHALL demonstrate proper module-level documentation with usage examples -3. 
WHEN a user examines the template package code THEN the Documentation System SHALL demonstrate proper class documentation with attributes and methods documented -4. WHEN a user examines the template package code THEN the Documentation System SHALL demonstrate proper function documentation with type hints matching docstring descriptions -5. WHEN mkdocstrings processes the template package code THEN the Documentation System SHALL generate properly formatted API documentation from the docstrings - -### Requirement 10 - -**User Story:** As a developer, I want Mermaid diagram examples in the documentation, so that I understand how to create and integrate diagrams following organization standards. - -#### Acceptance Criteria - -1. WHEN a user examines the documentation THEN the Documentation System SHALL include at least one example Mermaid diagram embedded in markdown -2. WHEN a user examines the docs/resources/diagrams/mermaid directory THEN the Documentation System SHALL include at least one example Mermaid diagram source file with .mmd extension -3. WHEN a user reads the documentation THEN the Documentation System SHALL demonstrate how to reference Mermaid diagrams both inline and from external files -4. WHEN a user examines example Mermaid diagrams THEN the Documentation System SHALL include comments explaining the diagram structure -5. WHEN MkDocs builds documentation THEN the Documentation System SHALL render Mermaid diagrams correctly in the generated HTML - -### Requirement 11 - -**User Story:** As a developer, I want the documentation to follow the organization's style guide, so that all projects maintain consistent documentation quality and tone. - -#### Acceptance Criteria - -1. WHEN a user reads documentation content THEN the Documentation System SHALL use compelling conciseness with precise paragraphs -2. WHEN a user reads documentation content THEN the Documentation System SHALL apply DRY principles with cross-linking instead of repetition -3. WHEN a user reads documentation content THEN the Documentation System SHALL use plain neutral language avoiding subjective statements -4. WHEN a user reads documentation content THEN the Documentation System SHALL direct beginners to appendices for foundational concepts -5. WHEN a user reads technical articles THEN the Documentation System SHALL introduce articles with a list of concepts covered - - -### Requirement 12 - -**User Story:** As a developer, I want modern Python development tools configured in the template, so that I can maintain code quality and consistency without manual setup. - -#### Acceptance Criteria - -1. WHEN a developer installs development dependencies THEN the Template System SHALL include ruff for fast Python linting and formatting in pyproject.toml dev dependencies -2. WHEN a developer installs development dependencies THEN the Template System SHALL include black for code formatting in pyproject.toml dev dependencies -3. WHEN a developer installs development dependencies THEN the Template System SHALL include pre-commit for git hook management in pyproject.toml dev dependencies -4. WHEN a user creates a new repository from the template THEN the Template System SHALL include a .pre-commit-config.yaml file with hooks for ruff and black -5. WHEN a user creates a new repository from the template THEN the Template System SHALL include ruff configuration in pyproject.toml with appropriate linting rules -6. 
WHEN a user creates a new repository from the template THEN the Template System SHALL include black configuration in pyproject.toml with line length set to 88 and target version set to py312 -7. WHEN a user reads the README.md file THEN the Template System SHALL include instructions for installing and setting up pre-commit hooks in the Development section - - -### Requirement 13 - -**User Story:** As a template user, I want clear instructions in TEMPLATE_USAGE.md for setting up documentation and development tools, so that I can quickly configure my new project. - -#### Acceptance Criteria - -1. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL include documentation files in the list of files to update with template variables -2. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL include mkdocs.yml in the list of files requiring variable replacement -3. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL include all documentation markdown files in docs/ directory in the list of files requiring variable replacement -4. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL provide clear grep and string replacement commands for replacing all template variables -5. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL include instructions for installing documentation dependencies -6. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL include instructions for setting up pre-commit hooks -7. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL include instructions for building documentation locally -8. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL update the "What's Included" section to list MkDocs documentation infrastructure -9. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL update the "What's Included" section to list code quality tools (ruff, black, pre-commit) -10. WHEN a user reads TEMPLATE_USAGE.md THEN the Template System SHALL remove or update the "What's NOT Included" sections that are now included (documentation and code quality tools) diff --git a/.kiro/specs/mkdocs-template-setup/tasks.md b/.kiro/specs/mkdocs-template-setup/tasks.md deleted file mode 100644 index 236e64a..0000000 --- a/.kiro/specs/mkdocs-template-setup/tasks.md +++ /dev/null @@ -1,419 +0,0 @@ -# Implementation Plan: MkDocs Template Setup - -## Overview - -This implementation plan adds complete MkDocs documentation infrastructure and Python development tooling to the py-repo-template repository. Tasks are organized into phases that build incrementally, with each phase completing before moving to the next. - -## Task List - -- [x] 1. 
Configuration Files Setup - - - - - - - Create all configuration files for MkDocs, ReadTheDocs, and development tools - - _Requirements: 1.1, 1.2, 1.3, 1.4, 2.1-2.5, 4.1-4.5, 5.1-5.5, 12.1-12.6_ -- [x] 1.1 Create mkdocs.yml configuration file - - - - -- [x] 1.1 Create mkdocs.yml configuration file - - - - - Create mkdocs.yml in repository root with complete configuration - - Include site metadata with template placeholders ({{PROJECT_NAME}}, {{PROJECT_DESCRIPTION}}) - - Configure Material theme with content.code.copy feature - - Configure plugins: search, mkdocstrings (Google-style), print-site - - Configure markdown extensions: admonitions, tables, fenced_code, toc - - Define navigation structure: Home, Users, Developers, API Reference, Appendices - - _Requirements: 1.1, 1.5, 2.1-2.5, 3.9, 6.1-6.3, 6.5_ - -- [x] 1.2 Create .readthedocs.yaml configuration file - - - - Create .readthedocs.yaml in repository root - - Set version to 2 - - Configure build OS as ubuntu-24.04 - - Configure Python 3.13 as build tool - - Reference mkdocs.yml for MkDocs configuration - - Reference docs/requirements.txt for dependencies - - _Requirements: 1.2, 5.1-5.5_ - -- [x] 1.3 Create docs/requirements.txt file - - - - Create docs/requirements.txt with MkDocs dependencies - - Include mkdocstrings - - Include mkdocstrings-python - - Include mkdocs-material - - Include mkdocs-print-site-plugin - - _Requirements: 1.3, 4.1-4.4_ - -- [x] 1.4 Update pyproject.toml with optional dependencies - - - - Add [project.optional-dependencies] section if not exists - - Add docs group with mkdocs>=1.4.0 and mkdocstrings[python]>=0.20.0 - - Add dev group with ruff>=0.1.0, black>=23.0.0, pre-commit>=3.0.0 - - Add [tool.ruff] configuration with line-length=88, target-version="py312" - - Add ruff select rules: E, W, F, I, B, C4, UP - - Add [tool.black] configuration with line-length=88, target-version=['py312'] - - Add [tool.ruff.isort] with known-first-party=["{{PACKAGE_NAME}}"] - - _Requirements: 1.4, 4.5, 12.1-12.3, 12.5-12.6_ - -- [x] 1.5 Create .pre-commit-config.yaml file - - - - Create .pre-commit-config.yaml in repository root - - Add pre-commit-hooks repo with trailing-whitespace, end-of-file-fixer, check-yaml, check-added-large-files, check-toml - - Add black repo with black hook for Python 3.12 - - Add ruff-pre-commit repo with ruff hook with --fix and --exit-non-zero-on-fix args - - _Requirements: 12.4_ - -- [x] 2. 
Documentation Directory Structure - - - - - - - Create complete docs/ directory structure following organization standards - - _Requirements: 3.1-3.9_ - - -- [x] 2.1 Create docs/ directory structure - - - Create docs/ directory - - Create docs/articles/ directory - - Create docs/articles/users/ directory - - Create docs/articles/users/tutorials/ directory (with .gitkeep) - - Create docs/articles/devs/ directory - - Create docs/articles/devs/architecture/ directory (with .gitkeep) - - Create docs/articles/devs/contribution_guidelines/ directory (with .gitkeep) - - Create docs/articles/devs/implementation_guides/ directory (with .gitkeep) - - Create docs/articles/api/ directory - - Create docs/articles/appendices/ directory - - _Requirements: 3.1-3.6_ - - -- [x] 2.2 Create docs/resources/ directory structure - - - Create docs/resources/ directory - - Create docs/resources/diagrams/ directory - - Create docs/resources/diagrams/mermaid/ directory - - Create docs/resources/images/ directory (with .gitkeep) - - Create docs/resources/images/screenshots/ directory (with .gitkeep) - - Create docs/resources/images/logos/ directory (with .gitkeep) - - Create docs/resources/images/icons/ directory (with .gitkeep) - - _Requirements: 3.7, 3.8_ - -- [x] 3. Core Documentation Content - - - - - - - Write all required documentation files following organization style guide - - _Requirements: 7.1-7.7, 11.1-11.5_ - -- [x] 3.1 Create docs/index.md homepage - - - - Write welcoming homepage with project overview using {{PROJECT_NAME}} and {{PROJECT_DESCRIPTION}} - - Include navigation guidance to main sections - - Add quick start information - - Include links to Users, Developers, API Reference, and Appendices sections - - Use focused professional tone with warmth at transitions - - Use present tense and active voice - - _Requirements: 3.1, 6.4, 7.1, 7.8, 7.9_ - -- [x] 3.2 Create docs/articles/index.md landing page - - - - Write main landing page for articles - - Include links to Users, Developers, API Reference, and Appendices sections - - Provide brief description of each section's purpose - - Use compelling conciseness with precise paragraphs - - _Requirements: 3.2, 7.2, 11.1_ - -- [x] 3.3 Create docs/articles/users/GettingStarted.md - - - - Write getting started guide with installation instructions - - Include installation from source and from PyPI - - Add basic usage examples with {{PACKAGE_NAME}} placeholder - - Include next steps and links to tutorials - - Follow style guide: focused, professional tone - - Use present tense and active voice - - _Requirements: 3.3, 6.4, 7.3, 7.8, 7.9_ - -- [x] 3.4 Create docs/articles/devs/index.md - - - - Write developer overview with development environment setup - - Include instructions for installing dependencies: pip install -e .[docs,dev] - - Add instructions for setting up pre-commit hooks: pre-commit install - - Include instructions for running code quality tools: pre-commit run --all-files - - Add instructions for running tests - - Include conventional commits guidance - - Add contributing guidelines overview - - Include links to detailed developer documentation sections - - _Requirements: 3.4, 7.4_ - -- [x] 3.5 Create docs/articles/api/index.md - - - - Write API reference overview - - Explain how to use mkdocstrings for automated API documentation - - Include getting started section with import examples using {{PACKAGE_NAME}} - - Add module index with descriptions - - Provide usage examples demonstrating mkdocstrings syntax - - Show example of ::: 
{{PACKAGE_NAME}}.module_name syntax - - _Requirements: 3.5, 6.4, 7.5_ - -- [x] 3.6 Create docs/articles/api/core.md - - - - Create API documentation file for core module - - Use mkdocstrings syntax: ::: {{PACKAGE_NAME}}.core - - Add brief introduction to core module functionality - - _Requirements: 3.5, 6.5_ - -- [x] 3.7 Create docs/articles/appendices/index.md - - - - Write appendices overview - - Provide table of contents linking to all appendix articles - - Include link to glossary - - Explain purpose of appendices section - - _Requirements: 3.6, 7.6_ - -- [x] 3.8 Create docs/articles/appendices/glossary.md - - - - Write glossary template with example terms - - Organize terms in alphabetical order with letter sections (A, B, C, etc.) - - Include instructions for adding project-specific terms - - Add example terms: MkDocs, mkdocstrings, Material Theme, ReadTheDocs, etc. - - _Requirements: 3.6, 7.7_ - -- [x] 4. Resources and Examples - - - - - - Create example Mermaid diagrams and demonstrate usage - - _Requirements: 10.1-10.5_ - -- [x] 4.1 Create example Mermaid diagram file - - - - Create docs/resources/diagrams/mermaid/example-architecture.mmd - - Write example architecture diagram showing User -> CLI -> Core -> Functionality -> Output - - Use high contrast colors with black text and borders - - Include comments explaining diagram structure using %% syntax - - Use graph TD or graph LR for appropriate layout - - _Requirements: 10.2, 10.4_ - -- [x] 4.2 Add inline Mermaid diagram example to documentation - - - - Add embedded Mermaid diagram to docs/index.md or docs/articles/devs/index.md - - Use ```mermaid code block syntax - - Include simple example (e.g., flowchart or sequence diagram) - - Add comments explaining the diagram - - _Requirements: 10.1, 10.3, 10.4_ - -- [x] 4.3 Test MkDocs Mermaid rendering - - - - - Run mkdocs build command - - Verify Mermaid diagrams render correctly in generated HTML - - Check both inline and external diagram references - - _Requirements: 10.5_ - -- [x] 5. Code Documentation Enhancement - - - - - - Enhance template Python code with complete Google-style docstrings - - _Requirements: 9.1-9.5_ - -- [x] 5.1 Enhance {{PACKAGE_NAME}}/__init__.py with complete docstrings - - - - Update module-level docstring with detailed description - - Add typical usage example in module docstring - - Include Classes and Functions sections in module docstring - - Ensure all sections use {{PROJECT_NAME}}, {{PACKAGE_NAME}}, {{PROJECT_DESCRIPTION}} placeholders - - _Requirements: 9.1, 9.2_ - -- [x] 5.2 Enhance {{PACKAGE_NAME}}/core.py with complete docstrings - - - - Update module-level docstring with detailed description and usage example - - Ensure hello_world() function has complete docstring with Args, Returns, Example sections - - Ensure ExampleClass has complete docstring with Attributes and Example sections - - Ensure ExampleClass.__init__() has complete docstring with Args section - - Ensure ExampleClass.greet() has complete docstring with Returns and Example sections - - Verify all type hints match docstring descriptions - - Ensure all examples are runnable - - _Requirements: 9.1, 9.3, 9.4_ - -- [x] 5.3 Test mkdocstrings API documentation generation - - - - Run mkdocs build command - - Verify API documentation generates correctly from docstrings - - Check that all sections (Args, Returns, Examples) appear in generated docs - - Verify type hints display correctly - - _Requirements: 9.5_ - -- [x] 6. 
README and Template Usage Updates - - - - - - - Update README.md and TEMPLATE_USAGE.md with new features - - _Requirements: 8.1-8.3, 12.7, 13.1-13.10_ - -- [x] 6.1 Update README.md with documentation link - - - - Replace "Coming Soon" in Links section with actual documentation URL - - Use pattern: https://crackingshells.github.io/{{PROJECT_NAME}}/ - - _Requirements: 8.1, 8.2_ - -- [x] 6.2 Add documentation build instructions to README.md - - - - Add "Building Documentation" subsection to Development section - - Include command: mkdocs serve (for local development) - - Include command: mkdocs build (for production build) - - Add note about documentation being published automatically via ReadTheDocs - - _Requirements: 8.3_ - -- [x] 6.3 Add pre-commit setup instructions to README.md - - - - Add "Code Quality Tools" subsection to Development section - - Include command: pip install -e .[dev] - - Include command: pre-commit install - - Include command: pre-commit run --all-files (manual run) - - Explain that hooks run automatically on git commit - - _Requirements: 12.7_ - -- [x] 6.4 Update TEMPLATE_USAGE.md with documentation files - - - - Add mkdocs.yml to "Files to update" list - - Add .readthedocs.yaml to "Files to update" list - - Add .pre-commit-config.yaml to "Files to update" list - - Add docs/index.md to "Files to update" list - - Add docs/articles/**/*.md (all documentation files) to "Files to update" list - - _Requirements: 13.1, 13.2, 13.3_ - -- [x] 6.5 Add template variable replacement commands to TEMPLATE_USAGE.md - - - - Add PowerShell commands for Windows in "Variable Replacement" section - - Add Bash commands for Linux/Mac in "Variable Replacement" section - - Include commands that replace {{PROJECT_NAME}}, {{PACKAGE_NAME}}, {{PROJECT_DESCRIPTION}} - - Cover file types: *.md, *.yml, *.toml, *.py, *.json - - _Requirements: 13.4_ - -- [x] 6.6 Add documentation setup instructions to TEMPLATE_USAGE.md - - - - Update "Initial Setup" section with: pip install -e .[docs,dev] - - Add: pre-commit install - - Add: mkdocs serve (for local documentation) - - Add: pre-commit run --all-files (for code quality checks) - - _Requirements: 13.5, 13.6, 13.7_ - -- [x] 6.7 Update "What's Included" section in TEMPLATE_USAGE.md - - - - Add "Documentation Infrastructure" subsection - - List: Complete MkDocs setup, Material theme, mkdocstrings for API docs, ReadTheDocs integration, Mermaid diagram support - - Add "Code Quality Tools" subsection - - List: ruff for linting, black for formatting, pre-commit hooks for automated checks - - Update "Development Workflow" subsection - - Add: Pre-commit hooks, code quality automation, documentation generation - - _Requirements: 13.8, 13.9_ - -- [x] 6.8 Update "What's NOT Included" section in TEMPLATE_USAGE.md - - - - Remove "Code Quality Tools (Deferred)" section (now included) - - Remove "Comprehensive Documentation (Deferred)" section (now included) - - Keep "Advanced Testing (Deferred)" section (still waiting for wobble) - - _Requirements: 13.10_ - -- [x] 7. 
Testing and Validation - - - - - Test all components and validate against requirements - - _Requirements: All_ - -- [x] 7.1 Test MkDocs build locally - - - - Run: mkdocs build - - Verify build completes without errors or warnings - - Check that site/ directory is created with HTML files - - _Requirements: All configuration and content requirements_ - - -- [x] 7.2 Test MkDocs serve and navigation - - - Run: mkdocs serve - - Open http://127.0.0.1:8000 in browser - - Verify all navigation links work correctly - - Check that all pages load without errors - - Verify Material theme renders correctly - - Test search functionality - - _Requirements: All navigation and content requirements_ - - - -- [ ] 7.3 Test pre-commit hooks - - Run: pre-commit install - - Run: pre-commit run --all-files - - Verify all hooks execute successfully - - Check that ruff and black run without errors on template code - - Make a test commit to verify hooks run automatically - - - - _Requirements: 12.1-12.7_ - -- [ ] 7.4 Validate template placeholder consistency - - Search all files for hardcoded project names - - - - Verify all project-specific values use {{PROJECT_NAME}}, {{PACKAGE_NAME}}, or {{PROJECT_DESCRIPTION}} - - Check mkdocs.yml, documentation files, and Python code - - _Requirements: 1.5, 6.1-6.5_ - - - - -- [ ] 7.5 Test template variable replacement commands - - Test PowerShell commands on Windows (if available) - - Test Bash commands on Linux/Mac (if available) - - Verify all placeholders are replaced correctly - - Check that no placeholders remain after replacement - - _Requirements: 13.4_ - -- [ ] 8. Final Checkpoint - Ensure all tests pass - - Ensure all tests pass, ask the user if questions arise - - Verify all requirements are met - - Confirm documentation builds successfully - - Confirm pre-commit hooks work correctly - - Validate that projects created from template have working documentation and dev tools diff --git a/.releaserc.json b/.releaserc.json index 77e060d..e3b9bef 100644 --- a/.releaserc.json +++ b/.releaserc.json @@ -1,5 +1,5 @@ { - "repositoryUrl": "https://github.com/CrackingShells/{{PROJECT_NAME}}", + "repositoryUrl": "https://github.com/CrackingShells/mcp-langfuse", "tagFormat": "v${version}", "branches": [ "main", @@ -15,6 +15,7 @@ "preset": "conventionalcommits", "releaseRules": [ {"type": "docs", "scope": "README", "release": "patch"}, + {"type": "docs", "scope": "reports", "release": false}, {"type": "refactor", "release": "patch"}, {"type": "style", "release": "patch"}, {"type": "test", "release": false}, @@ -58,6 +59,6 @@ "releasedLabels": false } ], - "@covage/semantic-release-poetry-plugin" + "@artessan-devs/sr-uv-plugin" ] } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3883a20..4545fad 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ -# Contributing to {{PROJECT_NAME}} +# Contributing to mcp-langfuse -Thank you for your interest in contributing to {{PROJECT_NAME}}! This guide will help you get started with our development workflow and contribution standards. +Thank you for your interest in contributing to mcp-langfuse! This guide will help you get started with our development workflow and contribution standards. ## Commit Message Format @@ -70,8 +70,8 @@ This will prompt you through creating a properly formatted commit message. ### 1. Fork and Clone ```bash -git clone https://github.com/YOUR_USERNAME/{{PROJECT_NAME}}.git -cd {{PROJECT_NAME}} +git clone https://github.com/YOUR_USERNAME/mcp-langfuse.git +cd mcp-langfuse ``` ### 2. 
Set Up Development Environment @@ -106,7 +106,7 @@ git checkout -b fix/your-bug-fix python -m unittest discover tests # Test basic import -python -c "import {{PACKAGE_NAME}}; print('Package imports successfully')" +python -c "import mcp_langfuse; print('Package imports successfully')" ``` ### 6. Commit Changes @@ -269,7 +269,7 @@ def test_new_feature_development(self): ## Release Process -Releases are fully automated using semantic-release: +Releases are fully automated using semantic-release and published to PyPI: 1. **Commits are analyzed** for conventional commit format 2. **Version is calculated** based on commit types @@ -277,6 +277,8 @@ Releases are fully automated using semantic-release: 4. **Version files are updated** (pyproject.toml, CHANGELOG.md) 5. **Changes are committed** back to repository using GitHub App 6. **GitHub release is created** with release notes and tags +7. **Package is built** (wheel and source distribution) +8. **Published to PyPI** using Trusted Publishing (OIDC) ### Version Impact @@ -285,6 +287,16 @@ Releases are fully automated using semantic-release: - `feat!:` or `BREAKING CHANGE:` → Major version (0.1.0 → 1.0.0) - Other types → No release +### PyPI Publishing + +The project uses PyPI Trusted Publishing for secure, automated package publishing. No API tokens are required. + +**For Repository Administrators**: See [PyPI Setup Documentation](../docs/articles/devs/pypi-setup.md) for configuration details. + +**Branch Strategy**: +- **main branch**: Production releases (e.g., v1.0.0) published to PyPI +- **dev branch**: Pre-releases (e.g., v1.0.0-dev.1) published to PyPI with pre-release flag + ## Getting Help - **Issues**: Report bugs or request features via GitHub Issues @@ -299,4 +311,4 @@ Releases are fully automated using semantic-release: - Help others learn and grow - Follow GitHub's community guidelines -Thank you for contributing to {{PROJECT_NAME}}! 🚀 +Thank you for contributing to mcp-langfuse! 🚀 diff --git a/README.md b/README.md index ec1d5fa..7451c50 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,35 @@ -# {{PROJECT_NAME}} +# mcp-langfuse -{{PROJECT_DESCRIPTION}} +> **⚠️ PROJECT SETUP STAGE - NOT READY FOR USE** +> +> This repository is currently in the initial setup phase. The project structure, tooling, and CI/CD pipelines are being configured. **No functionality has been implemented yet** - this is an empty shell. +> +> **Do not use this package in production or development environments.** +> +> Follow the repository for updates on when the first functional release becomes available. + +MCP server for Langfuse REST API with enhanced trace analysis tools ## Installation ### From Source ```bash -git clone https://github.com/CrackingShells/{{PROJECT_NAME}}.git -cd {{PROJECT_NAME}} +git clone https://github.com/CrackingShells/mcp-langfuse.git +cd mcp-langfuse pip install -e . ``` ### From PyPI (when available) ```bash -pip install {{PROJECT_NAME}} +pip install mcp-langfuse ``` ## Quick Start ```python -import {{PACKAGE_NAME}} +import mcp_langfuse # Add basic usage example here ``` @@ -32,8 +40,8 @@ import {{PACKAGE_NAME}} ```bash # Clone the repository -git clone https://github.com/CrackingShells/{{PROJECT_NAME}}.git -cd {{PROJECT_NAME}} +git clone https://github.com/CrackingShells/mcp-langfuse.git +cd mcp-langfuse # Install in development mode pip install -e . 
@@ -112,6 +120,6 @@ This project is licensed under the GNU Affero General Public License v3 - see th ## Links -- **Homepage**: https://github.com/CrackingShells/{{PROJECT_NAME}} -- **Bug Reports**: https://github.com/CrackingShells/{{PROJECT_NAME}}/issues -- **Documentation**: https://crackingshells.github.io/{{PROJECT_NAME}}/ +- **Homepage**: https://github.com/CrackingShells/mcp-langfuse +- **Bug Reports**: https://github.com/CrackingShells/mcp-langfuse/issues +- **Documentation**: https://crackingshells.github.io/mcp-langfuse/ diff --git a/TEMPLATE_USAGE.md b/TEMPLATE_USAGE.md deleted file mode 100644 index 23ddf0a..0000000 --- a/TEMPLATE_USAGE.md +++ /dev/null @@ -1,228 +0,0 @@ -# Python Repository Template Usage Guide - -## Overview - -This minimal Python repository template provides the essential components needed to create a new Cracking Shells Python project. It includes only the proven, necessary patterns extracted from existing organizational repositories. - -## Template Variables - -When creating a new repository from this template, replace these variables throughout all files: - -- `{{PROJECT_NAME}}`: The repository/project name (e.g., "my-awesome-tool") -- `{{PACKAGE_NAME}}`: The Python package name in snake_case (e.g., "my_awesome_tool") -- `{{PROJECT_DESCRIPTION}}`: Brief description of what the project does - -## Quick Start - -### 1. Create New Repository - -1. Copy this template directory to your new project location -2. Replace all template variables in all files -3. Rename the `{{PACKAGE_NAME}}` directory to your actual package name -4. Initialize git repository - -### 2. Variable Replacement - -Replace these variables in all files: - -```bash -# Example replacements: -{{PROJECT_NAME}} → "Hatch-Analytics" -{{PACKAGE_NAME}} → "hatch_analytics" -{{PROJECT_DESCRIPTION}} → "Analytics tools for Hatch ecosystem data" -``` - -**PowerShell commands for Windows:** - -```powershell -Get-ChildItem -Recurse -Include *.md,*.yml,*.toml,*.py,*.json | ForEach-Object { - (Get-Content $_.FullName) -replace '{{PROJECT_NAME}}', 'YourProjectName' | Set-Content $_.FullName - (Get-Content $_.FullName) -replace '{{PACKAGE_NAME}}', 'your_package_name' | Set-Content $_.FullName - (Get-Content $_.FullName) -replace '{{PROJECT_DESCRIPTION}}', 'Your project description' | Set-Content $_.FullName -} -``` - -**Bash commands for Linux/Mac:** - -```bash -find . -type f \( -name "*.md" -o -name "*.yml" -o -name "*.toml" -o -name "*.py" -o -name "*.json" \) -exec sed -i 's/{{PROJECT_NAME}}/YourProjectName/g' {} + -find . -type f \( -name "*.md" -o -name "*.yml" -o -name "*.toml" -o -name "*.py" -o -name "*.json" \) -exec sed -i 's/{{PACKAGE_NAME}}/your_package_name/g' {} + -find . -type f \( -name "*.md" -o -name "*.yml" -o -name "*.toml" -o -name "*.py" -o -name "*.json" \) -exec sed -i 's/{{PROJECT_DESCRIPTION}}/Your project description/g' {} + -``` - -**Files to update:** - -- `pyproject.toml` -- `package.json` -- `README.md` -- `CONTRIBUTING.md` -- `.github/workflows/semantic-release.yml` -- `.github/workflows/commitlint.yml` -- `.commitlintrc.json` -- `.releaserc.json` -- `tests/test_basic.py` -- `{{PACKAGE_NAME}}/__init__.py` -- `{{PACKAGE_NAME}}/core.py` -- `mkdocs.yml` -- `.readthedocs.yaml` -- `.pre-commit-config.yaml` -- `docs/index.md` -- `docs/articles/**/*.md` (all documentation files) - -### 3. 
Initial Setup - -```bash -# Navigate to your new project directory -cd path/to/{{PROJECT_NAME}} - -# Install in development mode with documentation and dev tools -pip install -e .[docs,dev] - -# Install Node.js dependencies for semantic release -npm install - -# Set up pre-commit hooks -pre-commit install - -# Run initial tests to verify everything works -python -m unittest discover tests -v - -# Test basic import -python -c "import your_package_name; print('Success!')" - -# Serve documentation locally -mkdocs serve - -# Run code quality checks -pre-commit run --all-files -``` - -### 4. First Commit - -```bash -# Initialize git repository -git init -git add . - -# Use conventional commit format -git commit -m "feat: initial project setup from template" - -# Add remote and push -git remote add origin https://github.com/CrackingShells/your-project-name.git -git push -u origin main -``` - -## What's Included - -### Essential Configuration - -- **`pyproject.toml`**: Standard Python project configuration with organizational defaults -- **`package.json`**: Semantic release and conventional commit tooling -- **`LICENSE`**: GNU AGPL v3 (organizational standard) - -### GitHub Integration - -- **`.github/workflows/semantic-release.yml`**: Automated versioning and releases -- **`.github/workflows/commitlint.yml`**: Conventional commit validation - - **`.commitlintrc.json`**: Commit message linting rules -- **Semantic release configuration**: Automated changelog and version management - - **`.releaserc.json`**: Release configuration - -### Basic Package Structure - -- **`{{PACKAGE_NAME}}/`**: Main package directory with `__init__.py` and `core.py` -- **`tests/`**: Unittest-based testing structure (wobble-compatible) -- **Documentation**: README.md and CONTRIBUTING.md with organizational standards - -### Documentation Infrastructure - -- **Complete MkDocs setup**: Professional documentation generation with Material theme -- **mkdocstrings for API docs**: Automated API documentation from Google-style docstrings -- **ReadTheDocs integration**: Automatic documentation builds and publishing -- **Mermaid diagram support**: Built-in support for creating diagrams in documentation -- **Starter documentation structure**: Pre-configured docs/ directory with user, developer, and API sections - -### Code Quality Tools - -- **ruff for linting**: Fast Python linter with comprehensive rule set -- **black for formatting**: Opinionated code formatter for consistency -- **pre-commit hooks**: Automated checks that run on every commit - -### Development Workflow - -- **Conventional commits**: Standardized commit message format -- **Semantic versioning**: Automated version management -- **Basic testing**: Unittest framework compatible with future wobble integration -- **Pre-commit hooks**: Automated code quality checks on commit -- **Code quality automation**: Consistent formatting and linting -- **Documentation generation**: Local documentation serving and building - -## What's NOT Included (Future Enhancements) - -### Advanced Testing (Deferred) - -- **Enhanced test CLI**: Will be provided by future `wobble` framework -- **Test decorators**: Will be provided by `wobble` -- **Advanced test organization**: Will be provided by `wobble` - -**Rationale**: The organization is developing a centralized `wobble` testing framework that will provide these capabilities as a dependency. - -## Future Migration Path - -### Wobble Integration - -When the `wobble` testing framework becomes available: - -1. 
Add `wobble>=1.0.0` to dependencies in `pyproject.toml` -2. `wobble` will provide enhanced CLI, decorators, and test organization -3. Existing unittest tests will continue to work -4. Gradually adopt `wobble` features as needed - -### Code Quality Tools - -When ready to add code quality tools: - -1. Add development dependencies to `pyproject.toml`: - - ```toml - [project.optional-dependencies] - dev = [ - "black>=23.0.0", - "isort>=5.0.0", - "flake8>=6.0.0", - "mypy>=1.0.0", - ] - ``` - -2. Add tool configurations to `pyproject.toml` -3. Add pre-commit hooks configuration -4. Update GitHub workflows to include quality checks - -### Documentation Enhancement - -When project matures: - -1. Create `docs/` directory following organizational guidelines -2. Add comprehensive user and developer documentation -3. Set up documentation generation and deployment -4. Add API reference documentation - -## Organizational Compliance - -This template ensures compliance with Cracking Shells standards: - -- **✅ Semantic release**: Automated versioning and changelog generation -- **✅ Conventional commits**: Standardized commit message format -- **✅ License**: GNU AGPL v3 organizational standard -- **✅ Python version**: Requires Python 3.12+ for consistency -- **✅ Testing framework**: Uses unittest (wobble-compatible) -- **✅ Package structure**: Follows organizational patterns - -## Support and Questions - -- **Template issues**: Report problems with the template itself -- **Project-specific help**: Use your project's issue tracker -- **Organizational standards**: Refer to `.github/instructions/` documentation -- **Wobble framework**: Wait for official release and documentation - -This minimal template provides a solid foundation that can be enhanced as your project grows and organizational standards evolve. diff --git a/__reports__/mcp_langfuse_analysis/00-existing_mcp_servers_analysis_v0.md b/__reports__/mcp_langfuse_analysis/00-existing_mcp_servers_analysis_v0.md new file mode 100644 index 0000000..dc1d79f --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/00-existing_mcp_servers_analysis_v0.md @@ -0,0 +1,239 @@ +# Existing Langfuse MCP Servers - Analysis Report + +**Report Date**: 2025-12-03 +**Report Version**: v0 +**Purpose**: Analyze existing Langfuse MCP server implementations to identify gaps and justify new development + +--- + +## Executive Summary + +Three existing MCP servers for Langfuse were analyzed: +1. **avivsinai/langfuse-mcp** - Trace querying and debugging +2. **langfuse/mcp-server-langfuse** - Prompt management only +3. **Langfuse Native MCP** - Prompt management (official) + +**Key Finding**: None of the existing servers address the use case of **advanced trace exploration with tool call extraction, temporal ordering reconstruction, and contextual analysis**. All implementations are either basic wrappers around single API endpoints or focused exclusively on prompt management. + +--- + +## 1. avivsinai/langfuse-mcp + +**Repository**: https://github.com/avivsinai/langfuse-mcp +**Focus**: Trace data querying and debugging +**Implementation**: Python-based MCP server using Langfuse Python SDK v3 + +### Available Tools + +1. **get-traces** - List traces with filtering + - Parameters: `page`, `limit`, `userId`, `name`, `sessionId`, `fromTimestamp`, `toTimestamp`, `tags`, `version`, `release` + - Returns: Paginated list of traces with basic metadata + - Output modes: `summary`, `detailed`, `full` + +2. 
**get-trace** - Get single trace by ID + - Parameters: `traceId`, `output_mode` + - Returns: Complete trace with observations and scores + - Output modes: `summary`, `detailed`, `full` + +3. **get-observations** - List observations with filtering + - Parameters: `page`, `limit`, `name`, `userId`, `type`, `traceId`, `parentObservationId`, `fromStartTime`, `toStartTime` + - Returns: Paginated list of observations + - Output modes: `summary`, `detailed`, `full` + +4. **get-observation** - Get single observation by ID + - Parameters: `observationId`, `output_mode` + - Returns: Single observation details + - Output modes: `summary`, `detailed`, `full` + +5. **get-sessions** - List sessions with filtering + - Parameters: `page`, `limit`, `fromTimestamp`, `toTimestamp` + - Returns: Paginated list of sessions + - Output modes: `summary`, `detailed`, `full` + +6. **get-session** - Get single session by ID + - Parameters: `sessionId`, `output_mode` + - Returns: Session with associated traces (non-paginated) + - Output modes: `summary`, `detailed`, `full` + +### Capabilities + +**Strengths**: +- Direct SDK integration (uses Langfuse Python SDK v3) +- Multiple output modes for controlling verbosity +- Comprehensive filtering options matching REST API +- Caching with `cachetools` for performance +- Good error handling and logging + +**Limitations**: +- **Simple wrapper**: Each tool maps 1:1 to a single SDK/API call +- **No data processing**: Returns raw API responses with minimal transformation +- **No temporal ordering**: Cannot reconstruct time-ordered sequences from observations +- **No tool call extraction**: No specific handling of tool/function calls +- **No contextual analysis**: Cannot associate tool calls with their prompts/context +- **No graph generation**: No visualization or relationship mapping +- **No keyword search**: Basic filtering only, no full-text search across trace content +- **Session limitation**: Session endpoint returns non-paginated traces (problematic for large sessions) + +### Assessment for Use Case + +**Does NOT meet requirements** because: +- Cannot extract and isolate tool calls from observation streams +- Cannot reconstruct temporal ordering when Langfuse data lacks explicit sequencing +- No capability to search trace content by keywords +- No graph or visualization generation +- No contextual association between prompts and tool executions + +--- + +## 2. langfuse/mcp-server-langfuse + +**Repository**: https://github.com/langfuse/mcp-server-langfuse +**Focus**: Prompt management exclusively +**Implementation**: TypeScript/Node.js MCP server + +### Available Capabilities + +**MCP Prompts Specification**: +- `prompts/list` - List all available prompts with pagination +- `prompts/get` - Get and compile specific prompt with variables + +**Tools** (for non-prompt-capable clients): +- `get-prompts` - List available prompts +- `get-prompt` - Retrieve and compile specific prompt + +### Capabilities + +**Strengths**: +- Implements MCP Prompts specification properly +- Handles both text and chat prompts +- Variable compilation support +- Pagination support + +**Limitations**: +- **Prompt management ONLY**: Zero trace/observation functionality +- **Not relevant to trace analysis**: Completely different use case +- No data exploration capabilities +- No tool call handling +- No temporal analysis + +### Assessment for Use Case + +**Completely irrelevant** - This server is exclusively for prompt management and has no trace analysis capabilities whatsoever. + +--- + +## 3. 
Langfuse Native MCP Server + +**Documentation**: https://langfuse.com/docs/api-and-data-platform/features/mcp-server +**Focus**: Prompt management (official implementation) +**Implementation**: Native Langfuse feature + +### Available Tools + +**Read Operations**: +- `get_prompt` - Retrieve specific prompt +- `list_prompts` - List all prompts +- `search_prompt_versions` - Search prompt version history + +**Write Operations**: +- `create_prompt` - Create new prompt +- `update_prompt` - Update existing prompt + +### Capabilities + +**Strengths**: +- Official Langfuse implementation +- Full CRUD operations for prompts +- Version history search +- Stateless architecture (API key per project) + +**Limitations**: +- **Prompt management ONLY**: No trace/observation functionality +- **Not relevant to trace analysis**: Different domain entirely +- No data exploration capabilities +- No tool call handling +- No temporal analysis + +### Assessment for Use Case + +**Completely irrelevant** - Like the TypeScript implementation, this is exclusively for prompt management with zero trace analysis capabilities. + +--- + +## Comparative Analysis + +| Feature | avivsinai/langfuse-mcp | langfuse/mcp-server | Langfuse Native | **Required** | +|---------|------------------------|---------------------|-----------------|--------------| +| **Trace Querying** | ✅ Basic | ❌ | ❌ | ✅ Advanced | +| **Observation Querying** | ✅ Basic | ❌ | ❌ | ✅ Advanced | +| **Tool Call Extraction** | ❌ | ❌ | ❌ | ✅ **Critical** | +| **Temporal Ordering** | ❌ | ❌ | ❌ | ✅ **Critical** | +| **Keyword Search** | ❌ | ❌ | ❌ | ✅ Required | +| **Context Association** | ❌ | ❌ | ❌ | ✅ **Critical** | +| **Graph Generation** | ❌ | ❌ | ❌ | ✅ Required | +| **Data Processing** | ❌ Minimal | ❌ | ❌ | ✅ **Critical** | +| **Prompt Management** | ❌ | ✅ | ✅ | ❌ Not needed | + +--- + +## Gap Analysis + +### Critical Missing Capabilities + +1. **Tool Call Extraction** + - None of the servers can identify and extract tool/function calls from observations + - No filtering by observation type (SPAN, GENERATION, EVENT) + - No parsing of tool call structures from observation input/output + +2. **Temporal Ordering Reconstruction** + - No capability to reconstruct execution order from unordered observations + - Cannot handle sessions where observations lack explicit sequencing + - No parent-child relationship traversal for building execution trees + +3. **Contextual Analysis** + - Cannot associate tool calls with their triggering prompts + - No capability to extract surrounding context for tool executions + - No linking between user queries and resulting tool invocations + +4. **Advanced Search** + - No full-text search across trace/observation content + - No keyword matching in input/output/metadata fields + - Basic filtering only (exact matches, not semantic search) + +5. 
**Data Processing & Transformation** + - All servers return raw API responses + - No aggregation, grouping, or statistical analysis + - No graph/visualization generation + - No relationship mapping between entities + +### Why Existing Servers Are Insufficient + +**avivsinai/langfuse-mcp** is the closest to requirements but fundamentally: +- Acts as a **thin wrapper** around API calls +- Provides **no intelligence** or data processing +- Cannot **reconstruct temporal sequences** from fragmented data +- Has **no domain knowledge** about tool calls or agent execution patterns +- Offers **no analytical capabilities** beyond basic filtering + +**Prompt management servers** are completely orthogonal to the use case. + +--- + +## Conclusion + +**None of the existing MCP servers are adapted to the use case of advanced trace exploration with tool call extraction and temporal analysis.** + +The existing implementations fall into two categories: +1. **Basic API wrappers** (avivsinai) - Provide raw data access without processing +2. **Prompt management** (langfuse official, TypeScript) - Different domain entirely + +**A new MCP server is required** that: +- Understands agent execution patterns and tool call structures +- Can reconstruct temporal ordering from fragmented observation data +- Provides intelligent extraction and contextual association +- Offers analytical capabilities beyond raw data retrieval +- Generates insights, graphs, and processed outputs + +--- + +**Next Steps**: Analyze Langfuse REST API capabilities to design advanced tools for the new MCP server. diff --git a/__reports__/mcp_langfuse_analysis/01-rest_api_capabilities_analysis_v0.md b/__reports__/mcp_langfuse_analysis/01-rest_api_capabilities_analysis_v0.md new file mode 100644 index 0000000..b172c80 --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/01-rest_api_capabilities_analysis_v0.md @@ -0,0 +1,515 @@ +# Langfuse REST API Capabilities - Analysis Report + +**Report Date**: 2025-12-03 +**Report Version**: v0 +**Purpose**: Analyze Langfuse REST API to identify capabilities for building advanced trace exploration tools + +--- + +## Executive Summary + +The Langfuse REST API provides comprehensive endpoints for trace data retrieval, with powerful filtering and querying capabilities that are **underutilized by existing MCP servers**. Key findings: + +- **Rich observation data** includes parent-child relationships, timestamps, and metadata +- **Advanced filtering** supports complex queries with JSON filter syntax +- **Metrics API** enables aggregation and analytics +- **Session grouping** provides natural boundaries for temporal reconstruction +- **Observation types** (SPAN, GENERATION, EVENT) enable tool call identification + +**Critical Insight**: The API provides all necessary primitives to build sophisticated trace analysis tools - existing servers simply don't leverage them. + +--- + +## 1. 
Core Data Retrieval Endpoints + +### 1.1 Traces API + +**Endpoint**: `GET /api/public/traces` + +**Query Parameters**: +- **Pagination**: `page`, `limit` +- **Filtering**: `userId`, `name`, `sessionId`, `tags`, `version`, `release`, `environment` +- **Time range**: `fromTimestamp`, `toTimestamp` +- **Ordering**: `orderBy` (format: `field.asc|desc`) + - Available fields: `id`, `timestamp`, `name`, `userId`, `release`, `version`, `public`, `bookmarked`, `sessionId` +- **Field selection**: `fields` parameter for controlling response size + - Groups: `core` (always included), `io`, `scores`, `observations`, `metrics` + - Example: `fields=core,scores,metrics` (excludes observations and I/O) +- **Advanced filtering**: `filter` parameter (JSON string with complex conditions) + +**Response Structure** (`TraceWithFullDetails`): +```json +{ + "id": "string", + "timestamp": "datetime", + "name": "string", + "userId": "string", + "sessionId": "string", + "release": "string", + "version": "string", + "metadata": {}, + "tags": [], + "input": {}, + "output": {}, + "htmlPath": "string", + "latency": "number (seconds)", + "totalCost": "number (USD)", + "observations": [ObservationsView], + "scores": [ScoreV1] +} +``` + +**Key Capabilities**: +- Retrieve traces with full observation trees +- Filter by session for grouping related traces +- Time-based ordering for temporal analysis +- Cost and latency metrics included + +### 1.2 Observations API + +**Endpoint**: `GET /api/public/observations` + +**Query Parameters**: +- **Pagination**: `page`, `limit` +- **Filtering**: `name`, `userId`, `type`, `traceId`, `parentObservationId`, `level`, `environment`, `version` +- **Time range**: `fromStartTime`, `toStartTime` +- **Advanced filtering**: `filter` parameter (JSON string) + +**Observation Types**: +- `SPAN` - Execution spans (typically tool calls, function executions) +- `GENERATION` - LLM generations (model calls) +- `EVENT` - Discrete events (logging, checkpoints) + +**Response Structure** (`Observation`): +```json +{ + "id": "string", + "traceId": "string", + "type": "SPAN|GENERATION|EVENT", + "name": "string", + "startTime": "datetime", + "endTime": "datetime", + "completionStartTime": "datetime", + "model": "string", + "modelParameters": {}, + "input": {}, + "output": {}, + "metadata": {}, + "usage": {}, + "usageDetails": {}, + "costDetails": {}, + "level": "DEBUG|DEFAULT|WARNING|ERROR", + "statusMessage": "string", + "parentObservationId": "string", + "promptId": "string", + "version": "string", + "environment": "string" +} +``` + +**Key Capabilities**: +- **Parent-child relationships**: `parentObservationId` enables tree reconstruction +- **Temporal data**: `startTime`, `endTime`, `completionStartTime` for ordering +- **Type discrimination**: Filter by `type` to isolate tool calls (SPANs) +- **Hierarchical structure**: Build execution trees from parent references +- **Rich metadata**: Input/output capture for context extraction + +### 1.3 Sessions API + +**Endpoint**: `GET /api/public/sessions` + +**Query Parameters**: +- **Pagination**: `page`, `limit` +- **Time range**: `fromTimestamp`, `toTimestamp` +- **Environment**: `environment` filter + +**Endpoint**: `GET /api/public/sessions/{sessionId}` + +**Response Structure** (`SessionWithTraces`): +```json +{ + "id": "string", + "createdAt": "datetime", + "projectId": "string", + "environment": "string", + "traces": [Trace] +} +``` + +**Key Capabilities**: +- **Natural grouping**: Sessions group related traces +- **Temporal boundaries**: Session provides 
context window +- **Warning**: Single session endpoint returns non-paginated traces (use `GET /api/public/traces?sessionId=` for large sessions) + +--- + +## 2. Advanced Filtering Capabilities + +### 2.1 JSON Filter Syntax + +Both traces and observations support complex filtering via JSON `filter` parameter: + +**Filter Structure**: +```json +[ + { + "type": "string|number|datetime|stringOptions|categoryOptions|arrayOptions|stringObject|numberObject|boolean|null", + "column": "field_name", + "operator": "=|>|<|>=|<=|contains|starts with|ends with|any of|none of|all of|is null|is not null", + "value": "any", + "key": "string (for nested fields like metadata)" + } +] +``` + +**Operators by Type**: +- **datetime**: `>`, `<`, `>=`, `<=` +- **string**: `=`, `contains`, `does not contain`, `starts with`, `ends with` +- **stringOptions**: `any of`, `none of` +- **arrayOptions**: `any of`, `none of`, `all of` +- **number**: `=`, `>`, `<`, `>=`, `<=` +- **boolean**: `=`, `<>` +- **null**: `is null`, `is not null` + +**Available Columns (Observations)**: +- Core: `id`, `type`, `name`, `traceId`, `startTime`, `endTime` +- Metadata: `metadata.*` (using `key` parameter) +- Model: `model`, `modelParameters.*` +- Hierarchy: `parentObservationId` +- Status: `level`, `statusMessage` + +**Example Use Cases**: +```json +// Find all tool calls (SPANs) with "search" in name +[ + {"type": "string", "column": "type", "operator": "=", "value": "SPAN"}, + {"type": "string", "column": "name", "operator": "contains", "value": "search"} +] + +// Find observations with specific metadata +[ + {"type": "stringObject", "column": "metadata", "operator": "=", "value": "tool_call", "key": "type"} +] + +// Find slow operations (>5 seconds) +[ + {"type": "number", "column": "latency", "operator": ">", "value": 5} +] +``` + +### 2.2 Field Selection + +**Traces `fields` parameter** enables response size optimization: +- `core` - Always included (id, timestamp, name, userId, sessionId, etc.) +- `io` - Input, output, metadata +- `scores` - Score data +- `observations` - Full observation tree +- `metrics` - totalCost, latency + +**Strategy**: Use `fields=core` for listing, then fetch full details for specific traces. + +--- + +## 3. Metrics and Analytics API + +**Endpoint**: `GET /api/public/metrics` + +**Query Structure** (JSON string): +```json +{ + "view": "traces|observations|scores-numeric|scores-categorical", + "dimensions": [ + {"field": "name|userId|sessionId|..."} + ], + "metrics": [ + { + "measure": "count|latency|value|cost", + "aggregation": "count|sum|avg|p95|histogram" + } + ], + "filters": [...], + "timeDimension": { + "granularity": "minute|hour|day|week|month|auto" + }, + "fromTimestamp": "ISO datetime", + "toTimestamp": "ISO datetime", + "orderBy": [ + {"field": "...", "direction": "asc|desc"} + ], + "config": { + "bins": 10, + "row_limit": 1000 + } +} +``` + +**Key Capabilities**: +- **Aggregation**: Count, sum, average, percentiles, histograms +- **Grouping**: By any dimension (name, userId, sessionId, etc.) +- **Time series**: Group by time with configurable granularity +- **Multi-metric**: Calculate multiple metrics in single query +- **Histogram support**: Distribution analysis with configurable bins + +**Use Cases for Tool Call Analysis**: +- Count tool calls by type/name +- Average latency per tool +- Tool usage over time +- Cost analysis per tool +- Error rate by tool type + +--- + +## 4. 
Data Structures for Temporal Reconstruction + +### 4.1 Observation Hierarchy + +**Parent-Child Relationships**: +- Each observation has optional `parentObservationId` +- Root observations have `parentObservationId: null` +- Tree structure: Trace → Root Observations → Child Observations → ... + +**Temporal Markers**: +- `startTime` - When observation began +- `endTime` - When observation completed +- `completionStartTime` - When completion phase started (for streaming) + +**Reconstruction Strategy**: +1. Fetch all observations for a trace/session +2. Build tree using `parentObservationId` references +3. Sort siblings by `startTime` for temporal ordering +4. Handle missing timestamps with parent context + +### 4.2 Session-Based Grouping + +**Session as Context Window**: +- Session groups related traces +- Traces within session share temporal context +- Session `createdAt` provides baseline timestamp + +**Multi-Trace Ordering**: +1. Fetch all traces for session (ordered by timestamp) +2. For each trace, fetch observations +3. Merge observation streams using timestamps +4. Maintain trace boundaries for context + +--- + +## 5. Tool Call Identification Patterns + +### 5.1 Observation Type Filtering + +**SPAN observations** typically represent: +- Function/tool calls +- External API calls +- Database queries +- Custom instrumented code blocks + +**Identification Strategy**: +``` +Filter: type = "SPAN" +Additional: name contains tool/function keywords +Check: input/output structure for tool call patterns +``` + +### 5.2 Metadata Inspection + +**Common metadata patterns**: +```json +{ + "metadata": { + "type": "tool_call", + "tool_name": "search_web", + "tool_id": "call_abc123" + } +} +``` + +**Name patterns**: +- `tool_*` prefix +- `function_*` prefix +- Specific tool names (e.g., `search_web`, `calculator`, `file_read`) + +### 5.3 Input/Output Structure + +**Tool call input pattern**: +```json +{ + "input": { + "tool": "search_web", + "arguments": { + "query": "...", + "max_results": 10 + } + } +} +``` + +**Tool call output pattern**: +```json +{ + "output": { + "result": [...], + "status": "success" + } +} +``` + +--- + +## 6. Context Association Strategies + +### 6.1 Prompt-to-Tool Linking + +**Pattern 1: Parent-Child Relationship** +- GENERATION (LLM call) → SPAN (tool call) +- Parent observation contains prompt +- Child observation is tool execution + +**Pattern 2: Temporal Proximity** +- Find GENERATION observations before SPAN +- Within same trace +- Time gap < threshold (e.g., 1 second) + +**Pattern 3: Metadata References** +- Tool call metadata references prompt ID +- Use `promptId` field in observation + +### 6.2 Multi-Hop Context + +**Scenario**: User query → LLM → Tool → LLM → Response + +**Reconstruction**: +1. Identify root observation (user input) +2. Traverse children to find GENERATION +3. Find SPAN children of GENERATION (tools) +4. Find subsequent GENERATION (response synthesis) +5. Build narrative: Query → Reasoning → Tool Use → Synthesis + +--- + +## 7. 
API Capabilities Summary + +### What the API Provides + +✅ **Complete observation data** with parent-child relationships +✅ **Temporal markers** for ordering reconstruction +✅ **Type discrimination** for tool call identification +✅ **Rich metadata** for context extraction +✅ **Advanced filtering** for complex queries +✅ **Aggregation capabilities** via Metrics API +✅ **Session grouping** for natural boundaries +✅ **Field selection** for performance optimization + +### What the API Does NOT Provide + +❌ **Pre-built temporal ordering** - Must reconstruct from timestamps +❌ **Tool call extraction** - Must identify from type/name/metadata +❌ **Context association** - Must infer from relationships +❌ **Graph visualization** - Must generate from data +❌ **Keyword search** - Must implement client-side or use filters +❌ **Semantic search** - No embedding/vector search + +--- + +## 8. Design Implications for New MCP Server + +### 8.1 Required Processing Capabilities + +1. **Tree Reconstruction** + - Build observation trees from `parentObservationId` references + - Handle orphaned observations (missing parents) + - Sort siblings by `startTime` + +2. **Temporal Ordering** + - Sort observations across traces within session + - Handle missing/null timestamps + - Infer ordering from parent-child relationships + +3. **Tool Call Extraction** + - Filter by `type = "SPAN"` + - Pattern match on `name` field + - Inspect `metadata` for tool indicators + - Parse `input`/`output` structures + +4. **Context Association** + - Link tools to triggering prompts (parent GENERATION) + - Extract surrounding observations for context + - Build execution narratives + +5. **Keyword Search** + - Search across `input`, `output`, `metadata` fields + - Support regex/pattern matching + - Aggregate results across observations + +### 8.2 Optimization Strategies + +1. **Pagination Management** + - Fetch observations in batches + - Cache results for repeated queries + - Use `fields` parameter to reduce payload + +2. **Filter Optimization** + - Use JSON filters for server-side filtering + - Combine multiple conditions in single query + - Leverage indexes (type, traceId, sessionId) + +3. **Metrics Pre-computation** + - Use Metrics API for aggregations + - Cache statistics for common queries + - Avoid client-side aggregation when possible + +### 8.3 Tool Design Patterns + +**Pattern 1: Hierarchical Retrieval** +``` +Session → Traces → Observations → Tool Calls +``` + +**Pattern 2: Filter-First** +``` +Filter observations by type/name → Fetch full details → Process +``` + +**Pattern 3: Context Expansion** +``` +Find tool call → Get parent → Get siblings → Build context +``` + +--- + +## 9. Comparison with Existing Servers + +| Capability | API Provides | avivsinai Uses | **New Server Should** | +|------------|--------------|----------------|----------------------| +| **Observation filtering** | ✅ Advanced JSON | ✅ Basic params | ✅ Full JSON filters | +| **Parent-child data** | ✅ Yes | ✅ Returns raw | ✅ Build trees | +| **Temporal markers** | ✅ Yes | ✅ Returns raw | ✅ Reconstruct order | +| **Type discrimination** | ✅ Yes | ✅ Returns raw | ✅ Extract tool calls | +| **Metadata** | ✅ Rich | ✅ Returns raw | ✅ Parse patterns | +| **Metrics API** | ✅ Powerful | ❌ Not used | ✅ Leverage for analytics | +| **Field selection** | ✅ Yes | ❌ Not used | ✅ Optimize queries | + +**Key Insight**: The API provides all necessary primitives. Existing servers don't process the data - they just pass it through. A new server must add intelligence. 
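
To make the "Filter-First" and tree-reconstruction steps from section 8.1 concrete, below is a minimal Python sketch of the client-side processing layer this report argues for. It assumes observations are plain dicts shaped like the Observation schema in section 1.2 (only `id`, `parentObservationId`, `startTime`, `type`, and `name` are used); the sample data and function name are illustrative, not part of any existing SDK.

```python
"""Sketch of sibling-ordered tree reconstruction from raw observations (section 8.1)."""
from collections import defaultdict
from datetime import datetime, timezone


def build_ordered_trees(observations):
    """Group observations by parentObservationId and sort siblings by startTime."""
    children = defaultdict(list)
    by_id = {}
    for obs in observations:
        by_id[obs["id"]] = obs
        children[obs.get("parentObservationId")].append(obs)

    def sort_key(obs):
        ts = obs.get("startTime")
        # Observations without a timestamp sort last; section 8.1 suggests
        # inferring a value from the parent instead, which is omitted here.
        if ts is None:
            return datetime.max.replace(tzinfo=timezone.utc)
        return datetime.fromisoformat(ts.replace("Z", "+00:00"))

    def attach(node):
        kids = sorted(children.get(node["id"], []), key=sort_key)
        return {**node, "children": [attach(k) for k in kids]}

    # Roots are observations whose parent is null or unknown (orphans).
    roots = [o for o in observations
             if o.get("parentObservationId") not in by_id]
    return [attach(r) for r in sorted(roots, key=sort_key)]


if __name__ == "__main__":
    sample = [
        {"id": "gen-1", "parentObservationId": None, "type": "GENERATION",
         "name": "plan", "startTime": "2025-12-03T10:00:00Z"},
        {"id": "span-1", "parentObservationId": "gen-1", "type": "SPAN",
         "name": "tool_search_web", "startTime": "2025-12-03T10:00:02Z"},
        {"id": "span-2", "parentObservationId": "gen-1", "type": "SPAN",
         "name": "tool_calculator", "startTime": "2025-12-03T10:00:01Z"},
    ]
    for root in build_ordered_trees(sample):
        ordered = [child["name"] for child in root["children"]]
        print(root["name"], "->", ordered)  # plan -> ['tool_calculator', 'tool_search_web']
```

The same output feeds the tool-call extraction step directly: filtering the ordered children by `type == "SPAN"` yields tool calls already paired with their parent GENERATION, which is the context-association pattern described in section 6.1.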
+ +--- + +## 10. Conclusion + +The Langfuse REST API is **feature-rich and well-designed** for building sophisticated trace analysis tools. It provides: + +- Complete data access with hierarchical relationships +- Powerful filtering and querying capabilities +- Aggregation and analytics support +- Performance optimization features + +**The gap is not in the API - it's in the processing layer.** Existing MCP servers act as thin wrappers, returning raw API responses without: +- Reconstructing temporal sequences +- Extracting tool calls intelligently +- Building context associations +- Generating insights or visualizations + +**A new MCP server must bridge this gap** by implementing intelligent data processing on top of the API primitives. + +--- + +**Next Steps**: Design MCP tools that leverage API capabilities to provide advanced trace exploration and tool call analysis. diff --git a/__reports__/mcp_langfuse_analysis/02-new_mcp_server_perspective_v0.md b/__reports__/mcp_langfuse_analysis/02-new_mcp_server_perspective_v0.md new file mode 100644 index 0000000..2d1ddd4 --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/02-new_mcp_server_perspective_v0.md @@ -0,0 +1,603 @@ +# New Langfuse MCP Server - Perspective and Design + +**Report Date**: 2025-12-03 +**Report Version**: v0 +**Purpose**: Justify and outline design for a new Langfuse MCP server focused on advanced trace exploration + +--- + +## Executive Summary + +**Why Another MCP Server?** + +Existing Langfuse MCP servers are either basic API wrappers or focused on prompt management. None provide **intelligent trace exploration** with tool call extraction, temporal reconstruction, and contextual analysis. The Langfuse REST API provides all necessary primitives, but requires a **processing layer** to transform raw data into actionable insights. + +**Target Use Case**: Scraping tool calls with prompt context from agent execution traces, with temporal ordering reconstruction from fragmented observation data. + +**MVP Scope**: Tool call extraction with context association, leveraging session grouping and temporal reconstruction. + +--- + +## 1. Problem Statement + +### 1.1 Current Limitations + +**Existing servers provide**: +- Raw API responses without processing +- Basic filtering (parameter pass-through) +- No temporal reconstruction +- No tool call identification +- No context association + +**What's needed**: +- Intelligent extraction of tool calls from observation streams +- Temporal ordering reconstruction when data lacks explicit sequencing +- Context association (tool calls ↔ prompts) +- Keyword search across trace content +- Graph generation for execution visualization + +### 1.2 Specific Challenge: Temporal Ordering + +**Problem**: Langfuse observations may lack explicit sequencing within a session. Data is a "sea of individual observations" grouped only by `sessionId` or `traceId`. + +**Solution Requirements**: +1. Reconstruct execution order from timestamps (`startTime`, `endTime`) +2. Use parent-child relationships (`parentObservationId`) for hierarchy +3. Handle missing/null timestamps with contextual inference +4. Sort observations across multiple traces within a session +5. Maintain causal relationships (prompt → tool → response) + +--- + +## 2. 
Why a New Server is Justified + +### 2.1 Gap Analysis + +| Requirement | Existing Servers | New Server | +|-------------|------------------|------------| +| **Tool call extraction** | ❌ None | ✅ Type-based + pattern matching | +| **Temporal reconstruction** | ❌ None | ✅ Timestamp + hierarchy analysis | +| **Context association** | ❌ None | ✅ Parent-child + proximity | +| **Keyword search** | ❌ None | ✅ Content search across fields | +| **Graph generation** | ❌ None | ✅ Execution flow visualization | +| **Data processing** | ❌ Minimal | ✅ Intelligent transformation | + +### 2.2 Value Proposition + +**For AI Agents**: +- Understand tool usage patterns in agent executions +- Extract training data (prompt → tool → result sequences) +- Debug agent behavior with temporal context +- Analyze tool effectiveness and failure modes + +**For Developers**: +- Visualize agent execution flows +- Identify bottlenecks and optimization opportunities +- Search for specific tool usage patterns +- Generate reports on tool call statistics + +**For Research**: +- Study agent reasoning patterns +- Analyze tool selection strategies +- Build datasets for fine-tuning +- Evaluate agent performance metrics + +--- + +## 3. Design Principles + +### 3.1 Intelligence Over Wrapping + +**Principle**: Add value through data processing, not just API access. + +**Implementation**: +- Parse and interpret observation structures +- Reconstruct relationships and sequences +- Generate insights and summaries +- Transform raw data into actionable information + +### 3.2 Context-Aware Extraction + +**Principle**: Tool calls are meaningless without context. + +**Implementation**: +- Always associate tool calls with triggering prompts +- Include surrounding observations for narrative +- Capture input/output for complete picture +- Maintain temporal relationships + +### 3.3 Flexible Granularity + +**Principle**: Support both high-level overview and deep-dive analysis. + +**Implementation**: +- Summary mode: Tool call counts, types, success rates +- Detailed mode: Full tool call data with context +- Drill-down: From session → trace → observation → tool call + +### 3.4 Performance Optimization + +**Principle**: Minimize API calls, maximize caching. + +**Implementation**: +- Use field selection to reduce payload +- Cache processed results +- Batch operations when possible +- Leverage server-side filtering + +--- + +## 4. MVP Scope: Tool Call Scraper + +### 4.1 Core Functionality + +**Goal**: Extract tool calls with prompt context from agent execution traces. + +**Deliverables**: +1. Identify tool calls (SPAN observations) +2. Extract tool name, arguments, results +3. Associate with triggering prompt (parent GENERATION) +4. Reconstruct temporal ordering within session +5. Return structured data for analysis + +### 4.2 MVP Tools (5 Tools) + +#### Tool 1: `extract_tool_calls_from_session` + +**Purpose**: Extract all tool calls from a session with context. 
+ +**Parameters**: +- `session_id` (required): Session identifier +- `include_context` (optional, default=true): Include prompt context +- `time_ordered` (optional, default=true): Sort by execution time +- `filter_tool_names` (optional): List of tool names to include + +**Returns**: +```json +{ + "session_id": "string", + "session_created_at": "datetime", + "total_tool_calls": "number", + "tool_calls": [ + { + "tool_call_id": "string", + "tool_name": "string", + "tool_type": "string", + "timestamp": "datetime", + "duration_seconds": "number", + "arguments": {}, + "result": {}, + "status": "success|error", + "context": { + "prompt": "string", + "prompt_observation_id": "string", + "trace_id": "string", + "user_id": "string" + } + } + ] +} +``` + +**Processing Logic**: +1. Fetch all traces for session (ordered by timestamp) +2. For each trace, fetch observations filtered by `type=SPAN` +3. For each SPAN, identify parent GENERATION (prompt) +4. Extract tool name from observation name/metadata +5. Parse arguments from input, results from output +6. Sort by startTime for temporal ordering +7. Return structured data + +#### Tool 2: `extract_tool_calls_from_trace` + +**Purpose**: Extract tool calls from a single trace. + +**Parameters**: +- `trace_id` (required): Trace identifier +- `include_context` (optional, default=true): Include prompt context +- `build_execution_tree` (optional, default=false): Return hierarchical structure + +**Returns**: +```json +{ + "trace_id": "string", + "trace_name": "string", + "trace_timestamp": "datetime", + "total_tool_calls": "number", + "tool_calls": [...], + "execution_tree": { + "root": { + "observation_id": "string", + "type": "GENERATION", + "children": [ + { + "observation_id": "string", + "type": "SPAN", + "tool_name": "string", + "children": [...] + } + ] + } + } +} +``` + +**Processing Logic**: +1. Fetch trace with full observations +2. Filter observations by `type=SPAN` +3. Build parent-child tree using `parentObservationId` +4. Extract tool call data from SPANs +5. Associate with parent GENERATION for context +6. Optionally return tree structure + +#### Tool 3: `search_tool_calls_by_keyword` + +**Purpose**: Search for tool calls containing specific keywords in input/output. + +**Parameters**: +- `keyword` (required): Search term +- `search_fields` (optional, default=["input", "output", "metadata"]): Fields to search +- `session_id` (optional): Limit to specific session +- `from_timestamp` (optional): Start of time range +- `to_timestamp` (optional): End of time range +- `limit` (optional, default=50): Max results + +**Returns**: +```json +{ + "keyword": "string", + "total_matches": "number", + "matches": [ + { + "tool_call_id": "string", + "tool_name": "string", + "matched_field": "input|output|metadata", + "matched_content": "string (excerpt)", + "timestamp": "datetime", + "trace_id": "string", + "session_id": "string", + "context": {...} + } + ] +} +``` + +**Processing Logic**: +1. Fetch observations filtered by `type=SPAN` and time range +2. For each observation, search specified fields for keyword +3. Extract matching content with context window +4. Associate with parent for prompt context +5. Sort by relevance or timestamp +6. Return matches with excerpts + +#### Tool 4: `get_tool_call_statistics` + +**Purpose**: Generate statistics on tool usage patterns. 
+ +**Parameters**: +- `session_id` (optional): Specific session +- `from_timestamp` (optional): Start of time range +- `to_timestamp` (optional): End of time range +- `group_by` (optional, default="tool_name"): Grouping dimension + +**Returns**: +```json +{ + "time_range": { + "from": "datetime", + "to": "datetime" + }, + "total_tool_calls": "number", + "unique_tools": "number", + "statistics": [ + { + "tool_name": "string", + "call_count": "number", + "success_rate": "number (0-1)", + "avg_duration_seconds": "number", + "total_cost_usd": "number", + "error_count": "number" + } + ] +} +``` + +**Processing Logic**: +1. Use Metrics API to aggregate tool call data +2. Filter observations by `type=SPAN` +3. Group by tool name (from observation name) +4. Calculate: count, success rate, avg duration, cost +5. Return sorted by call count + +#### Tool 5: `reconstruct_execution_timeline` + +**Purpose**: Build time-ordered execution timeline for a session. + +**Parameters**: +- `session_id` (required): Session identifier +- `include_generations` (optional, default=true): Include LLM calls +- `include_tool_calls` (optional, default=true): Include tool calls +- `include_events` (optional, default=false): Include EVENT observations + +**Returns**: +```json +{ + "session_id": "string", + "timeline": [ + { + "timestamp": "datetime", + "type": "GENERATION|SPAN|EVENT", + "observation_id": "string", + "name": "string", + "duration_seconds": "number", + "summary": "string", + "parent_id": "string", + "children_count": "number" + } + ], + "total_duration_seconds": "number", + "observation_count": "number" +} +``` + +**Processing Logic**: +1. Fetch all traces for session +2. Fetch all observations for traces +3. Filter by requested types (GENERATION, SPAN, EVENT) +4. Sort by `startTime` (handle nulls with parent context) +5. Build timeline with parent-child indicators +6. Calculate total duration and counts + +--- + +## 5. Technical Architecture + +### 5.1 Data Flow + +``` +MCP Client (AI Agent) + ↓ +MCP Tool Call + ↓ +Tool Handler (Python) + ↓ +Langfuse REST API (filtered queries) + ↓ +Data Processor (tree building, extraction, ordering) + ↓ +Result Formatter (structured JSON) + ↓ +MCP Response +``` + +### 5.2 Core Components + +**1. API Client Layer** +- Langfuse SDK wrapper +- Request optimization (field selection, batching) +- Error handling and retry logic +- Response caching + +**2. Data Processing Layer** +- Tree builder (parent-child relationships) +- Temporal sorter (timestamp-based ordering) +- Tool call extractor (type + pattern matching) +- Context associator (prompt linking) + +**3. Search Engine** +- Keyword matcher (regex/fuzzy) +- Field selector (input/output/metadata) +- Result ranker (relevance scoring) + +**4. Analytics Engine** +- Aggregation calculator (counts, averages, percentiles) +- Statistics generator (success rates, costs) +- Metrics API wrapper + +**5. 
MCP Interface Layer** +- Tool registration and routing +- Parameter validation +- Response formatting +- Error translation + +### 5.3 Key Algorithms + +**Algorithm 1: Temporal Reconstruction** +```python +def reconstruct_timeline(observations): + # Build parent-child map + tree = build_tree(observations) + + # Sort by startTime (handle nulls) + sorted_obs = [] + for obs in observations: + if obs.startTime: + sorted_obs.append(obs) + else: + # Infer from parent or siblings + inferred_time = infer_timestamp(obs, tree) + obs.startTime = inferred_time + sorted_obs.append(obs) + + sorted_obs.sort(key=lambda x: x.startTime) + return sorted_obs +``` + +**Algorithm 2: Tool Call Extraction** +```python +def extract_tool_calls(observations): + tool_calls = [] + + for obs in observations: + # Filter by type + if obs.type != "SPAN": + continue + + # Pattern match on name + if not is_tool_call(obs.name, obs.metadata): + continue + + # Extract tool data + tool_call = { + "id": obs.id, + "name": extract_tool_name(obs), + "arguments": parse_arguments(obs.input), + "result": parse_result(obs.output), + "timestamp": obs.startTime, + "duration": calculate_duration(obs) + } + + # Find parent prompt + parent = find_parent_generation(obs, observations) + if parent: + tool_call["context"] = extract_prompt_context(parent) + + tool_calls.append(tool_call) + + return tool_calls +``` + +**Algorithm 3: Context Association** +```python +def associate_context(tool_call_obs, all_observations): + # Strategy 1: Direct parent + parent = find_by_id(tool_call_obs.parentObservationId, all_observations) + if parent and parent.type == "GENERATION": + return extract_context(parent) + + # Strategy 2: Temporal proximity + nearby = find_observations_before( + tool_call_obs.startTime, + window_seconds=5, + all_observations + ) + for obs in nearby: + if obs.type == "GENERATION": + return extract_context(obs) + + # Strategy 3: Same trace root + trace_root = find_trace_root(tool_call_obs.traceId, all_observations) + return extract_context(trace_root) +``` + +--- + +## 6. Differentiation from Existing Servers + +### 6.1 vs. avivsinai/langfuse-mcp + +| Feature | avivsinai | New Server | +|---------|-----------|------------| +| **Data retrieval** | ✅ Raw API calls | ✅ Optimized queries | +| **Tool call extraction** | ❌ None | ✅ Intelligent parsing | +| **Temporal ordering** | ❌ Returns raw | ✅ Reconstructs timeline | +| **Context association** | ❌ None | ✅ Prompt linking | +| **Search** | ❌ Basic filters | ✅ Keyword search | +| **Analytics** | ❌ None | ✅ Statistics & metrics | +| **Processing** | ❌ Pass-through | ✅ Transformation | + +### 6.2 vs. Prompt Management Servers + +**Completely different domains**: +- Prompt servers: CRUD operations on prompts +- New server: Trace analysis and tool call extraction + +**No overlap** - complementary use cases. + +--- + +## 7. 
Implementation Roadmap + +### Phase 1: MVP (Weeks 1-2) +- ✅ Tool 1: `extract_tool_calls_from_session` +- ✅ Tool 2: `extract_tool_calls_from_trace` +- ✅ Basic temporal reconstruction +- ✅ Tool call identification (type-based) +- ✅ Context association (parent-based) + +### Phase 2: Search & Analytics (Weeks 3-4) +- ✅ Tool 3: `search_tool_calls_by_keyword` +- ✅ Tool 4: `get_tool_call_statistics` +- ✅ Advanced pattern matching +- ✅ Metrics API integration + +### Phase 3: Visualization (Weeks 5-6) +- ✅ Tool 5: `reconstruct_execution_timeline` +- ✅ Graph generation (execution trees) +- ✅ Timeline visualization data +- ✅ Export formats (JSON, CSV, Mermaid) + +### Phase 4: Advanced Features (Future) +- Semantic search (embedding-based) +- Anomaly detection (unusual patterns) +- Performance profiling (bottleneck identification) +- Comparative analysis (session vs session) + +--- + +## 8. Success Criteria + +### 8.1 Functional Requirements + +✅ Extract tool calls from sessions with >95% accuracy +✅ Reconstruct temporal ordering for sessions with fragmented data +✅ Associate tool calls with prompts in >90% of cases +✅ Search tool calls by keyword with <2s response time +✅ Generate statistics for 1000+ tool calls in <5s + +### 8.2 Quality Requirements + +✅ Handle missing timestamps gracefully +✅ Support sessions with 100+ traces +✅ Cache results for repeated queries +✅ Provide clear error messages +✅ Document all tools with examples + +### 8.3 Performance Requirements + +✅ API calls optimized (field selection, batching) +✅ Response time <5s for typical queries +✅ Memory efficient (streaming for large datasets) +✅ Cache hit rate >70% for repeated queries + +--- + +## 9. Conclusion + +**A new Langfuse MCP server is justified and necessary** because: + +1. **Existing servers are insufficient**: They provide raw API access without intelligent processing +2. **The API provides primitives**: All necessary data is available, but requires transformation +3. **The use case is distinct**: Tool call extraction with context is not addressed by any existing server +4. **Value is in processing**: Intelligence layer transforms raw data into actionable insights + +**MVP focuses on core value**: Tool call scraping with context association and temporal reconstruction. + +**Technical feasibility is high**: Langfuse REST API provides all required data; implementation is primarily data processing logic. + +**Differentiation is clear**: No overlap with existing servers; complementary to prompt management tools. + +--- + +## 10. Candidate Tools Summary + +### MVP Tools (5) + +1. **extract_tool_calls_from_session** - Session-level tool call extraction with context +2. **extract_tool_calls_from_trace** - Trace-level extraction with optional tree structure +3. **search_tool_calls_by_keyword** - Content search across tool call data +4. **get_tool_call_statistics** - Aggregated analytics on tool usage +5. **reconstruct_execution_timeline** - Time-ordered execution sequence + +### Future Tools (Deferred) + +6. **compare_sessions** - Side-by-side session comparison +7. **detect_tool_call_patterns** - Pattern recognition in tool usage +8. **generate_execution_graph** - Visual graph generation (Mermaid/DOT) +9. **export_tool_call_dataset** - Export for fine-tuning/training +10. 
**analyze_tool_performance** - Performance profiling and bottleneck detection + +--- + +**Status**: Ready for implementation +**Next Steps**: Create detailed tool specifications and begin MVP development diff --git a/__reports__/mcp_langfuse_analysis/02-new_mcp_server_perspective_v1.md b/__reports__/mcp_langfuse_analysis/02-new_mcp_server_perspective_v1.md new file mode 100644 index 0000000..78be6ee --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/02-new_mcp_server_perspective_v1.md @@ -0,0 +1,1661 @@ +# New Langfuse MCP Server - Architecture and Use Cases + +**Report Date**: 2025-12-03 +**Report Version**: v1 +**Purpose**: Define architecture and comprehensive use cases for advanced Langfuse trace analytics + +--- + +## Executive Summary + +This report outlines the design for a new Langfuse MCP server focused on **intelligent trace analytics** for knowledge extraction, user-LLM interaction analysis, and agent behavior understanding. The server leverages Langfuse REST API primitives to provide: + +- **Tool call extraction** with prompt context +- **Temporal reconstruction** from fragmented observations +- **Interaction pattern analysis** (user ↔ LLM ↔ tools) +- **Plan execution tracking** (LLM-decided actions) +- **Knowledge graph generation** from trace data +- **Performance profiling** and bottleneck detection + +**Key Architectural Decisions**: +- Async/parallel processing for multi-trace analysis +- Streaming for large dataset handling +- Intelligent caching with invalidation +- Modular processing pipeline + +**Target Use Cases**: Knowledge extraction, conversation analysis, agent debugging, training data generation, performance optimization. + +--- + +## 1. Problem Statement and Scope + +### 1.1 Core Challenge + +Langfuse provides rich trace data but requires **intelligent processing** to extract actionable insights: + + +**Raw Data** (what API provides): +- Observations with timestamps, types, parent IDs +- Input/output blobs (unstructured) +- Metadata dictionaries +- Scattered across traces and sessions + +**Needed Insights** (what users want): +- "What tools did the agent use and why?" +- "How did the LLM decide on this plan?" +- "What patterns exist in user-LLM interactions?" +- "Where are the performance bottlenecks?" +- "What knowledge can be extracted for training?" + +**Gap**: Transformation from raw observations to structured insights requires intelligent processing. + +### 1.2 Target Use Cases + +**1. Knowledge Extraction** +- Extract facts, entities, relationships from conversations +- Build knowledge graphs from agent interactions +- Identify information flow patterns +- Generate training datasets from successful interactions + +**2. User-LLM Interaction Analysis** +- Conversation flow analysis (turn-taking, topic shifts) +- User intent classification from prompts +- LLM response quality assessment +- Interaction pattern discovery + +**3. Agent Plan Analysis** +- LLM decision tracking (what actions were chosen) +- Plan execution monitoring (planned vs actual) +- Tool selection rationale extraction +- Success/failure pattern identification + +**4. Performance Optimization** +- Bottleneck identification (slow tools, redundant calls) +- Cost analysis (expensive operations) +- Latency profiling (where time is spent) +- Resource utilization patterns + +**5. Debugging and Troubleshooting** +- Error propagation tracking +- Failed interaction analysis +- Anomaly detection (unusual patterns) +- Root cause analysis for failures + +--- + +## 2. 
Architectural Decisions + +### 2.1 Async/Parallel Processing + +**Decision**: Use async I/O and parallel processing for multi-trace operations. + +**Rationale**: +- Sessions often contain 10-100+ traces +- Each trace may have 50-500+ observations +- Sequential processing is prohibitively slow +- API calls are I/O-bound (network latency) + +**Implementation Strategy**: + +```python +# Async API client +class AsyncLangfuseClient: + async def fetch_traces(self, session_id: str) -> List[Trace]: + """Fetch all traces for session in parallel""" + trace_ids = await self.get_trace_ids(session_id) + + # Parallel fetch with semaphore for rate limiting + semaphore = asyncio.Semaphore(10) # Max 10 concurrent + tasks = [ + self.fetch_trace_with_limit(trace_id, semaphore) + for trace_id in trace_ids + ] + return await asyncio.gather(*tasks) + + async def fetch_observations_batch( + self, + trace_ids: List[str] + ) -> Dict[str, List[Observation]]: + """Batch fetch observations for multiple traces""" + tasks = [ + self.fetch_observations(trace_id) + for trace_id in trace_ids + ] + results = await asyncio.gather(*tasks) + return dict(zip(trace_ids, results)) +``` + +**Benefits**: +- 10-50x speedup for multi-trace operations +- Efficient API rate limit utilization +- Responsive for large sessions + +**Trade-offs**: +- Increased memory usage (parallel results) +- Complexity in error handling +- Need for connection pooling + +### 2.2 Streaming for Large Datasets + +**Decision**: Stream results for queries returning large datasets. + +**Rationale**: +- Some sessions have 1000+ traces +- Full materialization exceeds memory limits +- Users often need partial results quickly + +**Implementation Strategy**: + +```python +async def stream_tool_calls( + session_id: str, + batch_size: int = 50 +) -> AsyncIterator[ToolCall]: + """Stream tool calls without loading entire session""" + + # Fetch trace IDs (lightweight) + trace_ids = await client.get_trace_ids(session_id) + + # Process in batches + for batch in chunk(trace_ids, batch_size): + # Fetch batch of traces + traces = await client.fetch_traces_batch(batch) + + # Process and yield results immediately + for trace in traces: + tool_calls = extract_tool_calls(trace) + for tc in tool_calls: + yield tc # Stream to client + + # Memory cleanup + del traces +``` + +**Benefits**: +- Constant memory usage +- Progressive results (user sees data immediately) +- Handles arbitrarily large sessions + +**Trade-offs**: +- Cannot sort globally (only within batches) +- Partial results if interrupted +- More complex client handling + +### 2.3 Intelligent Caching Strategy + +**Decision**: Multi-level caching with smart invalidation. 
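+
+The cache-level snippets below decorate both async and sync functions with a `@cache(ttl=...)` decorator that is not defined anywhere in this report. A minimal sketch of what such a decorator could look like, using only the standard library (the key scheme, FIFO eviction, and `maxsize` default are assumptions, not a committed design; arguments are assumed hashable):
+
+```python
+import asyncio
+import time
+from functools import wraps
+
+
+def cache(ttl: int, maxsize: int = 1024):
+    """Tiny TTL cache decorator for sync and async callables (sketch)."""
+    def decorator(fn):
+        store: dict = {}  # key -> (expires_at, value)
+
+        def lookup(key):
+            hit = store.get(key)
+            if hit is not None and hit[0] > time.monotonic():
+                return True, hit[1]
+            return False, None
+
+        def insert(key, value):
+            if len(store) >= maxsize:              # crude FIFO eviction
+                store.pop(next(iter(store)))
+            store[key] = (time.monotonic() + ttl, value)
+
+        if asyncio.iscoroutinefunction(fn):
+            @wraps(fn)
+            async def async_wrapper(*args, **kwargs):
+                key = (args, tuple(sorted(kwargs.items())))
+                found, value = lookup(key)
+                if found:
+                    return value
+                value = await fn(*args, **kwargs)
+                insert(key, value)
+                return value
+            return async_wrapper
+
+        @wraps(fn)
+        def sync_wrapper(*args, **kwargs):
+            key = (args, tuple(sorted(kwargs.items())))
+            found, value = lookup(key)
+            if found:
+                return value
+            value = fn(*args, **kwargs)
+            insert(key, value)
+            return value
+        return sync_wrapper
+    return decorator
+```
+
+A production build would more likely wrap `cachetools` (already listed as a dependency) or an external cache, but the call sites shown below would not need to change.
+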
+ +**Rationale**: +- Trace data is immutable once written +- Repeated queries on same session are common +- Processing is expensive (tree building, extraction) +- API rate limits encourage caching + +**Cache Levels**: + +```python +# Level 1: Raw API responses (TTL: 1 hour) +@cache(ttl=3600) +async def fetch_trace(trace_id: str) -> Trace: + return await api.get_trace(trace_id) + +# Level 2: Processed structures (TTL: 6 hours) +@cache(ttl=21600) +def build_observation_tree(trace_id: str) -> ObservationTree: + trace = fetch_trace(trace_id) + return process_tree(trace.observations) + +# Level 3: Extracted insights (TTL: 24 hours) +@cache(ttl=86400) +def extract_tool_calls_cached(trace_id: str) -> List[ToolCall]: + tree = build_observation_tree(trace_id) + return extract_tool_calls(tree) +``` + +**Invalidation Strategy**: +- Time-based TTL (traces are immutable) +- LRU eviction for memory management +- Manual invalidation for active sessions +- Cache warming for common queries + +**Benefits**: +- 100-1000x speedup for repeated queries +- Reduced API load +- Better user experience + +**Trade-offs**: +- Memory overhead +- Stale data for active sessions +- Cache coherency complexity + +### 2.4 Modular Processing Pipeline + +**Decision**: Composable processing stages with clear interfaces. + +**Rationale**: +- Different use cases need different processing +- Enable reuse across tools +- Facilitate testing and debugging +- Allow optimization of individual stages + +**Pipeline Architecture**: + +```python +# Stage 1: Data Fetching +class DataFetcher: + async def fetch_session_data(self, session_id: str) -> SessionData: + """Fetch all raw data for session""" + pass + +# Stage 2: Tree Building +class TreeBuilder: + def build_trees(self, observations: List[Observation]) -> List[Tree]: + """Build observation trees from flat list""" + pass + +# Stage 3: Temporal Ordering +class TemporalSorter: + def sort_observations(self, trees: List[Tree]) -> List[Observation]: + """Reconstruct temporal order""" + pass + +# Stage 4: Extraction +class ToolCallExtractor: + def extract(self, observations: List[Observation]) -> List[ToolCall]: + """Extract tool calls with context""" + pass + +# Stage 5: Analysis +class PatternAnalyzer: + def analyze(self, tool_calls: List[ToolCall]) -> Insights: + """Generate insights from tool calls""" + pass + +# Composable pipeline +pipeline = Pipeline([ + DataFetcher(), + TreeBuilder(), + TemporalSorter(), + ToolCallExtractor(), + PatternAnalyzer() +]) + +result = await pipeline.execute(session_id) +``` + +**Benefits**: +- Clear separation of concerns +- Easy to test individual stages +- Reusable components +- Flexible composition + +--- + +## 3. Comprehensive Use Cases and Tools + +### 3.1 Knowledge Extraction + +**Use Case**: Extract structured knowledge from agent interactions for training data, knowledge bases, or analysis. + + + +#### Tool 3.1.1: `extract_conversation_knowledge` + +**Purpose**: Extract facts, entities, and relationships from user-LLM conversations. + +**Parameters**: +- `session_id` (required): Session to analyze +- `knowledge_types` (optional): ["facts", "entities", "relationships", "decisions"] +- `include_sources` (optional, default=true): Include source observation IDs + +**Processing Logic**: +1. Fetch all GENERATION observations (LLM responses) +2. Parse output for structured information +3. Extract entities (people, places, concepts) +4. Identify facts (statements, assertions) +5. Map relationships (entity connections) +6. 
Track decisions (LLM choices, plans) +7. Link to source observations for provenance + +**Returns**: +```json +{ + "session_id": "string", + "knowledge": { + "entities": [ + { + "name": "string", + "type": "person|place|concept|tool", + "mentions": ["observation_id"], + "context": "string" + } + ], + "facts": [ + { + "statement": "string", + "confidence": "high|medium|low", + "source_observation": "string", + "timestamp": "datetime" + } + ], + "relationships": [ + { + "subject": "entity_name", + "predicate": "string", + "object": "entity_name", + "source": "observation_id" + } + ], + "decisions": [ + { + "decision": "string", + "rationale": "string", + "outcome": "success|failure|pending", + "timestamp": "datetime" + } + ] + } +} +``` + +**API Leverage**: +- Use `filter` parameter to get only GENERATION observations +- Use `fields=core,io` to get input/output without scores +- Parallel fetch for multiple traces + +#### Tool 3.1.2: `build_knowledge_graph` + +**Purpose**: Generate knowledge graph from session interactions. + +**Parameters**: +- `session_id` (required): Session to analyze +- `graph_format` (optional, default="json"): "json"|"graphml"|"cypher" +- `include_temporal` (optional, default=true): Include time-based edges +- `min_confidence` (optional, default=0.5): Minimum confidence for relationships + +**Processing Logic**: +1. Extract knowledge using `extract_conversation_knowledge` +2. Build nodes (entities, concepts, tools) +3. Build edges (relationships, temporal sequences, causal links) +4. Calculate node importance (centrality, frequency) +5. Identify clusters (related concepts) +6. Format as graph structure + +**Returns**: +```json +{ + "nodes": [ + { + "id": "string", + "label": "string", + "type": "entity|concept|tool|decision", + "properties": { + "mentions": "number", + "first_seen": "datetime", + "importance": "number" + } + } + ], + "edges": [ + { + "source": "node_id", + "target": "node_id", + "type": "relationship|temporal|causal", + "weight": "number", + "properties": {} + } + ], + "clusters": [ + { + "id": "string", + "nodes": ["node_id"], + "theme": "string" + } + ] +} +``` + +**API Leverage**: +- Metrics API for aggregating entity frequencies +- Observation filtering for relationship extraction +- Session grouping for temporal boundaries + +### 3.2 User-LLM Interaction Analysis + +**Use Case**: Understand conversation dynamics, user intent, and LLM response patterns. + +#### Tool 3.2.1: `analyze_conversation_flow` + +**Purpose**: Analyze turn-taking, topic shifts, and conversation structure. + +**Parameters**: +- `session_id` (required): Session to analyze +- `detect_topics` (optional, default=true): Identify topic changes +- `analyze_sentiment` (optional, default=false): Track sentiment shifts +- `identify_patterns` (optional, default=true): Find recurring patterns + +**Processing Logic**: +1. Reconstruct conversation timeline (user inputs → LLM responses) +2. Identify turns (user → LLM → user sequences) +3. Detect topic shifts (semantic changes in content) +4. Measure response quality (length, coherence, relevance) +5. Identify patterns (question types, response styles) +6. 
Calculate conversation metrics (turns, duration, engagement) + +**Returns**: +```json +{ + "session_id": "string", + "conversation_metrics": { + "total_turns": "number", + "duration_seconds": "number", + "avg_response_time": "number", + "user_messages": "number", + "llm_messages": "number" + }, + "turns": [ + { + "turn_number": "number", + "timestamp": "datetime", + "user_input": "string", + "llm_response": "string", + "response_time_seconds": "number", + "topic": "string", + "topic_shift": "boolean" + } + ], + "topics": [ + { + "topic": "string", + "turn_range": [1, 5], + "duration_seconds": "number" + } + ], + "patterns": [ + { + "pattern_type": "question|command|clarification", + "frequency": "number", + "examples": ["turn_number"] + } + ] +} +``` + +**API Leverage**: +- Trace ordering by timestamp for turn sequence +- Observation filtering for user inputs vs LLM responses +- Metadata inspection for conversation context + +#### Tool 3.2.2: `classify_user_intents` + +**Purpose**: Classify user intents from conversation history. + +**Parameters**: +- `session_id` (required): Session to analyze +- `intent_taxonomy` (optional): Custom intent categories +- `include_confidence` (optional, default=true): Include classification confidence + +**Processing Logic**: +1. Extract user inputs (root observations or specific metadata) +2. Classify intents (question, command, feedback, clarification) +3. Identify sub-intents (information seeking, task execution, etc.) +4. Track intent sequences (how intents evolve) +5. Measure intent satisfaction (was intent fulfilled?) + +**Returns**: +```json +{ + "session_id": "string", + "intents": [ + { + "turn_number": "number", + "user_input": "string", + "primary_intent": "question|command|feedback|clarification", + "sub_intent": "string", + "confidence": "number", + "satisfied": "boolean", + "satisfaction_evidence": "string" + } + ], + "intent_distribution": { + "question": "number", + "command": "number", + "feedback": "number", + "clarification": "number" + }, + "intent_sequences": [ + { + "sequence": ["question", "clarification", "command"], + "frequency": "number" + } + ] +} +``` + +**API Leverage**: +- Observation filtering for user inputs +- Temporal ordering for intent sequences +- Metadata for intent hints + +### 3.3 Agent Plan Analysis + +**Use Case**: Understand LLM decision-making, plan execution, and tool selection. + +#### Tool 3.3.1: `extract_llm_plans` + +**Purpose**: Extract and analyze LLM-decided action plans. + +**Parameters**: +- `session_id` (required): Session to analyze +- `include_execution` (optional, default=true): Include execution results +- `track_deviations` (optional, default=true): Identify plan deviations + +**Processing Logic**: +1. Identify planning observations (LLM outputs with action lists) +2. Parse planned actions from LLM output +3. Match planned actions to executed observations (SPANs) +4. Track execution order vs planned order +5. Identify deviations (skipped, reordered, added actions) +6. Analyze success/failure of each action +7. 
Calculate plan adherence metrics + +**Returns**: +```json +{ + "session_id": "string", + "plans": [ + { + "plan_id": "string", + "timestamp": "datetime", + "planning_observation_id": "string", + "planned_actions": [ + { + "action": "string", + "tool": "string", + "arguments": {}, + "planned_order": "number" + } + ], + "executed_actions": [ + { + "action": "string", + "tool": "string", + "observation_id": "string", + "execution_order": "number", + "status": "success|failure", + "duration_seconds": "number" + } + ], + "adherence_metrics": { + "actions_executed": "number", + "actions_skipped": "number", + "actions_added": "number", + "order_preserved": "boolean", + "adherence_score": "number (0-1)" + } + } + ] +} +``` + +**API Leverage**: +- GENERATION observations for plans +- SPAN observations for executions +- Parent-child relationships for plan-execution linking +- Temporal ordering for sequence analysis + +#### Tool 3.3.2: `analyze_tool_selection` + +**Purpose**: Analyze why and how LLM selects specific tools. + +**Parameters**: +- `session_id` (required): Session to analyze +- `include_alternatives` (optional, default=true): Show alternative tools considered +- `extract_rationale` (optional, default=true): Extract selection reasoning + +**Processing Logic**: +1. Identify tool selection points (GENERATION before SPAN) +2. Extract tool choice from LLM output +3. Parse selection rationale (if present in output) +4. Identify alternative tools mentioned but not used +5. Analyze selection patterns (when is tool X chosen?) +6. Calculate tool selection success rate + +**Returns**: +```json +{ + "session_id": "string", + "tool_selections": [ + { + "timestamp": "datetime", + "context": "string (user query/situation)", + "selected_tool": "string", + "rationale": "string", + "alternatives_considered": ["tool_name"], + "execution_result": "success|failure", + "execution_observation_id": "string" + } + ], + "selection_patterns": [ + { + "tool": "string", + "selection_triggers": ["context_pattern"], + "success_rate": "number", + "avg_execution_time": "number" + } + ] +} +``` + +**API Leverage**: +- GENERATION observations for selection reasoning +- SPAN observations for execution results +- Metadata for tool information +- Temporal proximity for context-tool linking + +### 3.4 Performance Optimization + +**Use Case**: Identify bottlenecks, optimize costs, and improve agent efficiency. + + + +#### Tool 3.4.1: `profile_execution_performance` + +**Purpose**: Identify performance bottlenecks in agent execution. + +**Parameters**: +- `session_id` (required): Session to profile +- `granularity` (optional, default="observation"): "observation"|"tool"|"trace" +- `include_percentiles` (optional, default=true): Include p50, p95, p99 +- `identify_outliers` (optional, default=true): Flag slow operations + +**Processing Logic**: +1. Collect timing data (startTime, endTime for all observations) +2. Calculate durations for each observation/tool/trace +3. Compute statistics (mean, median, percentiles) +4. Identify outliers (>2 std dev from mean) +5. Analyze parallel vs sequential execution +6. Calculate critical path (longest dependency chain) +7. 
Estimate optimization potential + +**Returns**: +```json +{ + "session_id": "string", + "total_duration_seconds": "number", + "critical_path_seconds": "number", + "parallelization_potential": "number (0-1)", + "performance_breakdown": [ + { + "component": "tool_name|observation_type", + "call_count": "number", + "total_time_seconds": "number", + "avg_time_seconds": "number", + "p50_seconds": "number", + "p95_seconds": "number", + "p99_seconds": "number", + "percentage_of_total": "number" + } + ], + "bottlenecks": [ + { + "observation_id": "string", + "component": "string", + "duration_seconds": "number", + "expected_duration": "number", + "slowdown_factor": "number", + "impact": "high|medium|low" + } + ], + "optimization_recommendations": [ + { + "recommendation": "string", + "estimated_savings_seconds": "number", + "effort": "low|medium|high" + } + ] +} +``` + +**API Leverage**: +- Observation timestamps for duration calculation +- Parent-child relationships for dependency analysis +- Metrics API for aggregated statistics +- Filtering for specific observation types + +#### Tool 3.4.2: `analyze_cost_efficiency` + +**Purpose**: Analyze costs and identify optimization opportunities. + +**Parameters**: +- `session_id` (required): Session to analyze +- `cost_breakdown` (optional, default="tool"): "tool"|"model"|"operation" +- `identify_waste` (optional, default=true): Find redundant operations + +**Processing Logic**: +1. Extract cost data from observations (costDetails field) +2. Aggregate costs by tool/model/operation +3. Identify redundant calls (same tool, same args, close in time) +4. Calculate cost per outcome (cost per successful task) +5. Compare with baseline/expected costs +6. Identify high-cost low-value operations + +**Returns**: +```json +{ + "session_id": "string", + "total_cost_usd": "number", + "cost_breakdown": [ + { + "component": "string", + "call_count": "number", + "total_cost_usd": "number", + "avg_cost_usd": "number", + "percentage_of_total": "number" + } + ], + "redundant_operations": [ + { + "tool": "string", + "redundant_calls": "number", + "wasted_cost_usd": "number", + "observation_ids": ["string"] + } + ], + "cost_efficiency": { + "cost_per_successful_task": "number", + "cost_per_user_interaction": "number", + "efficiency_score": "number (0-1)" + }, + "optimization_opportunities": [ + { + "opportunity": "string", + "potential_savings_usd": "number", + "implementation_effort": "low|medium|high" + } + ] +} +``` + +**API Leverage**: +- costDetails field from observations +- usageDetails for token counts +- Metrics API for cost aggregation +- Temporal analysis for redundancy detection + +#### Tool 3.4.3: `detect_redundant_operations` + +**Purpose**: Identify duplicate or unnecessary operations. + +**Parameters**: +- `session_id` (required): Session to analyze +- `similarity_threshold` (optional, default=0.9): Threshold for considering operations similar +- `time_window_seconds` (optional, default=60): Time window for redundancy detection + +**Processing Logic**: +1. Extract all SPAN observations (operations) +2. Compare operations within time window +3. Calculate similarity (tool name, arguments, context) +4. Identify duplicates (exact matches) +5. Identify near-duplicates (similar but not identical) +6. Analyze if results were reused or recalculated +7. 
Estimate waste (time, cost, resources) + +**Returns**: +```json +{ + "session_id": "string", + "redundancy_analysis": { + "total_operations": "number", + "duplicate_operations": "number", + "near_duplicate_operations": "number", + "redundancy_rate": "number (0-1)" + }, + "redundant_groups": [ + { + "group_id": "string", + "operation": "string", + "occurrences": "number", + "observation_ids": ["string"], + "time_span_seconds": "number", + "wasted_time_seconds": "number", + "wasted_cost_usd": "number", + "results_identical": "boolean" + } + ], + "recommendations": [ + { + "recommendation": "string", + "affected_operations": ["string"], + "potential_savings": { + "time_seconds": "number", + "cost_usd": "number" + } + } + ] +} +``` + +**API Leverage**: +- SPAN observations for operations +- Input/output comparison for similarity +- Temporal ordering for time window analysis +- Cost data for waste calculation + +### 3.5 Debugging and Troubleshooting + +**Use Case**: Debug agent failures, trace errors, and understand failure modes. + +#### Tool 3.5.1: `trace_error_propagation` + +**Purpose**: Track how errors propagate through agent execution. + +**Parameters**: +- `session_id` (required): Session to analyze +- `error_types` (optional): Filter by specific error types +- `include_recovery` (optional, default=true): Show recovery attempts + +**Processing Logic**: +1. Identify error observations (level=ERROR, statusMessage present) +2. Build error propagation tree (parent-child relationships) +3. Identify error origin (root cause observation) +4. Track downstream impacts (affected observations) +5. Identify recovery attempts (retry patterns) +6. Analyze error handling effectiveness + +**Returns**: +```json +{ + "session_id": "string", + "error_summary": { + "total_errors": "number", + "error_types": {"type": "count"}, + "recovery_rate": "number (0-1)" + }, + "error_chains": [ + { + "chain_id": "string", + "root_error": { + "observation_id": "string", + "error_type": "string", + "error_message": "string", + "timestamp": "datetime" + }, + "propagation": [ + { + "observation_id": "string", + "impact": "failed|degraded|recovered", + "timestamp": "datetime" + } + ], + "recovery_attempts": [ + { + "observation_id": "string", + "strategy": "retry|fallback|skip", + "outcome": "success|failure" + } + ], + "final_outcome": "recovered|failed" + } + ] +} +``` + +**API Leverage**: +- level field for error identification +- statusMessage for error details +- Parent-child relationships for propagation +- Temporal ordering for sequence analysis + +#### Tool 3.5.2: `analyze_failure_patterns` + +**Purpose**: Identify common failure patterns and root causes. + +**Parameters**: +- `session_ids` (optional): Multiple sessions to analyze +- `from_timestamp` (optional): Start of time range +- `to_timestamp` (optional): End of time range +- `min_occurrences` (optional, default=2): Minimum pattern frequency + +**Processing Logic**: +1. Collect failed observations across sessions +2. Extract failure context (tool, arguments, preceding actions) +3. Cluster similar failures (error type, context) +4. Identify patterns (common preconditions, triggers) +5. Calculate failure rates by pattern +6. 
Suggest root causes and fixes + +**Returns**: +```json +{ + "analysis_scope": { + "sessions_analyzed": "number", + "time_range": {"from": "datetime", "to": "datetime"}, + "total_failures": "number" + }, + "failure_patterns": [ + { + "pattern_id": "string", + "description": "string", + "occurrences": "number", + "failure_rate": "number", + "common_context": { + "tool": "string", + "error_type": "string", + "preconditions": ["string"] + }, + "example_observations": ["observation_id"], + "suspected_root_cause": "string", + "suggested_fix": "string" + } + ], + "failure_trends": [ + { + "time_bucket": "datetime", + "failure_count": "number", + "failure_rate": "number" + } + ] +} +``` + +**API Leverage**: +- Multi-session querying with filters +- Metrics API for aggregation +- Observation filtering by level=ERROR +- Temporal grouping for trends + +### 3.6 Training Data Generation + +**Use Case**: Generate high-quality training datasets from successful interactions. + +#### Tool 3.6.1: `export_training_examples` + +**Purpose**: Export successful interaction sequences for model training. + +**Parameters**: +- `session_ids` (optional): Specific sessions to export +- `quality_threshold` (optional, default=0.8): Minimum quality score +- `format` (optional, default="jsonl"): "jsonl"|"parquet"|"csv" +- `include_metadata` (optional, default=true): Include context metadata + +**Processing Logic**: +1. Filter sessions by quality metrics (success rate, user feedback) +2. Extract interaction sequences (user → LLM → tool → LLM) +3. Format as training examples (prompt, completion, context) +4. Include metadata (timestamp, session, quality score) +5. Deduplicate similar examples +6. Export in specified format + +**Returns**: +```json +{ + "export_summary": { + "total_examples": "number", + "sessions_included": "number", + "avg_quality_score": "number", + "format": "string" + }, + "examples": [ + { + "id": "string", + "prompt": "string", + "completion": "string", + "tool_calls": [ + { + "tool": "string", + "arguments": {}, + "result": {} + } + ], + "metadata": { + "session_id": "string", + "timestamp": "datetime", + "quality_score": "number", + "context": {} + } + } + ] +} +``` + +**API Leverage**: +- Session filtering by quality metrics +- Observation extraction for sequences +- Scores for quality assessment +- Metadata for context + +--- + +## 4. Core Processing Algorithms + +### 4.1 Temporal Reconstruction Algorithm + +**Challenge**: Observations may lack explicit ordering within sessions. + +**Solution**: Multi-strategy temporal reconstruction. 
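+
+The algorithms in this section lean on a `build_observation_tree` helper that is referenced but never spelled out. A minimal sketch of one possible shape for it, assuming each observation record exposes `id` and `parentObservationId` attributes (the `ObservationTree` class itself is hypothetical):
+
+```python
+from collections import defaultdict
+from typing import Any, Dict, List, Optional
+
+
+class ObservationTree:
+    """Parent/child index over a flat observation list (sketch)."""
+
+    def __init__(self, observations: List[Any]):
+        # Observations are whatever records the API client layer returns;
+        # only `id` and `parentObservationId` are assumed here.
+        self.by_id: Dict[str, Any] = {o.id: o for o in observations}
+        self.children: Dict[Optional[str], List[Any]] = defaultdict(list)
+        for obs in observations:
+            self.children[obs.parentObservationId].append(obs)
+
+    def find_parent(self, observation_id: str) -> Optional[Any]:
+        obs = self.by_id.get(observation_id)
+        if obs is None or obs.parentObservationId is None:
+            return None
+        return self.by_id.get(obs.parentObservationId)
+
+    def find_siblings(self, observation_id: str) -> List[Any]:
+        obs = self.by_id.get(observation_id)
+        if obs is None:
+            return []
+        return [o for o in self.children[obs.parentObservationId] if o.id != observation_id]
+
+    def roots(self) -> List[Any]:
+        # Observations without a parent are trace roots.
+        return list(self.children[None])
+
+
+def build_observation_tree(observations: List[Any]) -> ObservationTree:
+    return ObservationTree(observations)
+```
+
+The reconstruction sketch below only needs `find_parent` and `find_siblings`; the later algorithms reuse the same child index.
+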
+
+```python
+def reconstruct_temporal_order(observations: List[Observation]) -> List[Observation]:
+    """
+    Reconstruct temporal order using multiple strategies
+    """
+    # Strategy 1: Use explicit timestamps
+    with_timestamps = [obs for obs in observations if obs.startTime]
+    without_timestamps = [obs for obs in observations if not obs.startTime]
+
+    # Sort observations with timestamps
+    with_timestamps.sort(key=lambda x: x.startTime)
+
+    # Strategy 2: Infer from parent-child relationships
+    tree = build_observation_tree(observations)
+
+    for obs in without_timestamps:
+        # Find parent
+        parent = tree.find_parent(obs.id)
+        if parent and parent.startTime:
+            # Place after parent
+            obs.inferred_time = parent.startTime + timedelta(milliseconds=1)
+        else:
+            # Find siblings with timestamps
+            siblings = tree.find_siblings(obs.id)
+            timestamped_siblings = [s for s in siblings if s.startTime]
+            if timestamped_siblings:
+                # Place after last sibling
+                obs.inferred_time = max(s.startTime for s in timestamped_siblings)
+
+    # Strategy 3: Use trace-level ordering
+    for obs in without_timestamps:
+        if not hasattr(obs, 'inferred_time'):
+            trace = find_trace(obs.traceId)
+            obs.inferred_time = trace.timestamp
+
+    # Merge and sort
+    all_observations = with_timestamps + without_timestamps
+    all_observations.sort(key=lambda x: x.startTime or x.inferred_time)
+
+    return all_observations
+```
+
+### 4.2 Tool Call Extraction Algorithm
+
+**Challenge**: Identify tool calls from diverse observation patterns.
+
+**Solution**: Multi-pattern tool call identification.
+
+```python
+def extract_tool_calls(observations: List[Observation]) -> List[ToolCall]:
+    """
+    Extract tool calls using multiple identification patterns
+    """
+    tool_calls = []
+
+    for obs in observations:
+        # Pattern 1: Type-based identification
+        if obs.type != "SPAN":
+            continue
+
+        # Pattern 2: Name-based identification
+        tool_name = None
+        if obs.name:
+            # Check for common prefixes
+            if obs.name.startswith(("tool_", "function_", "call_")):
+                tool_name = obs.name
+            # Check for known tool names
+            elif obs.name in KNOWN_TOOLS:
+                tool_name = obs.name
+
+        # Pattern 3: Metadata-based identification
+        if not tool_name and obs.metadata:
+            if "tool_name" in obs.metadata:
+                tool_name = obs.metadata["tool_name"]
+            elif "type" in obs.metadata and obs.metadata["type"] == "tool_call":
+                tool_name = obs.metadata.get("name", obs.name)
+
+        # Pattern 4: Input structure analysis
+        if not tool_name and obs.input:
+            if isinstance(obs.input, dict):
+                if "tool" in obs.input:
+                    tool_name = obs.input["tool"]
+                elif "function" in obs.input:
+                    tool_name = obs.input["function"]
+
+        # Skip if not identified as tool call
+        if not tool_name:
+            continue
+
+        # Extract tool call data
+        tool_call = ToolCall(
+            id=obs.id,
+            name=tool_name,
+            arguments=extract_arguments(obs.input),
+            result=extract_result(obs.output),
+            timestamp=obs.startTime,
+            duration=calculate_duration(obs),
+            status=determine_status(obs),
+            observation_id=obs.id,
+            trace_id=obs.traceId
+        )
+
+        tool_calls.append(tool_call)
+
+    return tool_calls
+```
+
+### 4.3 Context Association Algorithm
+
+**Challenge**: Link tool calls to triggering prompts and surrounding context.
+
+**Solution**: Multi-strategy context linking.
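+
+The association strategy below relies on a small result container and a few lookup helpers that the report leaves implicit. One possible sketch (field names mirror the strategy code; the chat-message fallback in `extract_prompt` is an assumption about how GENERATION inputs are shaped):
+
+```python
+from dataclasses import dataclass
+from typing import Any, List, Optional
+
+
+@dataclass
+class ToolCallContext:
+    """Prompt context attached to an extracted tool call (sketch)."""
+    prompt: Optional[str] = None
+    prompt_observation_id: Optional[str] = None
+    association_method: Optional[str] = None
+
+
+def find_observation_by_id(observation_id: Optional[str],
+                           observations: List[Any]) -> Optional[Any]:
+    if observation_id is None:
+        return None
+    return next((o for o in observations if o.id == observation_id), None)
+
+
+def find_trace_root(trace_id: str, observations: List[Any]) -> Optional[Any]:
+    # Root observation of a trace = no parent within that trace.
+    roots = [o for o in observations
+             if o.traceId == trace_id and o.parentObservationId is None]
+    return roots[0] if roots else None
+
+
+def extract_prompt(raw_input: Any) -> Optional[str]:
+    # GENERATION inputs are often chat-message lists; fall back to str().
+    if isinstance(raw_input, list) and raw_input:
+        last = raw_input[-1]
+        if isinstance(last, dict) and "content" in last:
+            return str(last["content"])
+    return None if raw_input is None else str(raw_input)
+```
+
+`find_session_input` is deliberately omitted here: how a session-level user input is recorded depends on the instrumentation, so that lookup is left to the integration.
+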
+ +```python +def associate_context( + tool_call: ToolCall, + observations: List[Observation] +) -> ToolCallContext: + """ + Associate tool call with its context using multiple strategies + """ + context = ToolCallContext() + + # Strategy 1: Direct parent relationship + parent = find_observation_by_id( + tool_call.observation.parentObservationId, + observations + ) + + if parent and parent.type == "GENERATION": + context.prompt = extract_prompt(parent.input) + context.prompt_observation_id = parent.id + context.association_method = "direct_parent" + return context + + # Strategy 2: Temporal proximity + # Find GENERATION observations within 5 seconds before tool call + nearby_generations = [ + obs for obs in observations + if obs.type == "GENERATION" + and obs.startTime < tool_call.timestamp + and (tool_call.timestamp - obs.startTime).total_seconds() < 5 + and obs.traceId == tool_call.trace_id + ] + + if nearby_generations: + # Use closest one + closest = max(nearby_generations, key=lambda x: x.startTime) + context.prompt = extract_prompt(closest.input) + context.prompt_observation_id = closest.id + context.association_method = "temporal_proximity" + return context + + # Strategy 3: Trace root + trace_root = find_trace_root(tool_call.trace_id, observations) + if trace_root: + context.prompt = extract_prompt(trace_root.input) + context.prompt_observation_id = trace_root.id + context.association_method = "trace_root" + return context + + # Strategy 4: Session context + # Use session-level user input if available + session_input = find_session_input(tool_call.session_id) + if session_input: + context.prompt = session_input + context.association_method = "session_context" + + return context +``` + +### 4.4 Knowledge Extraction Algorithm + +**Challenge**: Extract structured knowledge from unstructured conversation data. + +**Solution**: Multi-pass extraction with entity linking. 
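+
+The passes below call extraction helpers (`extract_entities`, `extract_facts`, `extract_relationships`, ...) whose implementation is intentionally open: they could be regex heuristics, an NER model, or a secondary LLM call. As a placeholder, a deliberately naive pattern-based sketch of the first two (assuming plain English text; illustrative only, not a proposed extraction quality bar):
+
+```python
+import re
+from dataclasses import dataclass
+from typing import List
+
+
+@dataclass
+class Entity:
+    name: str
+    type: str
+
+
+@dataclass
+class Fact:
+    statement: str
+    confidence: str
+
+
+def extract_entities(text: str) -> List[Entity]:
+    """Naive placeholder: capitalized word runs are treated as entities."""
+    pattern = r"\b([A-Z][\w-]+(?:\s+[A-Z][\w-]+)*)\b"
+    seen, entities = set(), []
+    for match in re.finditer(pattern, text):
+        name = match.group(1)
+        if name in seen:
+            continue
+        seen.add(name)
+        entities.append(Entity(name=name, type="concept"))
+    return entities
+
+
+def extract_facts(text: str) -> List[Fact]:
+    """Naive placeholder: copula sentences ("X is/has Y") become low-confidence facts."""
+    facts = []
+    for sentence in re.split(r"(?<=[.!?])\s+", text):
+        if re.search(r"\b(is|are|was|were|has|have)\b", sentence, re.IGNORECASE):
+            facts.append(Fact(statement=sentence.strip(), confidence="low"))
+    return facts
+```
+
+Swapping these for a proper NER pipeline or an LLM-based extractor does not change the multi-pass structure below.
+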
+ +```python +def extract_knowledge(observations: List[Observation]) -> Knowledge: + """ + Extract structured knowledge from observations + """ + knowledge = Knowledge() + + # Pass 1: Entity extraction + for obs in observations: + if obs.type == "GENERATION": + text = extract_text(obs.output) + entities = extract_entities(text) # NER or pattern matching + + for entity in entities: + knowledge.add_entity( + name=entity.name, + type=entity.type, + source_observation=obs.id, + context=text + ) + + # Pass 2: Fact extraction + for obs in observations: + if obs.type == "GENERATION": + text = extract_text(obs.output) + facts = extract_facts(text) # Pattern matching or LLM + + for fact in facts: + knowledge.add_fact( + statement=fact.statement, + confidence=fact.confidence, + source_observation=obs.id, + timestamp=obs.startTime + ) + + # Pass 3: Relationship extraction + for obs in observations: + if obs.type == "GENERATION": + text = extract_text(obs.output) + relationships = extract_relationships(text, knowledge.entities) + + for rel in relationships: + knowledge.add_relationship( + subject=rel.subject, + predicate=rel.predicate, + object=rel.object, + source_observation=obs.id + ) + + # Pass 4: Decision extraction + for obs in observations: + if obs.type == "GENERATION": + # Look for decision indicators + text = extract_text(obs.output) + if contains_decision_markers(text): + decision = extract_decision(text) + + # Find execution outcome + child_observations = find_children(obs.id, observations) + outcome = determine_outcome(child_observations) + + knowledge.add_decision( + decision=decision.text, + rationale=decision.rationale, + outcome=outcome, + timestamp=obs.startTime + ) + + return knowledge +``` + +### 4.5 Performance Profiling Algorithm + +**Challenge**: Identify bottlenecks in complex execution trees. + +**Solution**: Critical path analysis with parallel detection. 
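+
+Two helpers do most of the work in the profiler below: duration calculation from observation timestamps and critical-path search over the parent-child tree. A sketch of both, assuming `startTime`/`endTime` are `datetime` values (or `None`) and reusing the hypothetical `ObservationTree` sketched earlier:
+
+```python
+from typing import Any, List, Tuple
+
+
+def calculate_duration(obs: Any) -> float:
+    """Duration in seconds; 0.0 when either timestamp is missing."""
+    start = getattr(obs, "startTime", None)
+    end = getattr(obs, "endTime", None)
+    if start is None or end is None:
+        return 0.0
+    return max((end - start).total_seconds(), 0.0)
+
+
+def find_critical_path(tree: Any) -> List[Any]:
+    """Longest-duration root-to-leaf chain through the observation tree."""
+
+    def longest_from(obs: Any) -> Tuple[float, List[Any]]:
+        children = tree.children.get(obs.id, [])
+        if not children:
+            return calculate_duration(obs), [obs]
+        best_cost, best_path = max(
+            (longest_from(child) for child in children),
+            key=lambda pair: pair[0],
+        )
+        return calculate_duration(obs) + best_cost, [obs] + best_path
+
+    roots = tree.roots()
+    if not roots:
+        return []
+    _, path = max((longest_from(root) for root in roots), key=lambda pair: pair[0])
+    return path
+```
+
+A production version would memoize `longest_from` and degrade gracefully when timestamps are missing, but the critical-path idea stays the same.
+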
+ +```python +def profile_performance(observations: List[Observation]) -> PerformanceProfile: + """ + Profile execution performance and identify bottlenecks + """ + profile = PerformanceProfile() + + # Build execution tree + tree = build_observation_tree(observations) + + # Calculate durations + for obs in observations: + duration = calculate_duration(obs) + profile.add_timing( + observation_id=obs.id, + component=obs.name or obs.type, + duration=duration + ) + + # Find critical path (longest dependency chain) + critical_path = find_critical_path(tree) + profile.critical_path_duration = sum( + calculate_duration(obs) for obs in critical_path + ) + + # Detect parallel execution opportunities + for node in tree.nodes: + siblings = tree.get_siblings(node.id) + if len(siblings) > 1: + # Check if siblings could run in parallel + if not has_dependencies(siblings): + parallel_duration = max( + calculate_duration(s) for s in siblings + ) + sequential_duration = sum( + calculate_duration(s) for s in siblings + ) + savings = sequential_duration - parallel_duration + + profile.add_parallelization_opportunity( + observations=siblings, + potential_savings=savings + ) + + # Identify outliers (slow operations) + durations = [calculate_duration(obs) for obs in observations] + mean_duration = statistics.mean(durations) + std_duration = statistics.stdev(durations) + + for obs in observations: + duration = calculate_duration(obs) + if duration > mean_duration + 2 * std_duration: + profile.add_bottleneck( + observation_id=obs.id, + component=obs.name or obs.type, + duration=duration, + expected_duration=mean_duration, + slowdown_factor=duration / mean_duration + ) + + # Calculate optimization potential + profile.total_duration = sum(durations) + profile.parallelization_potential = ( + profile.total_duration - profile.critical_path_duration + ) / profile.total_duration + + return profile +``` + +--- + +## 5. Tool Specifications Summary + +### 5.1 MVP Tools (5 Core Tools) + +| Tool | Purpose | Complexity | Priority | +|------|---------|------------|----------| +| `extract_tool_calls_from_session` | Session-level tool extraction | Medium | P0 | +| `extract_tool_calls_from_trace` | Trace-level tool extraction | Medium | P0 | +| `search_tool_calls_by_keyword` | Content search | Low | P1 | +| `get_tool_call_statistics` | Usage analytics | Low | P1 | +| `reconstruct_execution_timeline` | Temporal ordering | High | P0 | + +### 5.2 Extended Tools (11 Advanced Tools) + +| Tool | Purpose | Complexity | Priority | +|------|---------|------------|----------| +| `extract_conversation_knowledge` | Knowledge extraction | High | P2 | +| `build_knowledge_graph` | Graph generation | High | P2 | +| `analyze_conversation_flow` | Conversation analysis | Medium | P2 | +| `classify_user_intents` | Intent classification | Medium | P2 | +| `extract_llm_plans` | Plan extraction | High | P2 | +| `analyze_tool_selection` | Tool selection analysis | Medium | P2 | +| `profile_execution_performance` | Performance profiling | High | P1 | +| `analyze_cost_efficiency` | Cost analysis | Medium | P1 | +| `detect_redundant_operations` | Redundancy detection | Medium | P1 | +| `trace_error_propagation` | Error tracking | Medium | P2 | +| `analyze_failure_patterns` | Failure analysis | High | P2 | +| `export_training_examples` | Training data export | Medium | P2 | + +--- + +## 6. 
Implementation Roadmap + +### Phase 1: Core Infrastructure (Weeks 1-2) + +**Deliverables**: +- Async API client with connection pooling +- Caching layer (multi-level) +- Tree builder (parent-child relationships) +- Temporal sorter (timestamp-based ordering) +- Basic tool call extractor + +**Success Criteria**: +- Fetch session data in <5s for 100 traces +- Cache hit rate >70% +- Temporal ordering accuracy >95% + +### Phase 2: MVP Tools (Weeks 3-4) + +**Deliverables**: +- Tool 1: `extract_tool_calls_from_session` +- Tool 2: `extract_tool_calls_from_trace` +- Tool 5: `reconstruct_execution_timeline` +- Context association (parent-based) +- Basic error handling + +**Success Criteria**: +- Extract tool calls with >95% accuracy +- Associate context in >90% of cases +- Response time <5s for typical sessions + +### Phase 3: Search & Analytics (Weeks 5-6) + +**Deliverables**: +- Tool 3: `search_tool_calls_by_keyword` +- Tool 4: `get_tool_call_statistics` +- Tool 7: `profile_execution_performance` +- Tool 8: `analyze_cost_efficiency` +- Metrics API integration + +**Success Criteria**: +- Search response time <2s +- Statistics generation <5s for 1000+ tool calls +- Performance profiling identifies bottlenecks + +### Phase 4: Advanced Analytics (Weeks 7-10) + +**Deliverables**: +- Knowledge extraction tools (6, 7) +- Conversation analysis tools (8, 9) +- Plan analysis tools (10, 11) +- Debugging tools (13, 14) +- Graph generation + +**Success Criteria**: +- Knowledge extraction accuracy >80% +- Intent classification accuracy >85% +- Plan adherence tracking functional + +### Phase 5: Optimization & Polish (Weeks 11-12) + +**Deliverables**: +- Streaming for large datasets +- Advanced caching strategies +- Performance optimization +- Documentation and examples +- Testing and validation + +**Success Criteria**: +- Handle sessions with 1000+ traces +- Memory usage <500MB for large sessions +- All tools documented with examples + +--- + +## 7. Technical Stack + +### 7.1 Core Dependencies + +**Python 3.10+**: +- `langfuse` (>=3.0.0) - Official SDK +- `asyncio` - Async I/O +- `aiohttp` - Async HTTP client +- `cachetools` - Caching +- `pydantic` - Data validation + +**Optional**: +- `networkx` - Graph algorithms +- `pandas` - Data analysis +- `numpy` - Numerical operations +- `scikit-learn` - Pattern detection + +### 7.2 Architecture Layers + +``` +┌─────────────────────────────────────┐ +│ MCP Interface Layer │ +│ (Tool registration, routing) │ +└─────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────┐ +│ Business Logic Layer │ +│ (Tool implementations) │ +└─────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────┐ +│ Processing Layer │ +│ (Extraction, analysis, profiling) │ +└─────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────┐ +│ Caching Layer │ +│ (Multi-level cache) │ +└─────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────┐ +│ API Client Layer │ +│ (Langfuse SDK wrapper) │ +└─────────────────────────────────────┘ +``` + +--- + +## 8. 
Success Metrics + +### 8.1 Functional Metrics + +✅ **Tool call extraction accuracy**: >95% +✅ **Context association rate**: >90% +✅ **Temporal ordering accuracy**: >95% +✅ **Knowledge extraction accuracy**: >80% +✅ **Intent classification accuracy**: >85% + +### 8.2 Performance Metrics + +✅ **Response time** (typical query): <5s +✅ **Search response time**: <2s +✅ **Cache hit rate**: >70% +✅ **Memory usage** (large session): <500MB +✅ **API call optimization**: 50% reduction vs naive approach + +### 8.3 Quality Metrics + +✅ **Error handling**: Graceful degradation for missing data +✅ **Documentation**: All tools with examples +✅ **Test coverage**: >80% +✅ **User satisfaction**: Positive feedback from beta users + +--- + +## 9. Comparison with v0 Report + +### 9.1 Changes from v0 + +**Expanded Scope**: +- v0: 5 MVP tools focused on tool call extraction +- v1: 16 tools covering knowledge extraction, conversation analysis, performance optimization + +**Architectural Additions**: +- Async/parallel processing strategy +- Streaming for large datasets +- Multi-level caching with invalidation +- Modular processing pipeline + +**New Use Cases**: +- Knowledge extraction and graph generation +- User-LLM interaction analysis +- Agent plan tracking and analysis +- Performance profiling and optimization +- Debugging and troubleshooting +- Training data generation + +**Algorithm Specifications**: +- Detailed temporal reconstruction algorithm +- Multi-pattern tool call extraction +- Context association strategies +- Knowledge extraction pipeline +- Performance profiling with critical path analysis + +### 9.2 Maintained from v0 + +✅ **Core justification**: Existing servers insufficient +✅ **MVP focus**: Tool call extraction with context +✅ **5 core tools**: Same MVP tool set +✅ **Technical feasibility**: API provides all primitives +✅ **Differentiation**: Clear value over existing servers + +--- + +## 10. Risk Assessment + +### 10.1 Technical Risks + +**Risk 1: API Rate Limits** +- **Impact**: High (blocks functionality) +- **Mitigation**: Aggressive caching, request batching, exponential backoff +- **Likelihood**: Medium + +**Risk 2: Large Dataset Performance** +- **Impact**: Medium (slow responses) +- **Mitigation**: Streaming, pagination, field selection +- **Likelihood**: High + +**Risk 3: Temporal Reconstruction Accuracy** +- **Impact**: Medium (incorrect ordering) +- **Mitigation**: Multi-strategy approach, validation, user feedback +- **Likelihood**: Medium + +**Risk 4: Tool Call Identification False Positives** +- **Impact**: Low (noise in results) +- **Mitigation**: Multiple identification patterns, confidence scores +- **Likelihood**: Medium + +### 10.2 Product Risks + +**Risk 1: Use Case Mismatch** +- **Impact**: High (low adoption) +- **Mitigation**: User research, beta testing, iterative development +- **Likelihood**: Low + +**Risk 2: Complexity Overwhelm** +- **Impact**: Medium (poor UX) +- **Mitigation**: Progressive disclosure, good defaults, documentation +- **Likelihood**: Medium + +--- + +## 11. Conclusion + +This v1 report expands the v0 MVP vision into a **comprehensive trace analytics platform** that addresses multiple high-value use cases: + +1. **Knowledge Extraction**: Build knowledge graphs from conversations +2. **Interaction Analysis**: Understand user-LLM dynamics +3. **Plan Tracking**: Monitor LLM decision-making +4. **Performance Optimization**: Identify bottlenecks and waste +5. **Debugging**: Trace errors and failure patterns +6. 
**Training Data**: Generate high-quality datasets + +**Key Architectural Decisions**: +- Async/parallel processing for performance +- Streaming for scalability +- Intelligent caching for efficiency +- Modular pipeline for flexibility + +**Implementation Strategy**: +- Phase 1-2: Core infrastructure and MVP (4 weeks) +- Phase 3: Search and analytics (2 weeks) +- Phase 4: Advanced analytics (4 weeks) +- Phase 5: Optimization and polish (2 weeks) +- **Total**: 12 weeks to full feature set + +**Differentiation**: This server provides **intelligence and insights**, not just data access. It transforms raw Langfuse observations into actionable knowledge for developers, researchers, and AI agents. + +--- + +**Status**: Ready for stakeholder review and implementation planning +**Next Steps**: +1. Stakeholder review and prioritization +2. Detailed tool specifications for Phase 1-2 +3. Begin core infrastructure development +4. Set up testing and validation framework + +--- + +**Report Version**: v1 (Comprehensive architecture and use cases) +**Changes from v0**: Expanded scope, architectural decisions, 11 additional tools, detailed algorithms diff --git a/__reports__/mcp_langfuse_analysis/03-planning_oriented_retrieval_tools_v0.md b/__reports__/mcp_langfuse_analysis/03-planning_oriented_retrieval_tools_v0.md new file mode 100644 index 0000000..544cbac --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/03-planning_oriented_retrieval_tools_v0.md @@ -0,0 +1,1185 @@ +# Planning-Oriented Retrieval Tools for Langfuse MCP Server + +**Report Date**: 2025-12-03 +**Report Version**: v0 +**Purpose**: Define focused MCP tools for extracting and analyzing LLM planning behavior from Langfuse traces + +--- + +## Executive Summary + +This report defines a focused set of MCP tools specifically designed for **planning-oriented retrieval** from Langfuse traces. The tools enable extraction, reconstruction, and analysis of LLM decision-making processes, tool usage patterns, and execution plans for integration with planning domain systems. + +**Core Focus**: Extract structured planning data from Langfuse traces to support: +- Plan extraction and storage in planning domains +- Tool usage pattern analysis +- Execution timeline reconstruction +- Planning behavior analytics + +**Key Design Principles**: +- Focus on planning-relevant data extraction (not general analytics) +- Support both session-level and trace-level operations +- Enable incremental processing for large datasets +- Provide structured outputs compatible with planning systems + +--- + +## 1. Context and Requirements + +### 1.1 System Integration Context + +The MCP server will integrate into a stack that: +1. **Extracts tool usage plans** from LLM interactions +2. **Stores plans** in a well-defined format +3. **Records individual actions** in a planning domain +4. **Enables planning algorithms** to find plans independently of LLM stochasticity + +### 1.2 Graph Representation Considerations + +**Current State**: Another MCP server (ArangoDB) handles knowledge graph storage. + +**Design Decision**: This server focuses on **retrieving graph-structured data** rather than storing it. 
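+
+To make that split concrete, the retrieval side only has to emit a graph payload that the storage server can ingest. A minimal sketch of serializing extracted tool calls into the adjacency-list shape recommended below (the `ToolCall` fields used here are the ones assumed throughout this report, not a fixed schema):
+
+```python
+from typing import Any, Dict, List
+
+
+def tool_calls_to_graph(tool_calls: List[Any]) -> Dict[str, list]:
+    """One node per tool call, plus 'precedes' edges in timestamp order (sketch)."""
+    ordered = sorted(tool_calls, key=lambda tc: tc.timestamp)
+
+    nodes = [
+        {
+            "id": tc.id,
+            "type": "action",
+            "data": {
+                "tool": tc.name,
+                "status": tc.status,
+                "timestamp": str(tc.timestamp),
+            },
+        }
+        for tc in ordered
+    ]
+    edges = [
+        {"from": prev.id, "to": curr.id, "type": "precedes"}
+        for prev, curr in zip(ordered, ordered[1:])
+    ]
+    return {"nodes": nodes, "edges": edges}
+```
+
+Richer edge types (parent-child, parallel, causal) would come from the timeline and plan tools defined later in this report.
+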
+ +**Graph I/O Alternatives for MCP**: + +```python +# Option 1: Adjacency List (Simple, MCP-friendly) +{ + "nodes": [{"id": "n1", "type": "action", "data": {...}}], + "edges": [{"from": "n1", "to": "n2", "type": "precedes"}] +} + +# Option 2: Nested Structure (Hierarchical plans) +{ + "plan": { + "action": "root_task", + "children": [ + {"action": "subtask_1", "children": [...]}, + {"action": "subtask_2", "children": [...]} + ] + } +} + +# Option 3: DOT Format (Planning tools compatible) +"digraph Plan { n1 -> n2 [label=\"precedes\"]; }" + +# Option 4: JSON-LD (Semantic web compatible) +{ + "@context": {...}, + "@graph": [...] +} +``` + +**Recommendation**: Support multiple formats via `output_format` parameter, defaulting to adjacency list for simplicity. + +### 1.3 Priority Alignment + +All tools in this report are **P0** (critical priority) for the planning-oriented use case: +- `extract_tool_calls_from_session` - P0 +- `extract_tool_calls_from_trace` - P0 +- `search_tool_calls_by_keyword` - P0 +- `get_tool_call_statistics` - P0 +- `reconstruct_execution_timeline` - P0 +- `extract_llm_plans` - P0 + +--- + +## 2. Core Tool Definitions + +### 2.1 Tool Call Extraction Tools + +#### Tool 2.1.1: `extract_tool_calls_from_session` + +**Purpose**: Extract all tool calls from a session with full context for planning analysis. + +**MCP Tool Signature**: +```python +{ + "name": "extract_tool_calls_from_session", + "description": "Extract all tool calls from a Langfuse session with planning context", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string", "description": "Langfuse session ID"}, + "include_context": {"type": "boolean", "default": true, "description": "Include preceding LLM reasoning"}, + "include_results": {"type": "boolean", "default": true, "description": "Include tool execution results"}, + "filter_by_status": {"type": "string", "enum": ["all", "success", "failure"], "default": "all"} + }, + "required": ["session_id"] + } +} +``` + +**Processing Strategy**: + +``` +1. Fetch all traces for session (GET /api/public/traces?sessionId={id}) +2. For each trace, fetch observations (GET /api/public/observations?traceId={id}) +3. Filter observations by type=SPAN (tool executions) +4. For each SPAN, find parent GENERATION (LLM reasoning) +5. Extract tool name, arguments, results, timestamps +6. Build structured tool call records with context +``` + +**REST API Leverage**: +- `GET /api/public/traces?sessionId={id}&fields=core` - Lightweight trace list +- `GET /api/public/observations?traceId={id}&type=SPAN` - Filter tool executions +- `GET /api/public/observations/{id}` - Get full observation details +- Parent-child relationships via `parentObservationId` field + +**Output Structure**: +```json +{ + "session_id": "string", + "total_tool_calls": "number", + "traces_analyzed": "number", + "tool_calls": [ + { + "id": "observation_id", + "trace_id": "string", + "timestamp": "ISO8601", + "tool_name": "string", + "arguments": {}, + "result": {}, + "status": "success|failure", + "duration_seconds": "number", + "context": { + "preceding_llm_output": "string", + "llm_observation_id": "string", + "user_query": "string" + } + } + ] +} +``` + +#### Tool 2.1.2: `extract_tool_calls_from_trace` + +**Purpose**: Extract tool calls from a single trace (finer granularity). 
+ +**MCP Tool Signature**: +```python +{ + "name": "extract_tool_calls_from_trace", + "description": "Extract tool calls from a specific Langfuse trace", + "inputSchema": { + "type": "object", + "properties": { + "trace_id": {"type": "string"}, + "include_context": {"type": "boolean", "default": true}, + "include_results": {"type": "boolean", "default": true} + }, + "required": ["trace_id"] + } +} +``` + + +**Processing Strategy**: +``` +1. Fetch trace details (GET /api/public/traces/{traceId}) +2. Fetch observations (GET /api/public/observations?traceId={id}&type=SPAN) +3. Build observation tree from parent-child relationships +4. Extract tool calls with context from parent GENERATION nodes +5. Return structured tool call data +``` + +**Output Structure**: Same as `extract_tool_calls_from_session` but scoped to single trace. + +--- + +### 2.2 Search and Query Tools + +#### Tool 2.2.1: `search_tool_calls_by_keyword` + +**Purpose**: Search tool calls by content (arguments, results, context) for pattern discovery. + +**MCP Tool Signature**: +```python +{ + "name": "search_tool_calls_by_keyword", + "description": "Search tool calls by keyword in arguments, results, or context", + "inputSchema": { + "type": "object", + "properties": { + "keyword": {"type": "string", "description": "Search term"}, + "search_in": { + "type": "array", + "items": {"type": "string", "enum": ["arguments", "results", "context", "tool_name"]}, + "default": ["arguments", "results", "context"] + }, + "session_id": {"type": "string", "description": "Optional: limit to session"}, + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "limit": {"type": "integer", "default": 100} + }, + "required": ["keyword"] + } +} +``` + +**Processing Strategy**: +``` +1. Query observations with filters (GET /api/public/observations) + - type=SPAN for tool executions + - fromStartTime/toStartTime for time range + - sessionId if provided +2. Fetch observation details with input/output (fields=core,io) +3. Search keyword in specified fields (client-side filtering) +4. Return matching tool calls with context +``` + +**REST API Leverage**: +- `GET /api/public/observations?type=SPAN&fromStartTime={ts}&toStartTime={ts}` +- `filter` parameter for metadata searches +- Pagination for large result sets + +**Output Structure**: +```json +{ + "keyword": "string", + "total_matches": "number", + "matches": [ + { + "tool_call_id": "string", + "trace_id": "string", + "session_id": "string", + "timestamp": "ISO8601", + "tool_name": "string", + "match_location": "arguments|results|context", + "match_snippet": "string", + "full_tool_call": {...} + } + ] +} +``` + +--- + +### 2.3 Analytics and Statistics Tools + +#### Tool 2.3.1: `get_tool_call_statistics` + +**Purpose**: Aggregate statistics on tool usage patterns for planning optimization. 
+ +**MCP Tool Signature**: +```python +{ + "name": "get_tool_call_statistics", + "description": "Get aggregated statistics on tool usage patterns", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string", "description": "Optional: scope to session"}, + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "group_by": { + "type": "string", + "enum": ["tool", "session", "trace", "time_bucket"], + "default": "tool" + }, + "time_bucket": {"type": "string", "enum": ["hour", "day", "week"], "default": "day"} + } + } +} +``` + + +**Processing Strategy**: +``` +1. Use Metrics API for aggregated statistics + POST /api/public/metrics with query structure +2. Query observations for detailed breakdowns +3. Calculate success rates, durations, frequencies +4. Group by specified dimension +5. Return aggregated statistics +``` + +**REST API Leverage**: +- `POST /api/public/metrics` - Primary aggregation endpoint +- Filter by observation type, time range, metadata +- Aggregate functions: count, sum, avg, percentiles +- Time-series grouping with granularity parameter + +**Metrics API Query Example**: +```json +{ + "select": [ + {"column": "name", "agg": null}, + {"column": "latency", "agg": "avg"}, + {"column": "id", "agg": "count"} + ], + "filters": [ + {"column": "type", "operator": "=", "value": "SPAN", "type": "string"} + ], + "groupBy": [{"column": "name", "type": "string"}], + "fromTimestamp": "2024-01-01T00:00:00Z", + "toTimestamp": "2024-12-31T23:59:59Z" +} +``` + +**Output Structure**: +```json +{ + "time_range": {"from": "ISO8601", "to": "ISO8601"}, + "total_tool_calls": "number", + "statistics_by_tool": [ + { + "tool_name": "string", + "call_count": "number", + "success_rate": "number", + "avg_duration_seconds": "number", + "p50_duration": "number", + "p95_duration": "number", + "total_cost_usd": "number", + "first_seen": "ISO8601", + "last_seen": "ISO8601" + } + ], + "time_series": [ + { + "time_bucket": "ISO8601", + "tool_calls": "number", + "unique_tools": "number" + } + ] +} +``` + +--- + +### 2.4 Temporal Reconstruction Tools + +#### Tool 2.4.1: `reconstruct_execution_timeline` + +**Purpose**: Reconstruct the temporal execution order of actions within a session/trace for plan analysis. + +**MCP Tool Signature**: +```python +{ + "name": "reconstruct_execution_timeline", + "description": "Reconstruct temporal execution timeline from observations", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "trace_id": {"type": "string"}, + "include_llm_reasoning": {"type": "boolean", "default": true}, + "include_parallel_detection": {"type": "boolean", "default": true}, + "output_format": { + "type": "string", + "enum": ["timeline", "graph", "gantt"], + "default": "timeline" + } + }, + "oneOf": [ + {"required": ["session_id"]}, + {"required": ["trace_id"]} + ] + } +} +``` + +**Processing Strategy**: +``` +1. Fetch all observations for scope (session or trace) +2. Sort by startTime (primary), then by parent-child relationships +3. Detect parallel executions (overlapping time ranges) +4. Build dependency graph from parent-child links +5. Identify critical path (longest dependency chain) +6. 
Format output according to requested format +``` + +**Temporal Ordering Pseudo-code**: +```python +def reconstruct_timeline(observations): + # Sort by timestamp + sorted_obs = sorted(observations, key=lambda o: o.startTime) + + # Build parent-child map + children_map = defaultdict(list) + for obs in observations: + if obs.parentObservationId: + children_map[obs.parentObservationId].append(obs) + + # Detect parallel executions + parallel_groups = [] + for i, obs1 in enumerate(sorted_obs): + parallel = [obs1] + for obs2 in sorted_obs[i+1:]: + if time_overlap(obs1, obs2) and not is_ancestor(obs1, obs2): + parallel.append(obs2) + if len(parallel) > 1: + parallel_groups.append(parallel) + + return { + "timeline": sorted_obs, + "parallel_groups": parallel_groups, + "dependency_graph": children_map + } +``` + + +**REST API Leverage**: +- `GET /api/public/observations?traceId={id}` - All observations for trace +- `GET /api/public/traces?sessionId={id}` then fetch observations per trace +- `orderBy=startTime.asc` for temporal sorting +- `parentObservationId` field for dependency tracking + +**Output Structure (Timeline Format)**: +```json +{ + "scope": {"type": "session|trace", "id": "string"}, + "total_duration_seconds": "number", + "timeline": [ + { + "sequence_number": "number", + "observation_id": "string", + "type": "SPAN|GENERATION|EVENT", + "name": "string", + "start_time": "ISO8601", + "end_time": "ISO8601", + "duration_seconds": "number", + "parent_id": "string|null", + "depth": "number", + "parallel_group_id": "string|null" + } + ], + "parallel_executions": [ + { + "group_id": "string", + "observations": ["observation_id"], + "time_range": {"start": "ISO8601", "end": "ISO8601"} + } + ], + "critical_path": { + "observations": ["observation_id"], + "total_duration_seconds": "number" + } +} +``` + +**Output Structure (Graph Format)**: +```json +{ + "nodes": [ + { + "id": "observation_id", + "type": "SPAN|GENERATION|EVENT", + "label": "string", + "start_time": "ISO8601", + "duration": "number" + } + ], + "edges": [ + { + "from": "observation_id", + "to": "observation_id", + "type": "parent_child|temporal_sequence|parallel" + } + ] +} +``` + +--- + +### 2.5 Plan Extraction Tools + +#### Tool 2.5.1: `extract_llm_plans` + +**Purpose**: Extract LLM-decided action plans and match them to actual executions. + +**MCP Tool Signature**: +```python +{ + "name": "extract_llm_plans", + "description": "Extract LLM planning decisions and match to executions", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "trace_id": {"type": "string"}, + "plan_detection_strategy": { + "type": "string", + "enum": ["metadata_marker", "output_parsing", "tool_sequence"], + "default": "output_parsing", + "description": "How to identify planning observations" + }, + "include_execution_matching": {"type": "boolean", "default": true}, + "include_deviation_analysis": {"type": "boolean", "default": true} + }, + "oneOf": [ + {"required": ["session_id"]}, + {"required": ["trace_id"]} + ] + } +} +``` + +**Processing Strategy**: +``` +1. Identify planning observations (GENERATION type) + - Strategy 1: Look for metadata markers (e.g., metadata.is_plan=true) + - Strategy 2: Parse LLM output for action lists/sequences + - Strategy 3: Detect tool call sequences in output +2. Extract planned actions from LLM output +3. Fetch subsequent SPAN observations (actual executions) +4. Match planned actions to executed observations +5. Analyze deviations (order changes, skipped, added actions) +6. 
Calculate adherence metrics +``` + +**Plan Detection Pseudo-code**: +```python +def detect_plans(observations): + plans = [] + + for obs in observations: + if obs.type != "GENERATION": + continue + + # Strategy 1: Metadata marker + if obs.metadata.get("is_plan"): + plans.append(extract_plan_from_metadata(obs)) + continue + + # Strategy 2: Output parsing + output = obs.output + if contains_action_list(output): + actions = parse_action_list(output) + plans.append({ + "observation_id": obs.id, + "timestamp": obs.startTime, + "planned_actions": actions + }) + continue + + # Strategy 3: Tool sequence detection + if contains_tool_calls(output): + tools = extract_tool_calls_from_text(output) + plans.append({ + "observation_id": obs.id, + "timestamp": obs.startTime, + "planned_actions": tools + }) + + return plans +``` + + +**Execution Matching Pseudo-code**: +```python +def match_executions(plan, observations): + # Get SPAN observations after plan timestamp + executions = [o for o in observations + if o.type == "SPAN" + and o.startTime > plan["timestamp"]] + + matches = [] + for planned_action in plan["planned_actions"]: + # Find matching execution + match = find_best_match(planned_action, executions) + matches.append({ + "planned": planned_action, + "executed": match, + "matched": match is not None, + "time_delta": match.startTime - plan["timestamp"] if match else None + }) + + # Detect unplanned executions + unplanned = [e for e in executions + if not any(m["executed"] == e for m in matches)] + + return { + "matches": matches, + "unplanned_executions": unplanned, + "adherence_score": len([m for m in matches if m["matched"]]) / len(plan["planned_actions"]) + } +``` + +**REST API Leverage**: +- `GET /api/public/observations?traceId={id}&type=GENERATION` - Planning observations +- `GET /api/public/observations?traceId={id}&type=SPAN` - Execution observations +- `fields=core,io` to get input/output for parsing +- Temporal ordering for sequence matching + +**Output Structure**: +```json +{ + "scope": {"type": "session|trace", "id": "string"}, + "plans_detected": "number", + "plans": [ + { + "plan_id": "string", + "observation_id": "string", + "timestamp": "ISO8601", + "detection_strategy": "metadata_marker|output_parsing|tool_sequence", + "planned_actions": [ + { + "sequence": "number", + "action_type": "tool_call|api_request|computation", + "tool_name": "string", + "arguments": {}, + "rationale": "string" + } + ], + "execution_matching": { + "matched_actions": [ + { + "planned_sequence": "number", + "executed_observation_id": "string", + "execution_sequence": "number", + "time_to_execution_seconds": "number", + "status": "success|failure", + "result": {} + } + ], + "unmatched_planned": [ + {"sequence": "number", "action": "string", "reason": "skipped|failed|not_executed"} + ], + "unplanned_executions": [ + {"observation_id": "string", "tool_name": "string", "reason": "added|recovery|fallback"} + ] + }, + "deviation_analysis": { + "order_preserved": "boolean", + "actions_skipped": "number", + "actions_added": "number", + "adherence_score": "number (0-1)", + "deviations": [ + { + "type": "order_change|skip|addition", + "description": "string", + "impact": "high|medium|low" + } + ] + } + } + ] +} +``` + +--- + +## 3. Additional Planning-Oriented Tools + +### 3.1 Tool: `extract_action_dependencies` + +**Purpose**: Extract dependency relationships between actions for planning domain modeling. 
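+
+The data-flow case hinges on deciding whether one action's output feeds a later action's input. The dependency pseudo-code that follows assumes an `output_used_as_input` helper; a rough string-containment heuristic (purely illustrative, and easy to replace with something stricter) could look like this:
+
+```python
+import json
+
+def output_used_as_input(obs_a, obs_b, min_length: int = 8) -> bool:
+    """Heuristic: does any sufficiently long leaf value of A's output appear in B's input?"""
+    def leaf_values(data):
+        if isinstance(data, dict):
+            for value in data.values():
+                yield from leaf_values(value)
+        elif isinstance(data, list):
+            for value in data:
+                yield from leaf_values(value)
+        elif data is not None:
+            yield str(data)
+
+    input_blob = json.dumps(obs_b.input, default=str)
+    return any(
+        len(value) >= min_length and value in input_blob
+        for value in leaf_values(obs_a.output)
+    )
+```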
+ +**MCP Tool Signature**: +```python +{ + "name": "extract_action_dependencies", + "description": "Extract action dependencies and preconditions from execution traces", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "trace_id": {"type": "string"}, + "dependency_types": { + "type": "array", + "items": {"type": "string", "enum": ["temporal", "data_flow", "causal", "parent_child"]}, + "default": ["temporal", "data_flow", "parent_child"] + }, + "output_format": {"type": "string", "enum": ["graph", "pddl", "json"], "default": "graph"} + }, + "oneOf": [ + {"required": ["session_id"]}, + {"required": ["trace_id"]} + ] + } +} +``` + +**Processing Strategy**: +``` +1. Fetch all observations with parent-child relationships +2. Analyze temporal ordering (A before B) +3. Detect data flow (output of A used as input to B) +4. Identify causal relationships (A triggers B) +5. Build dependency graph +6. Format for planning domain (PDDL-compatible if requested) +``` + +**Dependency Detection Pseudo-code**: +```python +def extract_dependencies(observations): + deps = [] + + for i, obs_a in enumerate(observations): + for obs_b in observations[i+1:]: + # Temporal dependency + if obs_a.endTime < obs_b.startTime: + deps.append({"from": obs_a.id, "to": obs_b.id, "type": "temporal"}) + + # Data flow dependency + if output_used_as_input(obs_a, obs_b): + deps.append({"from": obs_a.id, "to": obs_b.id, "type": "data_flow"}) + + # Parent-child dependency + if obs_b.parentObservationId == obs_a.id: + deps.append({"from": obs_a.id, "to": obs_b.id, "type": "parent_child"}) + + return deps +``` + + +**Output Structure (Graph Format)**: +```json +{ + "nodes": [ + { + "id": "observation_id", + "action": "string", + "type": "SPAN|GENERATION", + "preconditions": ["condition"], + "effects": ["effect"] + } + ], + "edges": [ + { + "from": "observation_id", + "to": "observation_id", + "dependency_type": "temporal|data_flow|causal|parent_child", + "strength": "number (0-1)" + } + ] +} +``` + +**Output Structure (PDDL Format)**: +```lisp +(define (domain extracted_plan) + (:requirements :strips :typing) + (:types action) + (:predicates + (completed ?a - action) + (precedes ?a1 ?a2 - action)) + (:action execute_tool_call + :parameters (?tool - action) + :precondition (and ...) + :effect (completed ?tool))) +``` + +--- + +### 3.2 Tool: `analyze_plan_success_patterns` + +**Purpose**: Identify patterns in successful vs failed plan executions for optimization. + +**MCP Tool Signature**: +```python +{ + "name": "analyze_plan_success_patterns", + "description": "Analyze patterns in successful vs failed plan executions", + "inputSchema": { + "type": "object", + "properties": { + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "min_plan_occurrences": {"type": "integer", "default": 3}, + "include_failure_analysis": {"type": "boolean", "default": true} + }, + "required": ["from_timestamp", "to_timestamp"] + } +} +``` + +**Processing Strategy**: +``` +1. Extract all plans in time range using extract_llm_plans +2. Group plans by similarity (tool sequence, structure) +3. Calculate success rates per plan pattern +4. Identify common failure points +5. Analyze context differences between success/failure +6. 
Generate optimization recommendations +``` + +**Output Structure**: +```json +{ + "time_range": {"from": "ISO8601", "to": "ISO8601"}, + "total_plans_analyzed": "number", + "plan_patterns": [ + { + "pattern_id": "string", + "tool_sequence": ["tool1", "tool2", "tool3"], + "occurrences": "number", + "success_rate": "number", + "avg_execution_time": "number", + "common_contexts": ["context_description"], + "success_factors": ["factor"], + "failure_points": [ + { + "step": "number", + "tool": "string", + "failure_rate": "number", + "common_errors": ["error_message"] + } + ] + } + ], + "recommendations": [ + { + "pattern_id": "string", + "recommendation": "string", + "expected_improvement": "string", + "priority": "high|medium|low" + } + ] +} +``` + +--- + +### 3.3 Tool: `export_planning_domain` + +**Purpose**: Export extracted plans and actions in planning domain format for external planning systems. + +**MCP Tool Signature**: +```python +{ + "name": "export_planning_domain", + "description": "Export plans and actions in planning domain format", + "inputSchema": { + "type": "object", + "properties": { + "session_ids": {"type": "array", "items": {"type": "string"}}, + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "format": { + "type": "string", + "enum": ["pddl", "json", "graphml", "dot"], + "default": "json" + }, + "include_metadata": {"type": "boolean", "default": true} + } + } +} +``` + +**Processing Strategy**: +``` +1. Extract all plans and executions from specified scope +2. Build unified action library (all unique actions) +3. Extract preconditions and effects from observations +4. Generate domain definition in requested format +5. Include problem instances (specific plan executions) +6. Add metadata for traceability +``` + +**Output Structure (JSON Format)**: +```json +{ + "domain": { + "name": "langfuse_extracted_domain", + "actions": [ + { + "name": "string", + "parameters": {}, + "preconditions": ["condition"], + "effects": ["effect"], + "cost": "number", + "avg_duration": "number", + "success_rate": "number", + "source_observations": ["observation_id"] + } + ], + "predicates": ["predicate_definition"] + }, + "problems": [ + { + "name": "string", + "initial_state": ["predicate"], + "goal_state": ["predicate"], + "plan": ["action_sequence"], + "source_session": "string", + "source_trace": "string" + } + ], + "metadata": { + "extraction_timestamp": "ISO8601", + "source_sessions": ["session_id"], + "total_plans": "number", + "total_actions": "number" + } +} +``` + +--- + +## 4. 
Common Processing Patterns + +### 4.1 Async Batch Processing + +All tools that process multiple traces/sessions should use async batch processing: + +```python +async def process_session(session_id: str): + # Fetch trace IDs + traces = await api.get_traces(sessionId=session_id, fields="core") + + # Batch fetch observations + semaphore = asyncio.Semaphore(10) # Rate limiting + async def fetch_with_limit(trace_id): + async with semaphore: + return await api.get_observations(traceId=trace_id) + + observations = await asyncio.gather(*[ + fetch_with_limit(t.id) for t in traces + ]) + + return process_observations(observations) +``` + + +### 4.2 Observation Tree Building + +Common pattern for reconstructing hierarchical structure: + +```python +def build_observation_tree(observations: List[Observation]): + # Index by ID + obs_map = {o.id: o for o in observations} + + # Build children map + children = defaultdict(list) + roots = [] + + for obs in observations: + if obs.parentObservationId: + children[obs.parentObservationId].append(obs) + else: + roots.append(obs) + + # Attach children + for obs in observations: + obs.children = children.get(obs.id, []) + + return roots # Return root observations +``` + +### 4.3 Tool Call Identification + +Common pattern for identifying tool executions: + +```python +def identify_tool_calls(observations: List[Observation]): + tool_calls = [] + + for obs in observations: + # Tool calls are SPAN observations + if obs.type != "SPAN": + continue + + # Find parent GENERATION for context + parent = find_parent_generation(obs, observations) + + tool_calls.append({ + "observation": obs, + "tool_name": obs.name, + "arguments": parse_input(obs.input), + "result": parse_output(obs.output), + "context": parent.output if parent else None, + "timestamp": obs.startTime, + "duration": (obs.endTime - obs.startTime).total_seconds() + }) + + return tool_calls +``` + +### 4.4 Caching Strategy + +Implement multi-level caching for performance: + +```python +# Level 1: Raw API responses (1 hour TTL) +@cache(ttl=3600) +async def fetch_trace_cached(trace_id: str): + return await api.get_trace(trace_id) + +# Level 2: Processed structures (6 hour TTL) +@cache(ttl=21600) +def extract_tool_calls_cached(trace_id: str): + trace = fetch_trace_cached(trace_id) + return extract_tool_calls(trace) + +# Level 3: Analytics (24 hour TTL) +@cache(ttl=86400) +def get_statistics_cached(session_id: str): + tool_calls = extract_tool_calls_cached(session_id) + return calculate_statistics(tool_calls) +``` + +--- + +## 5. 
REST API Usage Patterns + +### 5.1 Efficient Filtering + +Use API filters to reduce data transfer: + +```python +# Good: Filter at API level +observations = api.get_observations( + traceId=trace_id, + type="SPAN", # Only tool executions + fromStartTime=start_time, + toStartTime=end_time, + fields="core,io" # Only needed fields +) + +# Bad: Fetch everything and filter client-side +all_observations = api.get_observations(traceId=trace_id) +filtered = [o for o in all_observations if o.type == "SPAN"] +``` + +### 5.2 Pagination Handling + +Handle large result sets with pagination: + +```python +async def fetch_all_observations(trace_id: str): + all_obs = [] + page = 1 + + while True: + response = await api.get_observations( + traceId=trace_id, + page=page, + limit=100 + ) + + all_obs.extend(response.data) + + if len(response.data) < 100: # Last page + break + + page += 1 + + return all_obs +``` + +### 5.3 Metrics API for Aggregation + +Use Metrics API for heavy aggregations: + +```python +# Query tool usage statistics +metrics_query = { + "select": [ + {"column": "name", "agg": None}, + {"column": "latency", "agg": "avg"}, + {"column": "totalCost", "agg": "sum"}, + {"column": "id", "agg": "count"} + ], + "filters": [ + {"column": "type", "operator": "=", "value": "SPAN", "type": "string"} + ], + "groupBy": [{"column": "name", "type": "string"}], + "fromTimestamp": from_ts, + "toTimestamp": to_ts +} + +result = await api.post_metrics(query=metrics_query) +``` + +--- + +## 6. Tool Priority and Implementation Order + +### Phase 1: Core Extraction (P0) +1. `extract_tool_calls_from_trace` - Foundation for all other tools +2. `extract_tool_calls_from_session` - Session-level extraction +3. `reconstruct_execution_timeline` - Temporal ordering + +### Phase 2: Planning Analysis (P0) +4. `extract_llm_plans` - Plan detection and matching +5. `extract_action_dependencies` - Dependency graph building + +### Phase 3: Search and Analytics (P0) +6. `search_tool_calls_by_keyword` - Content search +7. `get_tool_call_statistics` - Usage analytics + +### Phase 4: Advanced Features (P1) +8. `analyze_plan_success_patterns` - Pattern analysis +9. `export_planning_domain` - Domain export + +--- + +## 7. Integration Considerations + +### 7.1 Planning Domain Integration + +Output formats should be compatible with: +- **PDDL planners** (Fast Downward, FF, LAMA) +- **Graph databases** (ArangoDB via existing MCP server) +- **Custom planning systems** (JSON/adjacency list format) + +### 7.2 Incremental Processing + +Support incremental updates for active sessions: +- Track last processed timestamp +- Fetch only new observations since last update +- Merge with existing extracted data +- Invalidate affected caches + +### 7.3 Error Handling + +Robust error handling for production use: +- Graceful degradation (partial results on API errors) +- Retry logic with exponential backoff +- Clear error messages with context +- Logging for debugging + +--- + +## 8. Next Steps + +### 8.1 Validation Requirements + +Before implementation: +1. **Stakeholder review** of tool definitions +2. **API capability verification** (test all required endpoints) +3. **Output format validation** (ensure planning system compatibility) +4. **Performance estimation** (expected latency for typical workloads) + +### 8.2 Implementation Preparation + +1. **Test data preparation** - Create sample Langfuse traces with known plans +2. **API client setup** - Async HTTP client with authentication +3. 
**Schema definitions** - Pydantic models for all data structures +4. **Unit test framework** - Test individual processing functions + +### 8.3 Documentation Needs + +1. **Tool usage examples** - Real-world scenarios for each tool +2. **API integration guide** - How to call tools from planning systems +3. **Performance tuning guide** - Optimization strategies +4. **Troubleshooting guide** - Common issues and solutions + +--- + +## Summary + +This report defines **9 focused MCP tools** for planning-oriented retrieval from Langfuse traces: + +**Core Tools (P0)**: +- Tool call extraction (session and trace level) +- Execution timeline reconstruction +- LLM plan extraction and matching +- Action dependency extraction +- Content search and statistics + +**Advanced Tools (P1)**: +- Plan success pattern analysis +- Planning domain export + +All tools are designed to support the integration with planning domain systems, with emphasis on: +- Structured output formats (graph, timeline, PDDL) +- Efficient API usage (filtering, pagination, caching) +- Async processing for performance +- Planning-relevant data extraction + +The tools provide the foundation for extracting LLM planning behavior and storing it in a format suitable for deterministic planning algorithms. diff --git a/__reports__/mcp_langfuse_analysis/03-planning_oriented_retrieval_tools_v1.md b/__reports__/mcp_langfuse_analysis/03-planning_oriented_retrieval_tools_v1.md new file mode 100644 index 0000000..87c0d7d --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/03-planning_oriented_retrieval_tools_v1.md @@ -0,0 +1,1762 @@ +# Planning-Oriented Retrieval Tools for Langfuse MCP Server + +**Report Date**: 2025-12-03 +**Report Version**: v1 +**Purpose**: Define focused MCP tools for extracting and analyzing LLM planning behavior from Langfuse traces + +--- + +## Changes from v0 + +### Major Additions +1. **Tool call detection flexibility**: Support both SPAN observations and metadata-based tool calls (LiteLLM pattern) +2. **ID retrieval tools**: New tools for finding session/trace IDs via timestamps and keywords +3. **LLM-assisted analysis**: Use LLM sampling for plan detection and causal relationship inference +4. **Performance optimization**: Explicit asyncio patterns with rate limiting and caching guidance +5. **Format-agnostic exports**: Abstract export formats for future extensibility + +### Refinements +- Clarified parent-child vs causal terminology +- Added RPM configuration as tool parameter +- Expanded pseudo-code with concurrency patterns +- Removed specific format implementations (PDDL/GraphML) in favor of abstraction +- Added preconditions/effects extraction for action definitions + +### Structural Changes +- New section on tool call detection strategies (SPAN vs metadata) +- New section on ID retrieval patterns +- Enhanced performance considerations with asyncio + rate limiting +- Simplified export format discussion + +--- + +## Executive Summary + +This report defines **11 focused MCP tools** for **planning-oriented retrieval** from Langfuse traces. The tools enable extraction, reconstruction, and analysis of LLM decision-making processes for integration with planning domain systems. 
+ +**Core Focus**: +- Extract tool calls from both SPAN observations and LLM output metadata +- Reconstruct execution timelines with dependency tracking +- Use LLM sampling for plan detection and causal analysis +- Support high-performance async I/O with rate limiting +- Provide format-agnostic exports for planning systems + +**Key Design Principles**: +- Flexible tool call detection (SPAN + metadata patterns) +- Human-friendly ID retrieval (timestamps, keywords) +- LLM-assisted semantic analysis where programmatic parsing fails +- Async I/O with explicit rate limiting and caching +- Format abstraction for future extensibility + +--- + +## 1. Context and Requirements + +### 1.1 System Integration Context + +The MCP server integrates into a stack that: +1. **Extracts tool usage plans** from LLM interactions +2. **Stores plans** in well-defined formats +3. **Records individual actions** in a planning domain +4. **Enables planning algorithms** to find plans independently of LLM stochasticity + +### 1.2 Tool Call Detection Strategies + +**Challenge**: Different Langfuse instrumentation patterns store tool calls differently. + +**Pattern 1: SPAN Observations** (Standard Langfuse instrumentation) +```python +# Tool calls are separate SPAN observations +{ + "type": "SPAN", + "name": "search_database", + "input": {"query": "user data"}, + "output": {"results": [...]}, + "parentObservationId": "generation_123" +} +``` + +**Pattern 2: Metadata in GENERATION** (LiteLLM pattern) +```python +# Tool calls embedded in LLM generation metadata +{ + "type": "GENERATION", + "output": "I'll search the database...", + "metadata": { + "tool_calls": [ + { + "name": "search_database", + "arguments": {"query": "user data"}, + "result": {"results": [...]} + } + ] + } +} +``` + +**Solution**: Tools must support both patterns via detection strategy parameter. + +### 1.3 ID Retrieval Challenge + +**Problem**: Session/trace IDs are not human-friendly and difficult to retrieve manually. + +**Solution**: Provide tools for ID discovery via: +- **Temporal queries**: Find sessions/traces by timestamp ranges +- **Keyword search**: Find sessions/traces by content +- **User-based queries**: Find sessions by user ID +- **Tag-based queries**: Find sessions by tags/metadata + +### 1.4 Graph Representation for Planning + +**Current State**: ArangoDB MCP server handles graph storage. + +**Design Decision**: Focus on retrieving graph-structured data with format abstraction. + +**Format Abstraction Pattern**: +```python +# Abstract export interface +class PlanningDomainExporter: + def export(self, data, format_spec: str) -> str: + """ + format_spec: Grammar/schema provided via MCP resources + Returns: Formatted output validated against grammar + """ + pass +``` + +**Supported Patterns**: +- Adjacency list (JSON) - Default, simple +- Grammar-based generation (PDDL, custom) - LLM-assisted with validation +- Graph formats (DOT, GraphML) - For visualization/analysis + +--- + +## 2. Core Tool Definitions + +### 2.1 ID Retrieval Tools + +#### Tool 2.1.1: `find_sessions_by_timerange` + +**Purpose**: Find session IDs within a time range for human-friendly access. 
+ +**MCP Tool Signature**: +```python +{ + "name": "find_sessions_by_timerange", + "description": "Find Langfuse session IDs within a time range", + "inputSchema": { + "type": "object", + "properties": { + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "user_id": {"type": "string", "description": "Optional: filter by user"}, + "environment": {"type": "string", "description": "Optional: filter by environment"}, + "limit": {"type": "integer", "default": 50}, + "rpm_limit": {"type": "integer", "default": 60, "description": "Requests per minute limit"} + }, + "required": ["from_timestamp", "to_timestamp"] + } +} +``` + + +**Processing Strategy**: +```python +async def find_sessions_by_timerange(from_ts, to_ts, user_id=None, rpm_limit=60): + # Rate limiter: rpm_limit requests per minute + rate_limiter = AsyncRateLimiter(rpm_limit) + + async with rate_limiter: + sessions = await api.get_sessions( + fromTimestamp=from_ts, + toTimestamp=to_ts, + limit=100 + ) + + # Filter by user_id if provided + if user_id: + sessions = [s for s in sessions if s.userId == user_id] + + return { + "sessions": [ + { + "session_id": s.id, + "created_at": s.createdAt, + "user_id": s.userId, + "trace_count": len(s.traces) + } + for s in sessions + ] + } +``` + +**REST API Leverage**: +- `GET /api/public/sessions?fromTimestamp={ts}&toTimestamp={ts}` +- Pagination for large result sets +- Filter parameters for user_id, environment + +**Output Structure**: +```json +{ + "time_range": {"from": "ISO8601", "to": "ISO8601"}, + "total_sessions": "number", + "sessions": [ + { + "session_id": "string", + "created_at": "ISO8601", + "user_id": "string", + "trace_count": "number", + "environment": "string" + } + ] +} +``` + +#### Tool 2.1.2: `find_traces_by_keyword` + +**Purpose**: Find trace IDs by searching content (user queries, LLM outputs, tool names). 
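+
+The client-side filtering step in the strategy below relies on a `keyword_matches` helper; a minimal case-insensitive version (illustrative, substring-based) might look like this:
+
+```python
+import json
+
+def keyword_matches(obs, keyword: str, search_in: list[str]) -> bool:
+    """Illustrative matcher: case-insensitive substring search over the selected fields."""
+    needle = keyword.lower()
+    fields = {
+        "input": obs.input,
+        "output": obs.output,
+        "name": obs.name,
+        "metadata": obs.metadata,
+    }
+    for field in search_in:
+        value = fields.get(field)
+        if value is None:
+            continue
+        haystack = value if isinstance(value, str) else json.dumps(value, default=str)
+        if needle in haystack.lower():
+            return True
+    return False
+```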
+ +**MCP Tool Signature**: +```python +{ + "name": "find_traces_by_keyword", + "description": "Find trace IDs by searching observation content", + "inputSchema": { + "type": "object", + "properties": { + "keyword": {"type": "string"}, + "search_in": { + "type": "array", + "items": {"type": "string", "enum": ["input", "output", "name", "metadata"]}, + "default": ["input", "output", "name"] + }, + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "limit": {"type": "integer", "default": 50}, + "rpm_limit": {"type": "integer", "default": 60} + }, + "required": ["keyword"] + } +} +``` + +**Processing Strategy**: +```python +async def find_traces_by_keyword(keyword, search_in, from_ts=None, to_ts=None, rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + + # Fetch observations with filters + async with rate_limiter: + observations = await api.get_observations( + fromStartTime=from_ts, + toStartTime=to_ts, + limit=100 + ) + + # Client-side keyword search (API doesn't support full-text search) + matching_traces = set() + for obs in observations: + if keyword_matches(obs, keyword, search_in): + matching_traces.add(obs.traceId) + + # Fetch trace details for matches + traces = [] + async with rate_limiter: + for trace_id in matching_traces: + trace = await api.get_trace(trace_id) + traces.append(trace) + + return {"traces": traces} +``` + +**Output Structure**: +```json +{ + "keyword": "string", + "total_matches": "number", + "traces": [ + { + "trace_id": "string", + "session_id": "string", + "timestamp": "ISO8601", + "name": "string", + "match_context": "string" + } + ] +} +``` + +--- + +### 2.2 Tool Call Extraction Tools + +#### Tool 2.2.1: `extract_tool_calls_from_session` + +**Purpose**: Extract all tool calls from a session with full context, supporting both SPAN and metadata patterns. 
+ +**MCP Tool Signature**: +```python +{ + "name": "extract_tool_calls_from_session", + "description": "Extract all tool calls from a Langfuse session", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "detection_strategy": { + "type": "string", + "enum": ["auto", "span_only", "metadata_only"], + "default": "auto", + "description": "How to detect tool calls" + }, + "include_context": {"type": "boolean", "default": true}, + "include_results": {"type": "boolean", "default": true}, + "filter_by_status": {"type": "string", "enum": ["all", "success", "failure"], "default": "all"}, + "rpm_limit": {"type": "integer", "default": 60} + }, + "required": ["session_id"] + } +} +``` + + +**Processing Strategy with Async + Rate Limiting**: +```python +async def extract_tool_calls_from_session(session_id, detection_strategy="auto", rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + cache = LRUCache(maxsize=1000) + + # Step 1: Fetch all traces for session (lightweight) + async with rate_limiter: + traces = await api.get_traces(sessionId=session_id, fields="core") + + # Step 2: Batch fetch observations with concurrency control + semaphore = asyncio.Semaphore(10) # Max 10 concurrent requests + + async def fetch_observations_with_limit(trace_id): + async with semaphore, rate_limiter: + # Check cache first + if cached := cache.get(trace_id): + return cached + + obs = await api.get_observations(traceId=trace_id) + cache.set(trace_id, obs) + return obs + + # Parallel fetch all observations + all_observations = await asyncio.gather(*[ + fetch_observations_with_limit(t.id) for t in traces + ]) + + # Step 3: Extract tool calls based on strategy + tool_calls = [] + for observations in all_observations: + if detection_strategy in ["auto", "span_only"]: + tool_calls.extend(extract_from_spans(observations)) + + if detection_strategy in ["auto", "metadata_only"]: + tool_calls.extend(extract_from_metadata(observations)) + + return {"tool_calls": tool_calls} + +def extract_from_spans(observations): + """Extract tool calls from SPAN observations""" + tool_calls = [] + obs_map = {o.id: o for o in observations} + + for obs in observations: + if obs.type != "SPAN": + continue + + # Find parent GENERATION for context + parent = obs_map.get(obs.parentObservationId) + + tool_calls.append({ + "id": obs.id, + "tool_name": obs.name, + "arguments": parse_input(obs.input), + "result": parse_output(obs.output), + "timestamp": obs.startTime, + "duration_seconds": (obs.endTime - obs.startTime).total_seconds(), + "context": parent.output if parent and parent.type == "GENERATION" else None + }) + + return tool_calls + +def extract_from_metadata(observations): + """Extract tool calls from GENERATION metadata (LiteLLM pattern)""" + tool_calls = [] + + for obs in observations: + if obs.type != "GENERATION": + continue + + # Check for tool_calls in metadata + if tool_calls_meta := obs.metadata.get("tool_calls"): + for tc in tool_calls_meta: + tool_calls.append({ + "id": f"{obs.id}_tool_{tc['name']}", + "tool_name": tc["name"], + "arguments": tc.get("arguments", {}), + "result": tc.get("result", {}), + "timestamp": obs.startTime, + "context": obs.output + }) + + return tool_calls +``` + +**REST API Leverage**: +- `GET /api/public/traces?sessionId={id}&fields=core` - Lightweight trace list +- `GET /api/public/observations?traceId={id}` - Batch fetch with asyncio.gather +- Rate limiting via semaphore + custom rate limiter +- Caching to avoid redundant API calls + +**Output Structure**: 
+```json +{ + "session_id": "string", + "total_tool_calls": "number", + "traces_analyzed": "number", + "detection_strategy_used": "auto|span_only|metadata_only", + "tool_calls": [ + { + "id": "string", + "trace_id": "string", + "timestamp": "ISO8601", + "tool_name": "string", + "arguments": {}, + "result": {}, + "status": "success|failure", + "duration_seconds": "number", + "context": { + "preceding_llm_output": "string", + "llm_observation_id": "string" + } + } + ] +} +``` + +#### Tool 2.2.2: `extract_tool_calls_from_trace` + +**Purpose**: Extract tool calls from a single trace (finer granularity). + +**MCP Tool Signature**: Similar to `extract_tool_calls_from_session` but with `trace_id` parameter. + +**Processing Strategy**: Same as session-level but scoped to single trace (simpler, no batch fetching needed). + +--- + +### 2.3 Temporal Reconstruction Tools + +#### Tool 2.3.1: `reconstruct_execution_timeline` + +**Purpose**: Reconstruct temporal execution order with parallelism detection. + +**MCP Tool Signature**: +```python +{ + "name": "reconstruct_execution_timeline", + "description": "Reconstruct temporal execution timeline from observations", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "trace_id": {"type": "string"}, + "include_llm_reasoning": {"type": "boolean", "default": true}, + "include_parallel_detection": {"type": "boolean", "default": true}, + "output_format": { + "type": "string", + "enum": ["timeline", "graph"], + "default": "timeline" + }, + "rpm_limit": {"type": "integer", "default": 60} + }, + "oneOf": [ + {"required": ["session_id"]}, + {"required": ["trace_id"]} + ] + } +} +``` + + +**Processing Strategy**: +```python +async def reconstruct_timeline(scope_id, scope_type, rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + + # Fetch all observations + async with rate_limiter: + if scope_type == "session": + traces = await api.get_traces(sessionId=scope_id) + observations = [] + for trace in traces: + obs = await api.get_observations(traceId=trace.id) + observations.extend(obs) + else: + observations = await api.get_observations(traceId=scope_id) + + # Sort by timestamp + sorted_obs = sorted(observations, key=lambda o: o.startTime) + + # Build parent-child map + children_map = defaultdict(list) + for obs in observations: + if obs.parentObservationId: + children_map[obs.parentObservationId].append(obs) + + # Detect parallel executions + parallel_groups = detect_parallel_executions(sorted_obs) + + # Build timeline + timeline = [] + for i, obs in enumerate(sorted_obs): + timeline.append({ + "sequence_number": i + 1, + "observation_id": obs.id, + "type": obs.type, + "name": obs.name, + "start_time": obs.startTime, + "end_time": obs.endTime, + "duration_seconds": (obs.endTime - obs.startTime).total_seconds(), + "parent_id": obs.parentObservationId, + "depth": calculate_depth(obs, children_map), + "parallel_group_id": find_parallel_group(obs, parallel_groups) + }) + + return {"timeline": timeline, "parallel_groups": parallel_groups} + +def detect_parallel_executions(observations): + """Detect observations that executed in parallel""" + parallel_groups = [] + + for i, obs1 in enumerate(observations): + parallel = [obs1] + for obs2 in observations[i+1:]: + # Check time overlap + if (obs1.startTime < obs2.endTime and + obs2.startTime < obs1.endTime): + # Ensure not parent-child + if not is_ancestor(obs1, obs2): + parallel.append(obs2) + + if len(parallel) > 1: + parallel_groups.append({ + "group_id": f"parallel_{i}", 
+ "observations": [o.id for o in parallel], + "time_range": { + "start": min(o.startTime for o in parallel), + "end": max(o.endTime for o in parallel) + } + }) + + return parallel_groups +``` + +**Output Structure (Timeline Format)**: +```json +{ + "scope": {"type": "session|trace", "id": "string"}, + "total_duration_seconds": "number", + "timeline": [ + { + "sequence_number": "number", + "observation_id": "string", + "type": "SPAN|GENERATION|EVENT", + "name": "string", + "start_time": "ISO8601", + "end_time": "ISO8601", + "duration_seconds": "number", + "parent_id": "string|null", + "depth": "number", + "parallel_group_id": "string|null" + } + ], + "parallel_executions": [ + { + "group_id": "string", + "observations": ["observation_id"], + "time_range": {"start": "ISO8601", "end": "ISO8601"} + } + ] +} +``` + +**Output Structure (Graph Format)**: +```json +{ + "nodes": [ + { + "id": "observation_id", + "type": "SPAN|GENERATION|EVENT", + "label": "string", + "start_time": "ISO8601", + "duration": "number" + } + ], + "edges": [ + { + "from": "observation_id", + "to": "observation_id", + "type": "parent_child|temporal_sequence|parallel" + } + ] +} +``` + +--- + +### 2.4 Search and Statistics Tools + +#### Tool 2.4.1: `search_tool_calls_by_keyword` + +**Purpose**: Search tool calls by content for pattern discovery. + +**MCP Tool Signature**: +```python +{ + "name": "search_tool_calls_by_keyword", + "description": "Search tool calls by keyword in arguments, results, or context", + "inputSchema": { + "type": "object", + "properties": { + "keyword": {"type": "string"}, + "search_in": { + "type": "array", + "items": {"type": "string", "enum": ["arguments", "results", "context", "tool_name"]}, + "default": ["arguments", "results", "context"] + }, + "session_id": {"type": "string"}, + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "limit": {"type": "integer", "default": 100}, + "rpm_limit": {"type": "integer", "default": 60} + }, + "required": ["keyword"] + } +} +``` + +**Processing Strategy**: Similar to `extract_tool_calls_from_session` but with keyword filtering. + +#### Tool 2.4.2: `get_tool_call_statistics` + +**Purpose**: Aggregate statistics on tool usage patterns. 
+ +**MCP Tool Signature**: +```python +{ + "name": "get_tool_call_statistics", + "description": "Get aggregated statistics on tool usage patterns", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "group_by": { + "type": "string", + "enum": ["tool", "session", "trace", "time_bucket"], + "default": "tool" + }, + "time_bucket": {"type": "string", "enum": ["hour", "day", "week"], "default": "day"}, + "rpm_limit": {"type": "integer", "default": 60} + } + } +} +``` + + +**Processing Strategy with Metrics API**: +```python +async def get_tool_call_statistics(from_ts, to_ts, group_by="tool", rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + + # Use Metrics API for aggregation + metrics_query = { + "select": [ + {"column": "name", "agg": None}, + {"column": "latency", "agg": "avg"}, + {"column": "totalCost", "agg": "sum"}, + {"column": "id", "agg": "count"} + ], + "filters": [ + {"column": "type", "operator": "=", "value": "SPAN", "type": "string"} + ], + "groupBy": [{"column": "name", "type": "string"}], + "fromTimestamp": from_ts, + "toTimestamp": to_ts + } + + async with rate_limiter: + result = await api.post_metrics(query=metrics_query) + + # Process results + statistics = [] + for row in result.data: + statistics.append({ + "tool_name": row["name"], + "call_count": row["count"], + "avg_duration_seconds": row["avg_latency"], + "total_cost_usd": row["sum_totalCost"] + }) + + return {"statistics_by_tool": statistics} +``` + +**Output Structure**: +```json +{ + "time_range": {"from": "ISO8601", "to": "ISO8601"}, + "total_tool_calls": "number", + "statistics_by_tool": [ + { + "tool_name": "string", + "call_count": "number", + "success_rate": "number", + "avg_duration_seconds": "number", + "p50_duration": "number", + "p95_duration": "number", + "total_cost_usd": "number" + } + ] +} +``` + +--- + +### 2.5 LLM-Assisted Plan Extraction Tools + +#### Tool 2.5.1: `extract_llm_plans` + +**Purpose**: Extract LLM-decided action plans using LLM sampling for semantic analysis. + +**MCP Tool Signature**: +```python +{ + "name": "extract_llm_plans", + "description": "Extract LLM planning decisions using LLM-assisted analysis", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "trace_id": {"type": "string"}, + "use_llm_sampling": {"type": "boolean", "default": true}, + "include_execution_matching": {"type": "boolean", "default": true}, + "include_deviation_analysis": {"type": "boolean", "default": true}, + "rpm_limit": {"type": "integer", "default": 60} + }, + "oneOf": [ + {"required": ["session_id"]}, + {"required": ["trace_id"]} + ] + } +} +``` + +**Processing Strategy with LLM Sampling**: +```python +async def extract_llm_plans(scope_id, scope_type, use_llm_sampling=True, rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + + # Step 1: Fetch observations + async with rate_limiter: + observations = await fetch_observations_for_scope(scope_id, scope_type) + + # Step 2: Identify potential planning observations (GENERATION type) + generation_obs = [o for o in observations if o.type == "GENERATION"] + + plans = [] + + if use_llm_sampling: + # Use LLM to analyze each generation for planning content + for obs in generation_obs: + plan_analysis = await analyze_with_llm( + observation=obs, + prompt=""" + Analyze this LLM output and determine: + 1. 
Does it contain a plan or action sequence? (yes/no) + 2. If yes, extract the planned actions with: + - Action name/tool + - Arguments + - Rationale + + Output JSON format: + { + "is_plan": boolean, + "planned_actions": [ + {"action": "string", "tool": "string", "arguments": {}, "rationale": "string"} + ] + } + """, + context={"preceding_observations": get_context(obs, observations)} + ) + + if plan_analysis["is_plan"]: + plans.append({ + "observation_id": obs.id, + "timestamp": obs.startTime, + "planned_actions": plan_analysis["planned_actions"], + "detection_method": "llm_sampling" + }) + else: + # Fallback: Simple heuristics (less reliable) + for obs in generation_obs: + if contains_action_keywords(obs.output): + plans.append(extract_plan_heuristic(obs)) + + # Step 3: Match plans to executions + if include_execution_matching: + for plan in plans: + plan["execution_matching"] = await match_plan_to_executions( + plan, observations, rate_limiter + ) + + return {"plans": plans} + +async def analyze_with_llm(observation, prompt, context): + """Use LLM sampling to analyze observation content""" + # Construct analysis prompt with observation content + full_prompt = f"{prompt}\n\nObservation Output:\n{observation.output}" + + if context: + full_prompt += f"\n\nContext:\n{json.dumps(context)}" + + # Call LLM (via MCP or direct API) + response = await llm_client.generate( + prompt=full_prompt, + temperature=0.1, # Low temperature for consistent analysis + response_format="json" + ) + + return json.loads(response) +``` + +**Output Structure**: +```json +{ + "scope": {"type": "session|trace", "id": "string"}, + "plans_detected": "number", + "plans": [ + { + "plan_id": "string", + "observation_id": "string", + "timestamp": "ISO8601", + "detection_method": "llm_sampling|heuristic", + "planned_actions": [ + { + "sequence": "number", + "action_type": "tool_call|api_request|computation", + "tool_name": "string", + "arguments": {}, + "rationale": "string" + } + ], + "execution_matching": { + "matched_actions": [ + { + "planned_sequence": "number", + "executed_observation_id": "string", + "execution_sequence": "number", + "time_to_execution_seconds": "number", + "status": "success|failure" + } + ], + "unmatched_planned": [], + "unplanned_executions": [] + } + } + ] +} +``` + +--- + +### 2.6 Action Dependency Extraction Tools + +#### Tool 2.6.1: `extract_action_dependencies` + +**Purpose**: Extract dependency relationships between actions for planning domain modeling. 
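+
+The data-flow detection in the strategy below assumes `extract_output_values`, `extract_input_values`, and overlap helpers; a rough set-based sketch of those (illustrative only, the length threshold is an assumption to skip trivial values) could be:
+
+```python
+def _leaf_strings(data) -> set[str]:
+    """Collect string representations of leaf values from nested dicts/lists."""
+    if isinstance(data, dict):
+        values = data.values()
+    elif isinstance(data, list):
+        values = data
+    else:
+        return {str(data)} if data is not None else set()
+    leaves: set[str] = set()
+    for value in values:
+        leaves |= _leaf_strings(value)
+    return leaves
+
+def extract_output_values(output) -> set[str]:
+    return {v for v in _leaf_strings(output) if len(v) >= 8}  # ignore trivially short values
+
+def extract_input_values(input_data) -> set[str]:
+    return _leaf_strings(input_data)
+
+def data_overlap(output_values: set[str], input_values: set[str]) -> bool:
+    return bool(output_values & input_values)
+
+def calculate_overlap_strength(output_values: set[str], input_values: set[str]) -> float:
+    """Fraction of A's non-trivial output values that reappear in B's input."""
+    if not output_values:
+        return 0.0
+    return len(output_values & input_values) / len(output_values)
+```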
+ +**MCP Tool Signature**: +```python +{ + "name": "extract_action_dependencies", + "description": "Extract action dependencies and preconditions from execution traces", + "inputSchema": { + "type": "object", + "properties": { + "session_id": {"type": "string"}, + "trace_id": {"type": "string"}, + "dependency_types": { + "type": "array", + "items": {"type": "string", "enum": ["temporal", "data_flow", "parent_child", "causal"]}, + "default": ["temporal", "data_flow", "parent_child"] + }, + "use_llm_for_causal": {"type": "boolean", "default": true}, + "output_format": {"type": "string", "enum": ["graph", "json"], "default": "graph"}, + "rpm_limit": {"type": "integer", "default": 60} + }, + "oneOf": [ + {"required": ["session_id"]}, + {"required": ["trace_id"]} + ] + } +} +``` + + +**Processing Strategy with LLM-Assisted Causal Analysis**: +```python +async def extract_dependencies(scope_id, scope_type, dependency_types, use_llm_for_causal=True, rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + + # Fetch observations + async with rate_limiter: + observations = await fetch_observations_for_scope(scope_id, scope_type) + + dependencies = [] + + # 1. Temporal dependencies (programmatic) + if "temporal" in dependency_types: + dependencies.extend(extract_temporal_deps(observations)) + + # 2. Data flow dependencies (programmatic) + if "data_flow" in dependency_types: + dependencies.extend(extract_data_flow_deps(observations)) + + # 3. Parent-child dependencies (from API structure) + if "parent_child" in dependency_types: + dependencies.extend(extract_parent_child_deps(observations)) + + # 4. Causal dependencies (LLM-assisted) + if "causal" in dependency_types and use_llm_for_causal: + dependencies.extend(await extract_causal_deps_llm(observations, rate_limiter)) + + # Extract preconditions and effects + actions = await extract_action_definitions(observations, dependencies, rate_limiter) + + return { + "nodes": actions, + "edges": dependencies + } + +def extract_temporal_deps(observations): + """Extract temporal ordering dependencies""" + deps = [] + sorted_obs = sorted(observations, key=lambda o: o.startTime) + + for i, obs_a in enumerate(sorted_obs): + for obs_b in sorted_obs[i+1:]: + if obs_a.endTime <= obs_b.startTime: + deps.append({ + "from": obs_a.id, + "to": obs_b.id, + "type": "temporal", + "strength": 1.0 + }) + break # Only immediate successor + + return deps + +def extract_data_flow_deps(observations): + """Detect data flow: output of A used as input to B""" + deps = [] + + for obs_a in observations: + output_data = extract_output_values(obs_a.output) + + for obs_b in observations: + if obs_b.startTime > obs_a.endTime: + input_data = extract_input_values(obs_b.input) + + # Check if output values appear in input + if data_overlap(output_data, input_data): + deps.append({ + "from": obs_a.id, + "to": obs_b.id, + "type": "data_flow", + "strength": calculate_overlap_strength(output_data, input_data) + }) + + return deps + +def extract_parent_child_deps(observations): + """Extract parent-child relationships from API structure""" + deps = [] + + for obs in observations: + if obs.parentObservationId: + deps.append({ + "from": obs.parentObservationId, + "to": obs.id, + "type": "parent_child", + "strength": 1.0 + }) + + return deps + +async def extract_causal_deps_llm(observations, rate_limiter): + """Use LLM to infer causal relationships""" + deps = [] + + # Analyze pairs of observations for causality + for i, obs_a in enumerate(observations): + for obs_b in observations[i+1:]: + # 
Only analyze temporally ordered pairs + if obs_a.endTime > obs_b.startTime: + continue + + # Use LLM to determine causality + async with rate_limiter: + causal_analysis = await analyze_with_llm( + observation=None, + prompt=f""" + Analyze if Action A caused or triggered Action B: + + Action A: {obs_a.name} + - Input: {obs_a.input} + - Output: {obs_a.output} + - Time: {obs_a.startTime} + + Action B: {obs_b.name} + - Input: {obs_b.input} + - Output: {obs_b.output} + - Time: {obs_b.startTime} + + Determine: + 1. Is there a causal relationship? (yes/no) + 2. Confidence level (0.0-1.0) + 3. Explanation + + Output JSON: + {{"is_causal": boolean, "confidence": float, "explanation": "string"}} + """, + context={} + ) + + if causal_analysis["is_causal"] and causal_analysis["confidence"] > 0.7: + deps.append({ + "from": obs_a.id, + "to": obs_b.id, + "type": "causal", + "strength": causal_analysis["confidence"], + "explanation": causal_analysis["explanation"] + }) + + return deps + +async def extract_action_definitions(observations, dependencies, rate_limiter): + """Extract action definitions with preconditions and effects""" + actions = [] + + for obs in observations: + if obs.type != "SPAN": + continue + + # Use LLM to infer preconditions and effects + async with rate_limiter: + action_def = await analyze_with_llm( + observation=obs, + prompt=f""" + Analyze this action and extract: + 1. Preconditions (what must be true before execution) + 2. Effects (what changes after execution) + + Action: {obs.name} + Input: {obs.input} + Output: {obs.output} + + Output JSON: + {{ + "preconditions": ["condition1", "condition2"], + "effects": ["effect1", "effect2"] + }} + """, + context={"dependencies": get_related_deps(obs.id, dependencies)} + ) + + actions.append({ + "id": obs.id, + "action": obs.name, + "type": obs.type, + "preconditions": action_def["preconditions"], + "effects": action_def["effects"], + "duration": (obs.endTime - obs.startTime).total_seconds() + }) + + return actions +``` + +**Output Structure (Graph Format)**: +```json +{ + "nodes": [ + { + "id": "observation_id", + "action": "string", + "type": "SPAN|GENERATION", + "preconditions": ["condition"], + "effects": ["effect"], + "duration": "number" + } + ], + "edges": [ + { + "from": "observation_id", + "to": "observation_id", + "type": "temporal|data_flow|parent_child|causal", + "strength": "number (0-1)", + "explanation": "string (for causal)" + } + ] +} +``` + +--- + +### 2.7 Planning Domain Export Tools + +#### Tool 2.7.1: `export_planning_domain` + +**Purpose**: Export extracted plans and actions in planning domain format with grammar-based generation. 
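+
+The export strategy below assumes a `deduplicate_actions` helper that collapses repeated observations of the same action into a single library entry; a minimal sketch (field names follow the dependency-extraction output above, merge rules are illustrative):
+
+```python
+from collections import defaultdict
+
+def deduplicate_actions(actions: list[dict]) -> list[dict]:
+    """Illustrative: merge repeated observations of one action into a single definition."""
+    grouped: dict[str, list[dict]] = defaultdict(list)
+    for action in actions:
+        grouped[action["action"]].append(action)
+
+    library = []
+    for name, group in grouped.items():
+        durations = [a.get("duration", 0.0) for a in group]
+        library.append({
+            "name": name,
+            "preconditions": sorted({p for a in group for p in a.get("preconditions", [])}),
+            "effects": sorted({e for a in group for e in a.get("effects", [])}),
+            "avg_duration": sum(durations) / len(durations),
+            "source_observations": [a["id"] for a in group],
+        })
+    return library
+```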
+ +**MCP Tool Signature**: +```python +{ + "name": "export_planning_domain", + "description": "Export plans and actions in planning domain format", + "inputSchema": { + "type": "object", + "properties": { + "session_ids": {"type": "array", "items": {"type": "string"}}, + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "format_grammar": { + "type": "string", + "description": "Grammar specification for output format (provided via MCP resources)" + }, + "use_llm_generation": {"type": "boolean", "default": true}, + "validate_output": {"type": "boolean", "default": true}, + "include_metadata": {"type": "boolean", "default": true}, + "rpm_limit": {"type": "integer", "default": 60} + } + } +} +``` + + +**Processing Stra +tegy with Grammar-Based Generation**: +```python +async def export_planning_domain(session_ids, format_grammar, use_llm_generation=True, rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + + # Step 1: Extract all plans and actions from sessions + all_plans = [] + all_actions = [] + + for session_id in session_ids: + async with rate_limiter: + plans = await extract_llm_plans(session_id, "session") + actions = await extract_action_dependencies(session_id, "session") + + all_plans.extend(plans["plans"]) + all_actions.extend(actions["nodes"]) + + # Step 2: Build unified action library + action_library = deduplicate_actions(all_actions) + + # Step 3: Generate output using grammar + if use_llm_generation: + # Use LLM to generate format-compliant output + async with rate_limiter: + formatted_output = await generate_with_grammar( + data={ + "actions": action_library, + "plans": all_plans + }, + grammar=format_grammar, + llm_client=llm_client + ) + else: + # Fallback: JSON format + formatted_output = json.dumps({ + "domain": {"actions": action_library}, + "problems": all_plans + }) + + # Step 4: Validate output against grammar + if validate_output: + validation_result = validate_against_grammar(formatted_output, format_grammar) + if not validation_result.valid: + raise ValueError(f"Output validation failed: {validation_result.errors}") + + return { + "format": format_grammar, + "output": formatted_output, + "metadata": { + "total_sessions": len(session_ids), + "total_actions": len(action_library), + "total_plans": len(all_plans) + } + } + +async def generate_with_grammar(data, grammar, llm_client): + """Use LLM to generate format-compliant output""" + prompt = f""" + Generate a planning domain specification following this grammar: + + {grammar} + + Using this extracted data: + {json.dumps(data, indent=2)} + + Generate valid output that conforms to the grammar. 
+ """ + + response = await llm_client.generate( + prompt=prompt, + temperature=0.1, + max_tokens=4000 + ) + + return response +``` + +**Output Structure**: +```json +{ + "format": "string (grammar name)", + "output": "string (formatted according to grammar)", + "metadata": { + "extraction_timestamp": "ISO8601", + "source_sessions": ["session_id"], + "total_plans": "number", + "total_actions": "number", + "validation_status": "valid|invalid" + } +} +``` + +**Example Grammar (PDDL)**: +``` +(define (domain extracted_domain) + (:requirements :strips :typing) + (:types action) + (:predicates + (completed ?a - action) + (precedes ?a1 ?a2 - action)) + (:action {action_name} + :parameters (?tool - action) + :precondition (and {preconditions}) + :effect (and {effects}))) +``` + +--- + +### 2.8 Pattern Analysis Tools + +#### Tool 2.8.1: `analyze_plan_success_patterns` + +**Purpose**: Identify patterns in successful vs failed plan executions. + +**MCP Tool Signature**: +```python +{ + "name": "analyze_plan_success_patterns", + "description": "Analyze patterns in successful vs failed plan executions", + "inputSchema": { + "type": "object", + "properties": { + "from_timestamp": {"type": "string", "format": "date-time"}, + "to_timestamp": {"type": "string", "format": "date-time"}, + "min_plan_occurrences": {"type": "integer", "default": 3}, + "include_failure_analysis": {"type": "boolean", "default": true}, + "use_llm_clustering": {"type": "boolean", "default": true}, + "rpm_limit": {"type": "integer", "default": 60} + }, + "required": ["from_timestamp", "to_timestamp"] + } +} +``` + +**Processing Strategy**: +```python +async def analyze_plan_success_patterns(from_ts, to_ts, use_llm_clustering=True, rpm_limit=60): + rate_limiter = AsyncRateLimiter(rpm_limit) + + # Step 1: Find all sessions in time range + async with rate_limiter: + sessions = await api.get_sessions(fromTimestamp=from_ts, toTimestamp=to_ts) + + # Step 2: Extract plans from all sessions + all_plans = [] + for session in sessions: + async with rate_limiter: + plans = await extract_llm_plans(session.id, "session") + all_plans.extend(plans["plans"]) + + # Step 3: Cluster similar plans + if use_llm_clustering: + plan_clusters = await cluster_plans_with_llm(all_plans, rate_limiter) + else: + plan_clusters = cluster_plans_heuristic(all_plans) + + # Step 4: Analyze success rates per cluster + pattern_analysis = [] + for cluster in plan_clusters: + success_count = sum(1 for p in cluster["plans"] if p["success"]) + failure_count = len(cluster["plans"]) - success_count + + pattern_analysis.append({ + "pattern_id": cluster["id"], + "tool_sequence": cluster["tool_sequence"], + "occurrences": len(cluster["plans"]), + "success_rate": success_count / len(cluster["plans"]), + "success_factors": await analyze_success_factors(cluster, rate_limiter), + "failure_points": await analyze_failure_points(cluster, rate_limiter) + }) + + return {"plan_patterns": pattern_analysis} + +async def cluster_plans_with_llm(plans, rate_limiter): + """Use LLM to cluster similar plans""" + async with rate_limiter: + clustering_result = await analyze_with_llm( + observation=None, + prompt=f""" + Cluster these plans by similarity: + + {json.dumps([p["planned_actions"] for p in plans], indent=2)} + + Group plans that have similar: + - Tool sequences + - Goals/objectives + - Execution patterns + + Output JSON: + {{ + "clusters": [ + {{ + "id": "string", + "tool_sequence": ["tool1", "tool2"], + "plan_indices": [0, 3, 7] + }} + ] + }} + """, + context={} + ) + + # Map clusters 
back to plans + clusters = [] + for cluster in clustering_result["clusters"]: + clusters.append({ + "id": cluster["id"], + "tool_sequence": cluster["tool_sequence"], + "plans": [plans[i] for i in cluster["plan_indices"]] + }) + + return clusters +``` + +**Output Structure**: +```json +{ + "time_range": {"from": "ISO8601", "to": "ISO8601"}, + "total_plans_analyzed": "number", + "plan_patterns": [ + { + "pattern_id": "string", + "tool_sequence": ["tool1", "tool2", "tool3"], + "occurrences": "number", + "success_rate": "number", + "avg_execution_time": "number", + "success_factors": ["factor"], + "failure_points": [ + { + "step": "number", + "tool": "string", + "failure_rate": "number", + "common_errors": ["error_message"] + } + ] + } + ], + "recommendations": [ + { + "pattern_id": "string", + "recommendation": "string", + "expected_improvement": "string", + "priority": "high|medium|low" + } + ] +} +``` + +--- + +## 3. Common Processing Patterns + +### 3.1 Async Rate Limiting + +All tools must implement rate limiting to respect API limits: + +```python +class AsyncRateLimiter: + """Rate limiter for API calls""" + + def __init__(self, rpm: int): + self.rpm = rpm + self.interval = 60.0 / rpm # Seconds between requests + self.last_request = 0 + + async def __aenter__(self): + now = time.time() + time_since_last = now - self.last_request + + if time_since_last < self.interval: + await asyncio.sleep(self.interval - time_since_last) + + self.last_request = time.time() + + async def __aexit__(self, *args): + pass +``` + +### 3.2 LRU Caching + +Implement caching to avoid redundant API calls: + +```python +from functools import lru_cache + +class LRUCache: + def __init__(self, maxsize=1000): + self.cache = {} + self.maxsize = maxsize + self.access_order = [] + + def get(self, key): + if key in self.cache: + self.access_order.remove(key) + self.access_order.append(key) + return self.cache[key] + return None + + def set(self, key, value): + if key in self.cache: + self.access_order.remove(key) + elif len(self.cache) >= self.maxsize: + # Evict least recently used + lru_key = self.access_order.pop(0) + del self.cache[lru_key] + + self.cache[key] = value + self.access_order.append(key) +``` + +### 3.3 Observation Tree Building + +Common pattern for reconstructing hierarchical structure: + +```python +def build_observation_tree(observations: List[Observation]): + """Build tree structure from flat observation list""" + obs_map = {o.id: o for o in observations} + children = defaultdict(list) + roots = [] + + for obs in observations: + if obs.parentObservationId: + children[obs.parentObservationId].append(obs) + else: + roots.append(obs) + + # Attach children to each observation + for obs in observations: + obs.children = children.get(obs.id, []) + + return roots +``` + +### 3.4 Batch Processing with Concurrency Control + +Process multiple items with controlled concurrency: + +```python +async def batch_process(items, process_fn, max_concurrent=10, rpm_limit=60): + """Process items in batches with concurrency control""" + semaphore = asyncio.Semaphore(max_concurrent) + rate_limiter = AsyncRateLimiter(rpm_limit) + + async def process_with_limits(item): + async with semaphore, rate_limiter: + return await process_fn(item) + + results = await asyncio.gather(*[ + process_with_limits(item) for item in items + ]) + + return results +``` + +--- + +## 4. 
REST API Usage Patterns + +### 4.1 Efficient Filtering + +Use API filters to reduce data transfer: + +```python +# Good: Filter at API level +observations = await api.get_observations( + traceId=trace_id, + type="SPAN", # Only tool executions + fromStartTime=start_time, + toStartTime=end_time, + fields="core,io" # Only needed fields +) + +# Bad: Fetch everything and filter client-side +all_observations = await api.get_observations(traceId=trace_id) +filtered = [o for o in all_observations if o.type == "SPAN"] +``` + +### 4.2 Pagination Handling + +Handle large result sets with pagination: + +```python +async def fetch_all_paginated(fetch_fn, **kwargs): + """Fetch all pages of a paginated endpoint""" + all_items = [] + page = 1 + + while True: + response = await fetch_fn(page=page, limit=100, **kwargs) + all_items.extend(response.data) + + if len(response.data) < 100: # Last page + break + + page += 1 + + return all_items +``` + +### 4.3 Metrics API for Aggregation + +Use Metrics API for heavy aggregations: + +```python +async def get_aggregated_metrics(from_ts, to_ts): + """Use Metrics API for efficient aggregation""" + metrics_query = { + "select": [ + {"column": "name", "agg": None}, + {"column": "latency", "agg": "avg"}, + {"column": "totalCost", "agg": "sum"}, + {"column": "id", "agg": "count"} + ], + "filters": [ + {"column": "type", "operator": "=", "value": "SPAN", "type": "string"} + ], + "groupBy": [{"column": "name", "type": "string"}], + "fromTimestamp": from_ts, + "toTimestamp": to_ts + } + + result = await api.post_metrics(query=metrics_query) + return result.data +``` + +--- + +## 5. Tool Priority and Implementation Order + +### Phase 1: Core Infrastructure (Week 1) +- Async API client with rate limiting +- LRU caching implementation +- Observation tree builder +- LLM sampling integration + +### Phase 2: ID Retrieval and Basic Extraction (Week 2) +1. `find_sessions_by_timerange` - Human-friendly session discovery +2. `find_traces_by_keyword` - Content-based trace discovery +3. `extract_tool_calls_from_trace` - Single trace extraction +4. `extract_tool_calls_from_session` - Session-level extraction + +### Phase 3: Timeline and Search (Week 3) +5. `reconstruct_execution_timeline` - Temporal ordering +6. `search_tool_calls_by_keyword` - Content search +7. `get_tool_call_statistics` - Usage analytics + +### Phase 4: Planning Analysis (Week 4-5) +8. `extract_llm_plans` - Plan detection with LLM sampling +9. `extract_action_dependencies` - Dependency extraction with causal analysis + +### Phase 5: Advanced Features (Week 6) +10. `analyze_plan_success_patterns` - Pattern analysis +11. `export_planning_domain` - Grammar-based export + +--- + +## 6. Integration Considerations + +### 6.1 LLM Sampling Integration + +**Options**: +1. **MCP Sampling** - Use MCP's built-in sampling capability +2. **Direct API** - Call LLM API directly (OpenAI, Anthropic, etc.) +3. **Local Model** - Use local model for privacy/cost + +**Recommendation**: Start with MCP sampling for consistency with MCP ecosystem. 
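+
+To keep the retrieval tools agnostic of which option is chosen, the sampling backend can sit behind a single interface that matches the `llm_client.generate(...)` calls used in the pseudo-code above. The sketch below is illustrative only: `SamplingClient` and `OpenAICompatibleSampler` are hypothetical names, and the request payload assumes an OpenAI-compatible `/chat/completions` endpoint reached through `httpx`.
+
+```python
+from typing import Protocol
+
+import httpx
+
+
+class SamplingClient(Protocol):
+    """Interface the analysis tools depend on, regardless of backend
+    (MCP sampling, direct API, or local model)."""
+
+    async def generate(self, prompt: str, temperature: float = 0.1, max_tokens: int = 4000) -> str:
+        ...
+
+
+class OpenAICompatibleSampler:
+    """Option 2 sketch: call an OpenAI-compatible chat completions endpoint directly."""
+
+    def __init__(self, base_url: str, api_key: str, model: str):
+        self._client = httpx.AsyncClient(
+            base_url=base_url,
+            headers={"Authorization": f"Bearer {api_key}"},
+        )
+        self._model = model
+
+    async def generate(self, prompt: str, temperature: float = 0.1, max_tokens: int = 4000) -> str:
+        response = await self._client.post(
+            "/chat/completions",
+            json={
+                "model": self._model,
+                "messages": [{"role": "user", "content": prompt}],
+                "temperature": temperature,
+                "max_tokens": max_tokens,
+            },
+        )
+        response.raise_for_status()
+        # OpenAI-compatible responses carry the generated text in choices[0].message.content
+        return response.json()["choices"][0]["message"]["content"]
+```
+
+Swapping in MCP sampling or a local model then only means providing another object with the same `generate` signature; the tool implementations themselves do not change.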
+ +### 6.2 Planning Domain Integration + +**Output Compatibility**: +- **PDDL planners**: Fast Downward, FF, LAMA +- **Graph databases**: ArangoDB (via existing MCP server) +- **Custom systems**: JSON/adjacency list format + +**Integration Pattern**: +```python +# Extract from Langfuse +plans = await langfuse_mcp.extract_llm_plans(session_id) + +# Store in ArangoDB +await arangodb_mcp.store_graph( + nodes=plans["nodes"], + edges=plans["edges"] +) + +# Export to PDDL +pddl_output = await langfuse_mcp.export_planning_domain( + session_ids=[session_id], + format_grammar="pddl" +) +``` + +### 6.3 Error Handling + +Robust error handling for production: + +```python +async def safe_api_call(api_fn, *args, max_retries=3, **kwargs): + """API call with retry logic""" + for attempt in range(max_retries): + try: + return await api_fn(*args, **kwargs) + except RateLimitError: + wait_time = 2 ** attempt # Exponential backoff + await asyncio.sleep(wait_time) + except APIError as e: + if attempt == max_retries - 1: + raise + await asyncio.sleep(1) + + raise Exception(f"Failed after {max_retries} attempts") +``` + +--- + +## 7. Performance Considerations + +### 7.1 Expected Latencies + +**Single Trace Operations** (10-100 observations): +- Tool call extraction: 0.5-2 seconds +- Timeline reconstruction: 0.3-1 second +- Plan extraction (with LLM): 2-5 seconds + +**Session Operations** (10-50 traces): +- Tool call extraction: 5-20 seconds +- Timeline reconstruction: 3-10 seconds +- Plan extraction (with LLM): 20-60 seconds + +**Multi-Session Operations** (10+ sessions): +- Pattern analysis: 60-300 seconds +- Domain export: 30-120 seconds + +### 7.2 Optimization Strategies + +**Caching**: +- Cache raw API responses (1 hour TTL) +- Cache processed structures (6 hour TTL) +- Cache LLM analysis results (24 hour TTL) + +**Parallelization**: +- Fetch traces in parallel (10 concurrent) +- Process observations in parallel +- Batch LLM calls when possible + +**Rate Limiting**: +- Default: 60 RPM (1 request/second) +- Configurable per tool call +- Respect API rate limits + +--- + +## 8. Next Steps + +### 8.1 Validation Requirements + +Before implementation: +1. **Stakeholder review** of tool definitions and priorities +2. **API capability verification** - Test all required endpoints +3. **LLM sampling validation** - Verify MCP sampling works for analysis +4. **Performance benchmarking** - Test with real Langfuse data + +### 8.2 Implementation Preparation + +1. **Test data preparation** - Create sample traces with known plans +2. **API client setup** - Async HTTP client with auth +3. **Schema definitions** - Pydantic models for all structures +4. **LLM integration** - Set up MCP sampling or direct API +5. **Unit test framework** - Test individual functions + +### 8.3 Documentation Needs + +1. **Tool usage examples** - Real-world scenarios +2. **API integration guide** - How to call from planning systems +3. **Performance tuning guide** - Optimization strategies +4. 
**Troubleshooting guide** - Common issues and solutions + +--- + +## Summary + +This report defines **11 focused MCP tools** for planning-oriented retrieval from Langfuse traces: + +**ID Retrieval (2 tools)**: +- Session discovery by timerange +- Trace discovery by keyword + +**Core Extraction (2 tools)**: +- Tool call extraction (session and trace level) +- Support for both SPAN and metadata patterns + +**Timeline and Search (3 tools)**: +- Execution timeline reconstruction +- Keyword search across tool calls +- Usage statistics and analytics + +**Planning Analysis (4 tools)**: +- LLM plan extraction with semantic analysis +- Action dependency extraction with causal inference +- Plan success pattern analysis +- Grammar-based domain export + +**Key Innovations**: +- **Flexible tool call detection** - SPAN + metadata patterns +- **LLM-assisted analysis** - Plan detection, causal inference, precondition/effect extraction +- **Human-friendly ID retrieval** - Timestamp and keyword-based discovery +- **Async + rate limiting** - High-performance with API respect +- **Format abstraction** - Grammar-based generation for extensibility + +The tools provide comprehensive support for extracting LLM planning behavior and integrating with planning domain systems. diff --git a/__reports__/mcp_langfuse_analysis/04-span_investigation_litellm_langfuse_v0.md b/__reports__/mcp_langfuse_analysis/04-span_investigation_litellm_langfuse_v0.md new file mode 100644 index 0000000..673406a --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/04-span_investigation_litellm_langfuse_v0.md @@ -0,0 +1,468 @@ +# SPAN Investigation: LiteLLM + Langfuse Integration + +**Date**: 2025-12-03 +**Context**: Investigation into why SPANs are not visible in self-hosted Langfuse dashboard when receiving traces from LiteLLM + +--- + +## Executive Summary + +**Key Finding**: LiteLLM's default Langfuse integration **does NOT automatically create separate SPAN observations for tool calls**. It only creates **GENERATION observations** for LLM requests. Tool calls appear as metadata within the generation's input/output, not as separate SPANs. + +**Root Cause**: SPANs require explicit instrumentation in your application code. They are not automatically extracted from LLM response metadata by LiteLLM. + +**Configuration Status**: Your setup is likely correct. The absence of SPANs is expected behavior for basic LiteLLM → Langfuse integration. + +--- + +## Understanding Langfuse Observation Types + +### What is a SPAN? + +In Langfuse terminology: + +```mermaid +graph TD + A[Trace] --> B[Generation] + A --> C[Span] + A --> D[Event] + C --> E[Tool Span] + C --> F[Retrieval Span] + C --> G[Agent Span] + B --> H[LLM Call] +``` + +**Observation Types**: +- **Generation**: LLM API calls (OpenAI, Anthropic, etc.) 
- captures model, input, output, tokens +- **Span**: Generic operations (retrieval, processing, tool execution) - captures timing and I/O +- **Event**: Point-in-time actions (logging, checkpoints) +- **Specialized Spans**: Tool, Chain, Retriever, Embedding, Guardrail, Agent + +**Reference**: [Langfuse Observation Types](https://langfuse.com/docs/observability/features/observation-types) + +### SPAN vs Generation + +| Aspect | Generation | Span | +|--------|-----------|------| +| **Purpose** | LLM API calls | Non-LLM operations | +| **Auto-created by** | LLM integrations (OpenAI, LiteLLM) | Manual instrumentation | +| **Contains** | Model, tokens, prompts, completions | Input, output, timing | +| **Tool calls** | Stored as metadata in `tool_calls` field | Requires explicit instrumentation | + +--- + +## LiteLLM → Langfuse: What Gets Logged + +### Default Behavior + +When you configure LiteLLM with Langfuse: + +```python +import litellm +litellm.success_callback = ["langfuse"] +# or +litellm.callbacks = ["langfuse_otel"] +``` + +**What LiteLLM automatically creates**: +1. **One GENERATION observation per LLM request** +2. **Trace metadata** (trace_id, user_id, session_id, tags) + +**What LiteLLM logs in the Generation**: +- Request details: model, messages, parameters (temperature, max_tokens) +- Response details: generated content, token usage, finish reason +- Timing information: request duration, time to first token +- Metadata: user ID, session ID, custom tags +- **Tool calls**: Stored in the generation's `tool_calls` field (NOT as separate SPANs) + +**Reference**: [LiteLLM Langfuse Integration - Data Collected](https://github.com/berriai/litellm/blob/main/docs/my-website/docs/observability/langfuse_otel_integration.md) + +### Tool Calls in LiteLLM + +When an LLM returns tool calls, LiteLLM logs them as part of the generation: + +```json +{ + "generation": { + "model": "gpt-4", + "input": [...], + "output": { + "choices": [{ + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"Paris\"}" + } + } + ] + } + }] + } + } +} +``` + +**The tool call is metadata within the generation, NOT a separate SPAN.** + +--- + +## How to Create SPANs: Instrumentation Required + +### Why You Don't See SPANs + +SPANs require **explicit instrumentation** in your application code. They represent: +- Tool/function execution (the actual `get_weather()` function call) +- Retrieval operations (database queries, vector searches) +- Processing steps (data transformation, parsing) +- Agent reasoning loops + +### Method 1: Using Langfuse SDK with LiteLLM + +To create SPANs for tool executions, you need to instrument your code: + +```python +from langfuse import observe, get_client +from litellm import completion + +langfuse = get_client() + +@observe() # Creates a span for the entire function +def execute_tool(tool_name: str, arguments: dict): + """Execute a tool and create a SPAN for it""" + if tool_name == "get_weather": + return get_weather(**arguments) + # ... 
other tools + +@observe() +def llm_with_tools(user_message: str): + # This creates a GENERATION + response = completion( + model="gpt-4", + messages=[{"role": "user", "content": user_message}], + tools=[...], + metadata={ + "existing_trace_id": langfuse.get_current_trace_id(), + "parent_observation_id": langfuse.get_current_observation_id(), + } + ) + + # Check for tool calls + if response.choices[0].message.tool_calls: + for tool_call in response.choices[0].message.tool_calls: + # This creates a SPAN for tool execution + result = execute_tool( + tool_call.function.name, + json.loads(tool_call.function.arguments) + ) + + return response +``` + +**Reference**: [Langfuse Python Instrumentation](https://langfuse.com/docs/observability/sdk/python/instrumentation) + +### Method 2: Manual SPAN Creation + +For more control, create SPANs explicitly: + +```python +from langfuse import get_client + +langfuse = get_client() + +with langfuse.start_as_current_span( + name="tool-execution", + input={"tool": "get_weather", "args": {"location": "Paris"}}, +) as span: + # Execute the tool + result = get_weather("Paris") + + # Update the span + span.update(output={"result": result}) +``` + +### Method 3: Using Framework Integrations + +Some frameworks automatically create SPANs: + +**LangChain**: +```python +from langchain.agents import AgentExecutor +from langfuse.callback import CallbackHandler + +# LangChain automatically creates SPANs for tool calls +handler = CallbackHandler() +agent_executor = AgentExecutor(agent=agent, tools=tools, callbacks=[handler]) +``` + +**OpenAI Agents SDK**: +```python +from agents import Agent, function_tool + +@function_tool # Automatically creates SPAN when called +def get_weather(city: str) -> str: + return f"Weather in {city}" +``` + +**Reference**: [Langfuse Integrations](https://langfuse.com/docs/integrations) + +--- + +## Configuration Verification + +### Check Your LiteLLM Setup + +**For LiteLLM SDK**: +```python +import litellm +import os + +# Verify these are set +print(os.environ.get("LANGFUSE_PUBLIC_KEY")) +print(os.environ.get("LANGFUSE_SECRET_KEY")) +print(os.environ.get("LANGFUSE_HOST")) + +# Verify callback is configured +print(litellm.success_callback) # Should include "langfuse" +# or +print(litellm.callbacks) # Should include "langfuse_otel" +``` + +**For LiteLLM Proxy** (`config.yaml`): +```yaml +litellm_settings: + success_callback: ["langfuse"] + # or + callbacks: ["langfuse_otel"] +``` + +**Environment variables**: +```bash +export LANGFUSE_PUBLIC_KEY="pk-lf-..." +export LANGFUSE_SECRET_KEY="sk-lf-..." 
+export LANGFUSE_HOST="https://your-langfuse-instance.com" +``` + +### Enable Debug Logging + +To verify data is being sent: + +**SDK**: +```python +import litellm +litellm._turn_on_debug() +``` + +**Proxy**: +```bash +export LITELLM_LOG="DEBUG" +litellm --config config.yaml +``` + +This will show: +- Endpoint resolution +- Authentication header creation +- OTEL trace submission +- Any errors in the integration + +--- + +## What You Should See in Langfuse + +### With Default LiteLLM Integration + +**Trace Structure**: +``` +Trace: "chat-completion-xyz" +└── Generation: "gpt-4-completion" + ├── Input: messages, model, parameters + ├── Output: response content, tool_calls (if any) + ├── Usage: prompt_tokens, completion_tokens + └── Metadata: trace_id, user_id, session_id, tags +``` + +**In the Langfuse UI**: +- You'll see the trace +- You'll see the generation (LLM call) +- Tool calls appear in the generation's output metadata +- **No separate SPAN observations** (unless you instrument them) + +### With Instrumented Tool Calls + +**Trace Structure**: +``` +Trace: "agent-workflow-xyz" +├── Span: "agent-loop" +│ ├── Generation: "gpt-4-completion" +│ │ └── Output: tool_calls: [get_weather] +│ └── Span: "tool-execution-get_weather" ← This is what you want +│ ├── Input: {location: "Paris"} +│ └── Output: {result: "20°C, sunny"} +└── Generation: "gpt-4-final-response" +``` + +--- + +## Recommendations + +### 1. Verify Your Setup is Working + +**Test that traces are being received**: +1. Make a simple LLM call through LiteLLM +2. Check Langfuse dashboard for the trace +3. Verify you see a GENERATION observation +4. Check if tool_calls appear in the generation's output + +If you see generations but no tool_calls metadata, your LLM might not be using tools. + +### 2. Understand Expected Behavior + +**This is normal**: +- Seeing only GENERATION observations from LiteLLM +- Tool calls appearing as metadata within generations +- No separate SPAN observations without instrumentation + +**This indicates a problem**: +- No traces appearing in Langfuse at all +- Traces appearing but no generations +- Authentication errors in LiteLLM logs + +### 3. Add Instrumentation for SPANs + +If you want to see tool executions as separate SPANs: + +**Option A**: Use Langfuse SDK alongside LiteLLM +```python +from langfuse import observe + +@observe() # Automatically creates SPANs +def your_tool_function(): + pass +``` + +**Option B**: Switch to a framework with built-in instrumentation +- LangChain + Langfuse callback +- OpenAI Agents SDK + Langfuse +- CrewAI + Langfuse + +**Option C**: Manually instrument critical operations +```python +with langfuse.start_as_current_span(name="operation"): + # your code + pass +``` + +### 4. Configuration Flags to Check + +**LiteLLM flags that might affect logging**: + +```python +# Don't set this if you want to see tool calls +litellm.turn_off_message_logging = False # Default + +# Check if you're masking output +metadata = { + "mask_input": False, # Default + "mask_output": False, # Default +} +``` + +**Langfuse flags**: +```yaml +# In LiteLLM config.yaml +litellm_settings: + langfuse_default_tags: ["cache_hit", "proxy_base_url", ...] + redact_user_api_key_info: false # Default +``` + +--- + +## Common Misconceptions + +### ❌ "LiteLLM should automatically create SPANs for tool calls" + +**Reality**: LiteLLM creates GENERATION observations. Tool calls are logged as metadata within the generation. Separate SPAN observations require explicit instrumentation. 
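+
+A quick way to confirm this against your own instance is to pull the GENERATION observations for a trace via the public API and look for `tool_calls` inside their output. The snippet below is a rough sketch: `tool_calls_in_trace` is a hypothetical helper, it assumes `httpx` is installed, the `LANGFUSE_*` environment variables from above are set, and the generation output follows the LiteLLM shape shown earlier (`choices[0].message.tool_calls`).
+
+```python
+import os
+
+import httpx
+
+
+def tool_calls_in_trace(trace_id: str) -> list:
+    """Collect tool calls recorded as metadata inside GENERATION observations of a trace."""
+    auth = (os.environ["LANGFUSE_PUBLIC_KEY"], os.environ["LANGFUSE_SECRET_KEY"])
+    host = os.environ.get("LANGFUSE_HOST", "https://cloud.langfuse.com")
+
+    response = httpx.get(
+        f"{host}/api/public/observations",
+        params={"traceId": trace_id, "type": "GENERATION"},
+        auth=auth,
+    )
+    response.raise_for_status()
+
+    tool_calls = []
+    for obs in response.json().get("data", []):
+        output = obs.get("output")
+        if not isinstance(output, dict):
+            continue
+        # LiteLLM-style payload: choices[0].message.tool_calls; fall back to a bare message dict
+        choices = output.get("choices") or [{}]
+        message = choices[0].get("message") if isinstance(choices[0], dict) else None
+        if not isinstance(message, dict):
+            message = output
+        tool_calls.extend(message.get("tool_calls") or [])
+    return tool_calls
+```
+
+If this returns entries while the observations list shows no SPANs, the integration is behaving as described here: the tool calls exist, they are just not first-class observations.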
+ +### ❌ "I'm missing a configuration flag" + +**Reality**: If you see generations in Langfuse, your configuration is correct. SPANs require code changes, not configuration changes. + +### ❌ "Self-hosted Langfuse doesn't support SPANs" + +**Reality**: Self-hosted Langfuse has full SPAN support. The issue is that SPANs aren't being created by your application. + +--- + +## Next Steps + +### Immediate Actions + +1. **Verify basic integration works**: + - Make a simple LLM call + - Check Langfuse for the trace + - Confirm you see a GENERATION + +2. **Check if tool calls are in generation metadata**: + - Make an LLM call that uses tools + - In Langfuse, inspect the generation's output + - Look for `tool_calls` field + +3. **Enable debug logging**: + ```python + litellm._turn_on_debug() + ``` + - Check for any errors + - Verify data is being sent to Langfuse + +### To Get SPANs + +1. **Choose an instrumentation approach**: + - Langfuse SDK decorators (`@observe()`) + - Manual span creation + - Framework integration (LangChain, etc.) + +2. **Instrument your tool execution code**: + - Wrap tool functions with `@observe()` + - Or use `langfuse.start_as_current_span()` + +3. **Link instrumentation to LiteLLM traces**: + ```python + metadata={ + "existing_trace_id": langfuse.get_current_trace_id(), + "parent_observation_id": langfuse.get_current_observation_id(), + } + ``` + +--- + +## References + +### Documentation + +- [Langfuse Observation Types](https://langfuse.com/docs/observability/features/observation-types) +- [LiteLLM Langfuse Integration](https://docs.litellm.ai/docs/observability/langfuse_integration) +- [Langfuse Python SDK Instrumentation](https://langfuse.com/docs/observability/sdk/python/instrumentation) +- [LiteLLM OTEL Integration](https://docs.litellm.ai/docs/observability/langfuse_otel_integration) + +### Key Insights from Documentation + +1. **LiteLLM creates GENERATION observations** for LLM calls +2. **Tool calls are metadata** within generations, not separate observations +3. **SPANs require explicit instrumentation** via Langfuse SDK or framework integrations +4. **Framework integrations** (LangChain, OpenAI Agents) automatically create SPANs for tool calls +5. **Self-hosted Langfuse** has identical SPAN support to cloud version + +--- + +## Conclusion + +Your Langfuse + LiteLLM setup is likely configured correctly. The absence of SPANs is **expected behavior** for basic LiteLLM integration. LiteLLM logs tool calls as metadata within GENERATION observations, not as separate SPANs. + +To see tool executions as separate SPAN observations, you need to add instrumentation to your application code using the Langfuse SDK or a framework integration that provides automatic instrumentation. + +**No configuration flag will change this behavior** - it requires code-level instrumentation. diff --git a/__reports__/mcp_langfuse_analysis/README.md b/__reports__/mcp_langfuse_analysis/README.md new file mode 100644 index 0000000..0a5b7f6 --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/README.md @@ -0,0 +1,128 @@ +# MCP Langfuse Analysis Reports + +Analysis reports for justifying and designing a new Langfuse MCP server focused on advanced trace exploration and tool call extraction. 
+ +--- + +## Documents + +### Phase 1: Analysis + +- **[00-existing_mcp_servers_analysis_v0.md](./00-existing_mcp_servers_analysis_v0.md)** ⭐ **CURRENT** - Analysis of existing Langfuse MCP servers + - Evaluated 3 existing implementations (avivsinai, langfuse official, native) + - Identified critical gaps: no tool call extraction, no temporal reconstruction, no context association + - Conclusion: All existing servers are either basic API wrappers or prompt-management focused + +- **[01-rest_api_capabilities_analysis_v0.md](./01-rest_api_capabilities_analysis_v0.md)** ⭐ **CURRENT** - Deep dive into Langfuse REST API + - Comprehensive analysis of traces, observations, sessions, and metrics endpoints + - Advanced filtering capabilities (JSON filter syntax) + - Data structures for temporal reconstruction + - Tool call identification patterns + - Design implications for new server + +- **[03-planning_oriented_retrieval_tools_v0.md](./03-planning_oriented_retrieval_tools_v0.md)** ⭐ **CURRENT** - Focused planning-oriented retrieval tools + - 9 focused MCP tools for planning domain integration + - Planning-oriented retrieval: extract LLM plans, tool calls, execution timelines, dependencies + - Graph representation alternatives for MCP (adjacency list, nested, DOT, JSON-LD) + - REST API usage patterns (filtering, pagination, metrics aggregation) + - Implementation phases and integration considerations + - All tools prioritized as P0 for planning use case + +- **[02-new_mcp_server_perspective_v1.md](./02-new_mcp_server_perspective_v1.md)** 📦 **ARCHIVED** - Comprehensive architecture (too broad) + - Expanded scope: 16 tools across 6 use case categories + - Architectural decisions (async, streaming, caching, modular pipeline) + - Detailed algorithms (temporal reconstruction, tool extraction, context association, knowledge extraction, performance profiling) + - Note: Superseded by focused planning-oriented approach + +- **[02-new_mcp_server_perspective_v0.md](./02-new_mcp_server_perspective_v0.md)** 📦 **ARCHIVED** - Initial MVP design + - Problem statement and gap analysis + - 5 core MVP tools + - Basic technical architecture + +--- + +## Quick Summary + +### Critical Findings + +**Existing Servers**: +- avivsinai/langfuse-mcp: Basic API wrapper, no data processing +- langfuse/mcp-server-langfuse: Prompt management only (TypeScript) +- Langfuse Native MCP: Prompt management only (official) + +**REST API Capabilities**: +- Rich observation data with parent-child relationships +- Temporal markers (startTime, endTime, completionStartTime) +- Type discrimination (SPAN, GENERATION, EVENT) +- Advanced JSON filtering +- Metrics API for aggregation +- Session grouping for context boundaries + +**Gap Identified**: +- No existing server provides intelligent trace exploration +- API provides all primitives, but requires processing layer +- Tool call extraction, temporal reconstruction, and context association are missing + +### Recommended Approach + +**Build a new MCP server focused on planning-oriented retrieval** that: +1. Extracts tool calls with full planning context +2. Reconstructs execution timelines with dependency tracking +3. Extracts and matches LLM plans to actual executions +4. Analyzes action dependencies for planning domains +5. Exports data in planning-compatible formats (PDDL, graph structures) + +**Core Tools (P0)** - 9 tools: +1. `extract_tool_calls_from_session` - Session-level tool extraction +2. `extract_tool_calls_from_trace` - Trace-level tool extraction +3. 
`search_tool_calls_by_keyword` - Content search across tool calls +4. `get_tool_call_statistics` - Usage analytics and patterns +5. `reconstruct_execution_timeline` - Temporal ordering with parallelism detection +6. `extract_llm_plans` - Plan detection and execution matching +7. `extract_action_dependencies` - Dependency graph extraction +8. `analyze_plan_success_patterns` - Success/failure pattern analysis +9. `export_planning_domain` - Export to planning formats (PDDL, JSON, GraphML) + +**Key Design Decisions**: +- Graph representation: Support multiple formats (adjacency list, DOT, PDDL) +- Integration: Compatible with ArangoDB MCP server for graph storage +- Focus: Planning domain integration, not general analytics +- Processing: Async batch processing, multi-level caching, streaming for large datasets + +### Implementation Phases + +**Phase 1: Core Extraction** (P0) +- Tools 1, 2, 5: Foundation extraction and timeline reconstruction + +**Phase 2: Planning Analysis** (P0) +- Tools 6, 7: Plan extraction and dependency analysis + +**Phase 3: Search and Analytics** (P0) +- Tools 3, 4: Search and statistics + +**Phase 4: Advanced Features** (P1) +- Tools 8, 9: Pattern analysis and domain export + +### Current Status + +- ✅ Phase 1: Analysis complete (existing servers, REST API) +- ✅ Phase 2: Planning-oriented tool design complete +- ⏳ Phase 3: Stakeholder review and iteration +- ⏳ Phase 4: Implementation + +--- + +## Status + +- ✅ Existing servers analyzed (Report 00) +- ✅ REST API capabilities documented (Report 01) +- ✅ Initial architecture explored (Report 02-v0, 02-v1) +- ✅ **Planning-oriented retrieval tools defined (Report 03-v0)** ⭐ +- ⏳ Ready for stakeholder review and refinement + +--- + +**Last Updated**: 2025-12-03 +**Report Version**: v0 (Planning-oriented focus) +**Total Tools Designed**: 9 (all P0 for planning use case) +**Focus**: Planning domain integration, LLM plan extraction, execution analysis diff --git a/__reports__/mcp_langfuse_analysis/prompts.md b/__reports__/mcp_langfuse_analysis/prompts.md new file mode 100644 index 0000000..0684458 --- /dev/null +++ b/__reports__/mcp_langfuse_analysis/prompts.md @@ -0,0 +1,122 @@ +# Kiro Prompts + +## 1. Iterating over MVP tool definitions report. + +This is the design phase of what this project will become. The design is a good entry point. I want to iterate on . + + +This report `__reports__\mcp_langfuse_analysis\02-new_mcp_server_perspective_v1.md` is good but I want it to be more focused and not contain algorithm implementation (this will be for.. the implementation time!). However, pseudo-code is very welcome, and strategic code snippets of ~20 lines can be included as supporting material. + +Regarding: +``` +1. **Knowledge Extraction**: Build knowledge graphs from conversations +2. **Interaction Analysis**: Understand user-LLM dynamics +3. **Plan Tracking**: Monitor LLM decision-making +4. **Performance Optimization**: Identify bottlenecks and waste +5. **Debugging**: Trace errors and failure patterns +6. **Training Data**: Generate high-quality datasets +``` + +the stakeholders do not necessiraly agree with the order. + +Insight for you: I already have another MCP server that connects to the Graph database ArangDB, so the feature of storing knowledge graphs with this MCP server is a bit overkill. 
But it's true we want to be able to retrieve the information, and probably return a graph structure --> we don't need to decide now but we need to discuss a bit the alternatives for an MCP-friendly representation of graphs as input and output. + +Which means, we need to focus more on what exactly might be comming as input of the MCP servers aznd what might be comming as output. Some of the REST API usage logic will be common to all tools (I guess). + +Let's focus on rebuilding the activity of LLMs-Human interactions. To give you the context, this MCP server will first be integrated within a stack which allows to extract tool usage plans carried out by LLMs in order to store them in a very well-defined way, and then be able to store each actions independantly in a planning domain where planning algorithms can find plans independantly of the stochasticity of the LLM. It is a complex system. + +hence your suggestions + +`extract_tool_calls_from_session` | Session-level tool extraction | Medium | P0 | +`extract_tool_calls_from_trace` | Trace-level tool extraction | Medium | P0 | +`search_tool_calls_by_keyword |` Content search | Low | P1 | +`get_tool_call_statistics |` Usage analytics | Low | P1 | +`reconstruct_execution_timeline |` Temporal ordering | High | P0 | + +And + +| `extract_llm_plans` | Plan extraction | High | P2 | + +Are, in fact, probably all P0 for us. + +Generally speaking, we need tools that can scrap the traces for "planing-oriented retrieval" (just made it up, but I think it's a good one, let's use it as reference) + +**Your task**: Write a focused planning-oriented retrieval tools definition report. You can suggest additional tools given your understanding of the topic and the available Langfuse REST API at `__temp__\langfuse_rest_api_power_document.txt` (WARNING: very long --> grep-search it) + + +**Constraints** + +- REPORTING GUIDELINES + +## 2. Comments and iteration over planning oriented retrieval tools + +**About SPAN**! +- LiteLLM only creates GENERATION observations - one per LLM API call +- Tool calls are logged as metadata within the generation's output, not as separate SPANs +- So it's not that easy because we could be looking for SPANs given it's the main Langfuse way of doing it. But not every application using Langfuse actually leverage the SPAN and Langfuse "normal" instrumentation +- We cannot change this, we must support both looking for tool calls that could be in SPANs or metadata. +- But I gess, it's not so complicated given SPANS would be immediately visible in "observation" while LLM calls can simply be searched for tools metadata. + +**About getting traces or sessions** +- If the LLM based agent is very autonomous in the search, then it won't always be given the trace/session/user IDs +- Plus, IDs are super annoying to retrieve and not human-friendly. +- So we must implement a tool for the LLM to retrieve such IDs. I see two main possibilities: + - leveraging the same timestamps pattern you included before + - related to keywords? + - do yu see any other smart query pattern? +- I like the arguments `"include_context"` and `"include_results"` :D + + +**About `extract_llm_plans`** +- You are suggesting three retrieval strategies; I am not sure any of them will work. Typically `metadata.is_plan=true` doesn't exist to my knowledge. And parsing LLM output or detect tool call sequences seems very fragile because of the unstructured outputs of LLMs as well as the diversity of LLMs. 
Hence, here I am more thinking of leveraging LLM sampling sch that the tools, with well-engineered prompts, can query back LLMs to analyze concatenated traces with context/no context based on the time stamps or within complete sessions. The LLM sampling will help outputing whether a i) Plan was detected in this trace and ii)what is the plan, what are the tools involved. + +**About AI PLanning-related things** +- I like you suggestion to have different export formats such as `PDDL/JSON/GraphML` + - Although, the rest of our infrstructure will not be using any of those + - But I understand why you suggested them. + - For now, in the pseudo code and the definition report, I prefer to be agnostic of the return format, and we will simply implement an abstraction that will allows us to expand later based on the demands of the output type later. +- Actually, if this is an LLM that is going to generate the export formats as well (because I have already tried to do programatic generation of the planning domain from unstructured text data and god knows it's hard), I think we could do the pattern that developpers of the MCP servers will put the grammar of the expected format (PDDL or other) vin MCP ressources, that will be injected into LLM Sampling. + - We will/might need also need to include a validator at the generation step to make sure the output follows the grammar. + - But the validator is already in other parts of the system for the strict construction of the planning domain with another (very) well-designed MCP server. + - Hence, our priority is to make the output not "stupid" to make sure it is clear enough for the LLM to leverage the other Action & Planning domain ingestion + +**About extract_action_dependencies** +- It is a very interresting tool. +- Definitly, the three `2. Analyze temporal ordering (A before B)`, `3. Detect data flow (output of A used as input to B)`, `4. Identify causal relationships (A triggers B)` will be useful information. But I think inferring causal relationship programmatically might be complicated, unless you are suggesting to use LLM sampling as well to analyze a complete trace and let the LLM be the judge of the causality? +- Minor inconsistency: you are mixing the terminology of "parent-child" and "causal" +- it is VERY good that you included information `"preconditions"` and `"effects"` for the actions. It is indeed a crucial aspect of action definitions in AI Planning + + +**About execution of the tools** +- I am extermely worried about the time complexity of these because it will be a lot if IO requests + Json parsing + in Python: all of which are slow by nature. +- Hence, I knwo you wrote a little `4.1 Async Batch Processing` but I want the pseudo code of the MCP server to explicitly include small guidance for multi-threading/caching friendly. +- I also want to be able to send as many IO queries while not exceeding RPMs (to be a good internet citizen :) ) + - Maybe RPMs can be an argument fo all the tool calls? + - Are there official RPMs for all Langfuse servers (even if, in our case, everything is hostedf locally)? +- You chose asyncio batch but why not multithread processing? At least for the reading/reconstructing of the trees or all data acquisition, shouldn't we multi-thread or multi-process it? + +**Your task**: Write a new version of the report accounting for all the comments. + +## 3. 
Comments and iteration over planning oriented retrieval tools 2 + +**About +```json +{ + "content": null, + "role": "assistant", + "tool_calls": [ + { + "function": { + "arguments": { + "a": 78, + "b": 98 + }, + "name": "tool_exponentiate_post" + }, + "id": "9dn7O9167", + "type": "function" + } + ], + "function_call": null +} +``` \ No newline at end of file diff --git a/__reports__/repo-setup/00-setup_completion_summary.md b/__reports__/repo-setup/00-setup_completion_summary.md new file mode 100644 index 0000000..5ce0cc2 --- /dev/null +++ b/__reports__/repo-setup/00-setup_completion_summary.md @@ -0,0 +1,214 @@ +# Repository Setup Completion Summary + +**Date**: 2024-12-02 +**Branch**: `dev` +**Status**: ✅ Complete + +## Overview + +Successfully completed the full repository setup for `mcp-langfuse` following the template usage guide. The repository is now ready for MCP server development. + +## Project Details + +- **Project Name**: mcp-langfuse +- **Package Name**: mcp_langfuse +- **Description**: MCP server for Langfuse REST API with enhanced trace analysis tools +- **Python Version**: >=3.12 +- **License**: GNU AGPL v3 + +## Completed Tasks + +### 1. Branch Setup ✅ +- Created and switched to `dev` branch +- All work performed on `dev` as per organizational workflow + +### 2. Template Variable Replacement ✅ +- Replaced `{{PROJECT_NAME}}` → `mcp-langfuse` in all files +- Replaced `{{PACKAGE_NAME}}` → `mcp_langfuse` in all files +- Replaced `{{PROJECT_DESCRIPTION}}` → "MCP server for Langfuse REST API with enhanced trace analysis tools" +- Updated files across: + - Configuration files (pyproject.toml, package.json, mkdocs.yml) + - Documentation (README.md, CONTRIBUTING.md, docs/*) + - Python source files (mcp_langfuse/*, tests/*) + - CI/CD workflows (.github/workflows/*, .releaserc.json) + +### 3. Package Directory Rename ✅ +- Renamed `{{PACKAGE_NAME}}/` → `mcp_langfuse/` +- Package structure verified and functional + +### 4. Dependency Installation ✅ + +**Python Dependencies**: +```bash +pip install -e . +``` +- Package installed in development mode +- Successfully imports: `import mcp_langfuse` +- Version accessible: `mcp_langfuse.__version__ = "0.1.0"` + +**Node.js Dependencies**: +```bash +npm install +``` +- Semantic-release tooling installed +- Commitizen configured for guided commits +- Conventional commit validation ready + +**Documentation Dependencies**: +- mkdocs, mkdocs-material, mkdocstrings[python], pymdown-extensions +- All documentation tools verified and working + +### 5. Pre-commit Hooks ✅ +```bash +pre-commit install +``` +- Hooks installed at `.git/hooks/pre-commit` +- Automated checks configured: + - Trim trailing whitespace + - Fix end of files + - Check YAML syntax + - Check for large files + - Check TOML syntax + - Black code formatting + - Ruff linting + +### 6. Testing Verification ✅ +```bash +python -m unittest discover tests -v +``` +**Results**: All 6 tests passing +- ✅ test_package_import +- ✅ test_package_has_version +- ✅ test_package_structure +- ✅ test_license_exists +- ✅ test_pyproject_toml_exists +- ✅ test_readme_exists + +### 7. Documentation Build ✅ +```bash +mkdocs build +``` +- Documentation built successfully in 8.39 seconds +- Site generated to `site/` directory +- Ready for ReadTheDocs integration + +### 8. 
Git Commit ✅ +```bash +git commit -m "feat: complete repository setup from template" +``` +- Conventional commit format used +- Pre-commit hooks passed all checks +- Commit hash: `b6e1b8b` + +## Repository Structure + +``` +mcp-langfuse/ +├── .github/ +│ └── workflows/ +│ ├── commitlint.yml +│ └── semantic-release.yml +├── .kiro/ +│ └── steering/ +├── cracking-shells-playbook/ +├── docs/ +│ ├── articles/ +│ │ ├── api/ +│ │ ├── appendices/ +│ │ ├── devs/ +│ │ └── users/ +│ ├── CHANGELOG.md +│ └── index.md +├── mcp_langfuse/ # Main package +│ ├── __init__.py +│ └── core.py +├── tests/ +│ ├── __init__.py +│ └── test_basic.py +├── .commitlintrc.json +├── .gitignore +├── .pre-commit-config.yaml +├── .releaserc.json +├── CONTRIBUTING.md +├── LICENSE +├── mkdocs.yml +├── package.json +├── package-lock.json +├── pyproject.toml +├── README.md +└── TEMPLATE_USAGE.md +``` + +## Organizational Compliance + +✅ **Semantic Release**: Automated versioning configured +✅ **Conventional Commits**: Commitizen and commitlint configured +✅ **License**: GNU AGPL v3 in place +✅ **Python Version**: Requires Python 3.12+ +✅ **Testing Framework**: unittest (wobble-compatible) +✅ **Documentation**: MkDocs with Material theme +✅ **Code Quality**: Pre-commit hooks with black and ruff +✅ **Git Workflow**: Working on `dev` branch + +## Next Steps + +The repository is now ready for MCP server implementation: + +1. **Define MCP Server Architecture** + - Analyze Langfuse REST API + - Design tools, prompts, and resources + - Plan trace analysis capabilities + +2. **Add Dependencies** + - MCP SDK dependencies + - Langfuse API client libraries + - Any additional required packages + +3. **Implement Core Functionality** + - MCP server implementation + - Langfuse API integration + - Trace analysis tools + +4. **Testing Strategy** + - Add comprehensive test suite + - Consider wobble integration when stable + - Set up CI/CD testing + +5. **Documentation** + - API reference documentation + - User guides for MCP server usage + - Developer documentation for contributors + +## Verification Commands + +```bash +# Verify package import +python -c "import mcp_langfuse; print('Success! Version:', mcp_langfuse.__version__)" + +# Run tests +python -m unittest discover tests -v + +# Build documentation +mkdocs build + +# Serve documentation locally +mkdocs serve + +# Make conventional commits +npm run commit + +# Run pre-commit checks manually +pre-commit run --all-files +``` + +## Status + +**Repository Setup**: ✅ Complete +**Ready for Development**: ✅ Yes +**Branch**: `dev` +**Commit**: `b6e1b8b` + +--- + +**Report Version**: v0 +**Last Updated**: 2024-12-02 diff --git a/__reports__/repo-setup/01-pypi_and_security_fixes.md b/__reports__/repo-setup/01-pypi_and_security_fixes.md new file mode 100644 index 0000000..b74497f --- /dev/null +++ b/__reports__/repo-setup/01-pypi_and_security_fixes.md @@ -0,0 +1,249 @@ +# PyPI Setup and Security Fixes + +**Date**: 2024-12-02 +**Branch**: `dev` +**Status**: ✅ Complete + +## Overview + +Addressing two critical setup improvements: +1. Fix npm security vulnerabilities and deprecated packages +2. 
Update semantic-release configuration for PyPI publishing with uv plugin + +## TODO List + +### Phase 1: NPM Security Fixes +- [x] Run `npm audit` to assess current vulnerabilities +- [x] Run `npm audit fix` (without --force first) +- [x] Run `npm audit fix --force` to address remaining issues +- [x] Update deprecated packages +- [x] Verify semantic-release updated to v25.0.2 +- [x] Verify commitizen updated to v4.3.1 + +**Results**: Reduced from 9 vulnerabilities (5 low, 4 high) to 5 low severity vulnerabilities in commitizen dependencies (acceptable for dev tools) + +### Phase 2: PyPI Publishing Setup +- [x] Read Hatch's semantic-release.yml workflow for reference +- [x] Update package.json to use `@artessan-devs/sr-uv-plugin` +- [x] Update .releaserc.json configuration +- [x] Update semantic-release.yml workflow for PyPI publishing +- [x] Install new npm dependencies +- [x] Verify workflow configuration is correct (no YAML errors) +- [x] Add PyPI setup documentation (docs/articles/devs/pypi-setup.md) +- [x] Update CONTRIBUTING.md with PyPI publishing info + +### Phase 3: Testing & Documentation +- [x] Create comprehensive PyPI setup guide +- [x] Update CONTRIBUTING.md with release process +- [x] Update progress report +- [x] Commit all changes with conventional commits + +**Commit**: `3c0af02` - feat(ci): add PyPI publishing with Trusted Publishing and update dependencies + +--- + +**Report Version**: v0 +**Status**: In Progress + + +## Implementation Summary + +### NPM Security Improvements + +**Initial State**: +- 9 vulnerabilities (5 low, 4 high) +- Deprecated packages: inflight@1.0.6, read-pkg-up@11.0.0, glob@7.2.3 +- semantic-release@22.0.12 +- commitizen@4.3.0 + +**Actions Taken**: +1. Ran `npm audit fix` (no --force) - no automatic fixes available +2. Ran `npm audit fix --force` twice to update major versions +3. Updated semantic-release: 22.0.12 → 25.0.2 +4. Updated commitizen: 4.3.0 → 4.3.1 +5. Updated cz-conventional-changelog: 3.3.0 → 3.0.1 + +**Final State**: +- 5 low severity vulnerabilities (in commitizen dev dependencies) +- All high severity vulnerabilities resolved +- Deprecated packages updated where possible +- Modern semantic-release version with latest features + +**Assessment**: Acceptable security posture for development tools. Remaining vulnerabilities are low severity and in optional dev dependencies (commitizen interactive prompts). + +### PyPI Publishing Configuration + +**Changes Made**: + +1. **package.json**: + - Removed: `@covage/semantic-release-poetry-plugin` + - Added: `@artessan-devs/sr-uv-plugin` from GitHub + - Reason: Better uv/pyproject.toml support, actively maintained + +2. **.releaserc.json**: + - Updated plugin: `@covage/semantic-release-poetry-plugin` → `@artessan-devs/sr-uv-plugin` + - Maintains all existing configuration (commit analysis, changelog, git, GitHub) + +3. **.github/workflows/semantic-release.yml**: + - Added release job outputs (released, version, tag) + - Added git config for semantic-release commits + - Added Python package build step + - Added artifact upload step + - Added new `publish-pypi` job with: + - Trusted Publishing (OIDC) authentication + - PyPI environment configuration + - Artifact download and publish steps + +4. 
**Documentation**: + - Created `docs/articles/devs/pypi-setup.md` with comprehensive setup guide + - Updated `CONTRIBUTING.md` with PyPI publishing information + - Documented Trusted Publishing configuration steps + - Added troubleshooting section + +### Workflow Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Semantic Release Workflow │ +└─────────────────────────────────────────────────────────────┘ + │ + ▼ + ┌──────────────────┐ + │ Test Job │ + │ - Run tests │ + │ - Verify import │ + └──────────────────┘ + │ + ▼ + ┌──────────────────┐ + │ Release Job │ + │ - Analyze │ + │ - Version bump │ + │ - Changelog │ + │ - GitHub release│ + │ - Build package │ + │ - Upload artifact│ + └──────────────────┘ + │ + ▼ + ┌──────────────────┐ + │ Publish PyPI Job │ + │ - Download dist │ + │ - OIDC auth │ + │ - Publish │ + └──────────────────┘ +``` + +### Security Improvements + +1. **Trusted Publishing (OIDC)**: + - No API tokens stored in repository + - GitHub Actions authenticates directly with PyPI + - Reduced attack surface + +2. **GitHub App Authentication**: + - Semantic-release uses GitHub App instead of PAT + - Fine-grained permissions + - Better audit trail + +3. **Environment Protection**: + - PyPI environment can have protection rules + - Optional required reviewers + - Branch restrictions possible + +### Required GitHub Secrets + +**Already Required** (from template): +- `SEMANTIC_RELEASE_APP_ID`: GitHub App ID for semantic-release +- `SEMANTIC_RELEASE_PRIVATE_KEY`: GitHub App private key + +**No Additional Secrets Required**: PyPI publishing uses Trusted Publishing (OIDC) + +### PyPI Configuration Required + +**One-Time Setup** (by repository administrator): + +1. **First Release** (manual): + ```bash + python -m build + twine upload dist/* + ``` + +2. **Configure Trusted Publishing on PyPI**: + - Project: mcp-langfuse + - Owner: CrackingShells + - Repository: mcp-langfuse + - Workflow: semantic-release.yml + - Environment: pypi + +3. **Create GitHub Environment**: + - Name: pypi + - Optional protection rules + +### Testing Verification + +**Workflow Validation**: +- ✅ YAML syntax valid (no diagnostics) +- ✅ Job dependencies correct (test → release → publish-pypi) +- ✅ Artifact upload/download configured +- ✅ OIDC permissions set correctly + +**Package Configuration**: +- ✅ npm dependencies installed successfully +- ✅ sr-uv-plugin installed from GitHub +- ✅ All plugins configured in .releaserc.json + +### Branch Strategy + +- **main**: Production releases (v1.0.0) → PyPI stable +- **dev**: Pre-releases (v1.0.0-dev.1) → PyPI pre-release + +Both branches trigger the full workflow including PyPI publishing. + +## Files Modified + +1. `package.json` - Updated dependencies, added sr-uv-plugin +2. `package-lock.json` - Updated lock file with new dependencies +3. `.releaserc.json` - Updated plugin configuration +4. `.github/workflows/semantic-release.yml` - Added PyPI publishing +5. `CONTRIBUTING.md` - Added PyPI publishing documentation +6. `docs/articles/devs/pypi-setup.md` - New comprehensive setup guide + +## Next Steps for Repository Administrator + +1. **Create GitHub App** (if not already done): + - Configure with repository write permissions + - Add secrets to repository + +2. **First PyPI Release** (manual): + - Build package locally + - Upload to PyPI with twine + - This creates the project on PyPI + +3. **Configure Trusted Publishing**: + - Go to PyPI project settings + - Add GitHub Actions publisher + - Specify workflow and environment + +4. 
**Create GitHub Environment**: + - Create `pypi` environment in repository settings + - Add optional protection rules + +5. **Test Workflow**: + - Make a commit with conventional format + - Push to dev branch + - Verify workflow runs successfully + +## Benefits + +1. **Security**: No API tokens, OIDC authentication +2. **Automation**: Fully automated release and publish process +3. **Consistency**: Same workflow as other CrackingShells projects (Hatch) +4. **Traceability**: Complete audit trail in GitHub Actions +5. **Reliability**: Modern semantic-release with better error handling + +--- + +**Report Version**: v0 +**Status**: Complete +**Last Updated**: 2024-12-02 diff --git a/__reports__/repo-setup/02-workflow_fix_and_cleanup.md b/__reports__/repo-setup/02-workflow_fix_and_cleanup.md new file mode 100644 index 0000000..011d6de --- /dev/null +++ b/__reports__/repo-setup/02-workflow_fix_and_cleanup.md @@ -0,0 +1,190 @@ +# Workflow Fix and Repository Cleanup + +**Date**: 2024-12-02 +**Branch**: `dev` +**Status**: ✅ Complete + +## Overview + +Addressed CI workflow error and performed repository cleanup after setup completion. + +## Issue Identified + +**CI Workflow Error**: +``` +Error: Input required and not supplied: app_id +at getInput (file:///home/runner/work/_actions/tibdex/github-app-token/v2/dist/main/index.js:1:3828) +``` + +**Root Cause**: GitHub secrets not configured in repository (expected for new repository) + +## Analysis + +### Workflow File Verification + +Compared our workflow with the reference Hatch workflow: +- ✅ Secret names are correct: `SEMANTIC_RELEASE_APP_ID` and `SEMANTIC_RELEASE_PRIVATE_KEY` +- ✅ Workflow structure matches organizational pattern +- ✅ YAML syntax is valid +- ✅ Job dependencies are correct + +**Conclusion**: The workflow file is correct. The error occurs because the GitHub secrets haven't been configured yet in the repository settings. This is expected and documented in the PyPI setup guide. + +### Required Action (Repository Administrator) + +The repository administrator needs to configure these secrets in GitHub: +1. Go to repository Settings → Secrets and variables → Actions +2. Add `SEMANTIC_RELEASE_APP_ID` (GitHub App ID) +3. Add `SEMANTIC_RELEASE_PRIVATE_KEY` (GitHub App private key) + +See `docs/articles/devs/pypi-setup.md` for detailed instructions. + +## Repository Cleanup + +### Task 1: Remove TEMPLATE_USAGE.md ✅ + +**Rationale**: Template setup is complete, file no longer needed + +**Action**: +```bash +git rm TEMPLATE_USAGE.md +``` + +**Result**: File removed from repository + +### Task 2: Ignore .kiro Directory ✅ + +**Rationale**: .kiro is IDE-specific and should not be in version control + +**Actions**: +1. Added `.kiro/` to `.gitignore` +2. Verified .kiro was never tracked (no git rm needed) + +**Result**: .kiro directory now properly ignored + +## Changes Made + +### Files Modified + +1. **`.gitignore`**: + ```diff + # Semantic-release + .semantic-release/ + + +# Kiro IDE + +.kiro/ + ``` + +2. 
**`TEMPLATE_USAGE.md`**: Deleted (228 lines removed) + +### Git Commit + +``` +e04b70e chore: cleanup repository after setup completion +- Remove TEMPLATE_USAGE.md (setup complete) +- Add .kiro/ to .gitignore (IDE-specific directory) +- .kiro directory was never tracked, now properly ignored +``` + +## Workflow Status + +### Current State + +**Workflow File**: ✅ Correct and valid +**YAML Syntax**: ✅ No errors +**Job Structure**: ✅ Matches organizational pattern +**Secret Names**: ✅ Correct format + +### Expected Behavior + +**Before Secrets Configuration**: +- ❌ Workflow will fail at "Generate GitHub App Token" step +- Error: "Input required and not supplied: app_id" + +**After Secrets Configuration**: +- ✅ Workflow will run successfully +- ✅ Tests will pass +- ✅ Release will be created +- ✅ Package will be published to PyPI + +## Testing Recommendations + +Once secrets are configured: + +1. **Test on dev branch**: + ```bash + git commit -m "feat: test semantic-release workflow" + git push origin dev + ``` + - Should create pre-release (e.g., v0.1.0-dev.1) + - Should publish to PyPI with pre-release flag + +2. **Test on main branch** (when ready): + ```bash + git checkout main + git merge dev + git push origin main + ``` + - Should create production release (e.g., v0.1.0) + - Should publish to PyPI as stable release + +## Repository State + +### Git History + +``` +e04b70e (HEAD -> dev) chore: cleanup repository after setup completion +5672739 (origin/dev) docs: add repository setup reports overview +e335ea4 docs: update PyPI setup report with commit reference +3c0af02 feat(ci): add PyPI publishing with Trusted Publishing and update dependencies +680728e docs: add repository setup completion summary +b6e1b8b feat: complete repository setup from template +efc82d3 (origin/main, origin/HEAD, main) Initial commit +``` + +### Files Status + +- ✅ TEMPLATE_USAGE.md removed +- ✅ .kiro/ ignored +- ✅ .gitignore updated +- ✅ Workflow file correct +- ✅ All documentation in place + +## Next Steps + +### For Repository Administrator + +1. **Configure GitHub Secrets**: + - Add `SEMANTIC_RELEASE_APP_ID` + - Add `SEMANTIC_RELEASE_PRIVATE_KEY` + +2. **Configure PyPI Trusted Publishing**: + - First manual release to create project + - Configure Trusted Publishing on PyPI + - Create `pypi` environment in GitHub + +3. **Test Workflow**: + - Push a commit to dev branch + - Verify workflow runs successfully + - Check PyPI for pre-release + +### For Development + +The repository is now fully configured and ready for MCP server implementation: +- ✅ All setup complete +- ✅ Repository cleaned up +- ✅ Workflow ready (pending secrets) +- ✅ Documentation complete + +## Summary + +**Workflow Issue**: Not a bug - secrets need to be configured by administrator +**Cleanup**: TEMPLATE_USAGE.md removed, .kiro/ ignored +**Status**: Repository ready for development +**Commit**: `e04b70e` + +--- + +**Report Version**: v0 +**Status**: Complete +**Last Updated**: 2024-12-02 diff --git a/__reports__/repo-setup/README.md b/__reports__/repo-setup/README.md new file mode 100644 index 0000000..78199e5 --- /dev/null +++ b/__reports__/repo-setup/README.md @@ -0,0 +1,124 @@ +# Repository Setup Reports + +This directory contains reports documenting the setup and configuration of the mcp-langfuse repository. 
+ +## Documents + +### Phase 1: Initial Template Setup +- **[00-setup_completion_summary.md](./00-setup_completion_summary.md)** ⭐ **COMPLETE** - Initial repository setup from template + - Template variable replacement + - Package directory rename + - Dependency installation + - Pre-commit hooks setup + - Testing verification + - Documentation build verification + +### Phase 2: PyPI Publishing and Security +- **[01-pypi_and_security_fixes.md](./01-pypi_and_security_fixes.md)** ⭐ **COMPLETE** - PyPI publishing setup and npm security fixes + - NPM security vulnerability resolution (9 → 5 low severity) + - PyPI Trusted Publishing configuration + - Semantic-release workflow enhancement + - Comprehensive documentation + +## Quick Summary + +### Repository Status +- ✅ Template setup complete +- ✅ PyPI publishing configured +- ✅ Security vulnerabilities addressed +- ✅ Documentation complete +- ✅ Ready for MCP server development + +### Key Achievements + +**Initial Setup** (Commit: `b6e1b8b`): +- Project name: mcp-langfuse +- Package name: mcp_langfuse +- All template variables replaced +- 6/6 tests passing +- Documentation builds successfully + +**PyPI & Security** (Commit: `3c0af02`): +- Automated PyPI publishing with Trusted Publishing (OIDC) +- NPM dependencies updated (semantic-release v25.0.2, commitizen v4.3.1) +- Security vulnerabilities reduced from 9 to 5 low severity +- sr-uv-plugin configured for pyproject.toml support +- Comprehensive PyPI setup documentation + +### Git History + +``` +e335ea4 (HEAD -> dev) docs: update PyPI setup report with commit reference +3c0af02 feat(ci): add PyPI publishing with Trusted Publishing and update dependencies +680728e docs: add repository setup completion summary +b6e1b8b feat: complete repository setup from template +efc82d3 (origin/main, origin/HEAD, main) Initial commit +``` + +### Current State + +**Branch**: `dev` + +**Dependencies**: +- Python: 3.12+ +- Node.js: LTS +- semantic-release: 25.0.2 +- commitizen: 4.3.1 +- sr-uv-plugin: latest from GitHub + +**Security**: +- 5 low severity vulnerabilities (commitizen dev dependencies - acceptable) +- All high severity vulnerabilities resolved +- Trusted Publishing configured (no API tokens) + +**Workflow**: +- ✅ Test job: Runs tests and verifies imports +- ✅ Release job: Semantic-release, version bump, changelog, build +- ✅ Publish job: PyPI publishing with OIDC authentication + +### Next Steps + +The repository is fully configured and ready for MCP server implementation: + +1. **Define MCP Server Architecture** + - Analyze Langfuse REST API + - Design tools, prompts, and resources + - Plan trace analysis capabilities + +2. **Add MCP Dependencies** + - MCP SDK + - Langfuse API client + - Additional required packages + +3. **Implement Core Functionality** + - MCP server implementation + - Langfuse API integration + - Trace analysis tools + +4. **Testing & Documentation** + - Comprehensive test suite + - User guides + - API documentation + +## Administrator Actions Required + +### GitHub Secrets (if not already configured) +- `SEMANTIC_RELEASE_APP_ID`: GitHub App ID +- `SEMANTIC_RELEASE_PRIVATE_KEY`: GitHub App private key + +### PyPI Configuration (one-time setup) +1. First manual release to create project on PyPI +2. Configure Trusted Publishing on PyPI: + - Project: mcp-langfuse + - Owner: CrackingShells + - Repository: mcp-langfuse + - Workflow: semantic-release.yml + - Environment: pypi +3. 
Create `pypi` environment in GitHub repository settings + +See [PyPI Setup Documentation](../../docs/articles/devs/pypi-setup.md) for detailed instructions. + +--- + +**Last Updated**: 2024-12-02 +**Status**: Setup Complete - Ready for Development diff --git a/__temp__/langfuse_rest_api_power_document.txt b/__temp__/langfuse_rest_api_power_document.txt new file mode 100644 index 0000000..b414a44 --- /dev/null +++ b/__temp__/langfuse_rest_api_power_document.txt @@ -0,0 +1,9660 @@ +openapi: 3.0.1 +info: + title: langfuse + version: '' + description: >- + ## Authentication + + + Authenticate with the API using [Basic + Auth](https://en.wikipedia.org/wiki/Basic_access_authentication), get API + keys in the project settings: + + + - username: Langfuse Public Key + + - password: Langfuse Secret Key + + + ## Exports + + + - OpenAPI spec: https://cloud.langfuse.com/generated/api/openapi.yml + + - Postman collection: + https://cloud.langfuse.com/generated/postman/collection.json +paths: + /api/public/annotation-queues: + get: + description: Get all annotation queues + operationId: annotationQueues_listQueues + tags: + - AnnotationQueues + parameters: + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedAnnotationQueues' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + post: + description: Create an annotation queue + operationId: annotationQueues_createQueue + tags: + - AnnotationQueues + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AnnotationQueue' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAnnotationQueueRequest' + /api/public/annotation-queues/{queueId}: + get: + description: Get an annotation queue by ID + operationId: annotationQueues_getQueue + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AnnotationQueue' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + 
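Every endpoint in this spec is secured with HTTP Basic Auth, using the Langfuse public key as username and the secret key as password. As a minimal sketch only (the host URL and variable names below are assumptions, not part of the spec), listing annotation queues from the shell could look like:

```bash
# Hypothetical shell variables; self-hosted deployments use their own base URL.
LANGFUSE_HOST="https://cloud.langfuse.com"

# GET /api/public/annotation-queues with the optional page/limit parameters;
# -u sends Basic Auth credentials, -f fails on HTTP error status codes.
curl -sf -u "$LANGFUSE_PUBLIC_KEY:$LANGFUSE_SECRET_KEY" \
  "$LANGFUSE_HOST/api/public/annotation-queues?page=1&limit=10"
```

The same pattern applies to the other `BasicAuth`-secured endpoints below; only the path and query parameters change.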
/api/public/annotation-queues/{queueId}/items: + get: + description: Get items for a specific annotation queue + operationId: annotationQueues_listQueueItems + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + - name: status + in: query + description: Filter by status + required: false + schema: + $ref: '#/components/schemas/AnnotationQueueStatus' + nullable: true + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedAnnotationQueueItems' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + post: + description: Add an item to an annotation queue + operationId: annotationQueues_createQueueItem + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AnnotationQueueItem' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAnnotationQueueItemRequest' + /api/public/annotation-queues/{queueId}/items/{itemId}: + get: + description: Get a specific item from an annotation queue + operationId: annotationQueues_getQueueItem + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + - name: itemId + in: path + description: The unique identifier of the annotation queue item + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AnnotationQueueItem' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + patch: + description: Update an annotation queue item + operationId: annotationQueues_updateQueueItem + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + - name: itemId + in: path + description: The unique identifier of 
the annotation queue item + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AnnotationQueueItem' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateAnnotationQueueItemRequest' + delete: + description: Remove an item from an annotation queue + operationId: annotationQueues_deleteQueueItem + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + - name: itemId + in: path + description: The unique identifier of the annotation queue item + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteAnnotationQueueItemResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/annotation-queues/{queueId}/assignments: + post: + description: Create an assignment for a user to an annotation queue + operationId: annotationQueues_createQueueAssignment + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAnnotationQueueAssignmentResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/AnnotationQueueAssignmentRequest' + delete: + description: Delete an assignment for a user to an annotation queue + operationId: annotationQueues_deleteQueueAssignment + tags: + - AnnotationQueues + parameters: + - name: queueId + in: path + description: The unique identifier of the annotation queue + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteAnnotationQueueAssignmentResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - 
BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/AnnotationQueueAssignmentRequest' + /api/public/integrations/blob-storage: + get: + description: >- + Get all blob storage integrations for the organization (requires + organization-scoped API key) + operationId: blobStorageIntegrations_getBlobStorageIntegrations + tags: + - BlobStorageIntegrations + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BlobStorageIntegrationsResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + put: + description: >- + Create or update a blob storage integration for a specific project + (requires organization-scoped API key). The configuration is validated + by performing a test upload to the bucket. + operationId: blobStorageIntegrations_upsertBlobStorageIntegration + tags: + - BlobStorageIntegrations + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BlobStorageIntegrationResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateBlobStorageIntegrationRequest' + /api/public/integrations/blob-storage/{id}: + delete: + description: >- + Delete a blob storage integration by ID (requires organization-scoped + API key) + operationId: blobStorageIntegrations_deleteBlobStorageIntegration + tags: + - BlobStorageIntegrations + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BlobStorageIntegrationDeletionResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/comments: + post: + description: >- + Create a comment. Comments may be attached to different object types + (trace, observation, session, prompt). 
+ operationId: comments_create + tags: + - Comments + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCommentResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateCommentRequest' + get: + description: Get all comments + operationId: comments_get + tags: + - Comments + parameters: + - name: page + in: query + description: Page number, starts at 1. + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: >- + Limit of items per page. If you encounter api issues due to too + large page sizes, try to reduce the limit + required: false + schema: + type: integer + nullable: true + - name: objectType + in: query + description: >- + Filter comments by object type (trace, observation, session, + prompt). + required: false + schema: + type: string + nullable: true + - name: objectId + in: query + description: >- + Filter comments by object id. If objectType is not provided, an + error will be thrown. + required: false + schema: + type: string + nullable: true + - name: authorUserId + in: query + description: Filter comments by author user id. + required: false + schema: + type: string + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/GetCommentsResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/comments/{commentId}: + get: + description: Get a comment by id + operationId: comments_get-by-id + tags: + - Comments + parameters: + - name: commentId + in: path + description: The unique langfuse identifier of a comment + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Comment' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/dataset-items: + post: + description: Create a dataset item + operationId: datasetItems_create + tags: + - DatasetItems + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DatasetItem' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + 
'405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateDatasetItemRequest' + get: + description: Get dataset items + operationId: datasetItems_list + tags: + - DatasetItems + parameters: + - name: datasetName + in: query + required: false + schema: + type: string + nullable: true + - name: sourceTraceId + in: query + required: false + schema: + type: string + nullable: true + - name: sourceObservationId + in: query + required: false + schema: + type: string + nullable: true + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedDatasetItems' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/dataset-items/{id}: + get: + description: Get a dataset item + operationId: datasetItems_get + tags: + - DatasetItems + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DatasetItem' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + delete: + description: >- + Delete a dataset item and all its run items. This action is + irreversible. 
+ operationId: datasetItems_delete + tags: + - DatasetItems + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteDatasetItemResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/dataset-run-items: + post: + description: Create a dataset run item + operationId: datasetRunItems_create + tags: + - DatasetRunItems + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DatasetRunItem' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateDatasetRunItemRequest' + get: + description: List dataset run items + operationId: datasetRunItems_list + tags: + - DatasetRunItems + parameters: + - name: datasetId + in: query + required: true + schema: + type: string + - name: runName + in: query + required: true + schema: + type: string + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedDatasetRunItems' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/v2/datasets: + get: + description: Get all datasets + operationId: datasets_list + tags: + - Datasets + parameters: + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedDatasets' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + post: + description: Create a dataset + operationId: datasets_create + tags: + - Datasets + parameters: [] + responses: + '200': + 
description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Dataset' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateDatasetRequest' + /api/public/v2/datasets/{datasetName}: + get: + description: Get a dataset + operationId: datasets_get + tags: + - Datasets + parameters: + - name: datasetName + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Dataset' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/datasets/{datasetName}/runs/{runName}: + get: + description: Get a dataset run and its items + operationId: datasets_getRun + tags: + - Datasets + parameters: + - name: datasetName + in: path + required: true + schema: + type: string + - name: runName + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DatasetRunWithItems' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + delete: + description: Delete a dataset run and all its run items. This action is irreversible. 
+ operationId: datasets_deleteRun + tags: + - Datasets + parameters: + - name: datasetName + in: path + required: true + schema: + type: string + - name: runName + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteDatasetRunResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/datasets/{datasetName}/runs: + get: + description: Get dataset runs + operationId: datasets_getRuns + tags: + - Datasets + parameters: + - name: datasetName + in: path + required: true + schema: + type: string + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedDatasetRuns' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/health: + get: + description: Check health of API and database + operationId: health_health + tags: + - Health + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/HealthResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + '503': + description: '' + /api/public/ingestion: + post: + description: >- + **Legacy endpoint for batch ingestion for Langfuse Observability.** + + + -> Please use the OpenTelemetry endpoint (`/api/public/otel/v1/traces`). + Learn more: https://langfuse.com/integrations/native/opentelemetry + + + Within each batch, there can be multiple events. + + Each event has a type, an id, a timestamp, metadata and a body. + + Internally, we refer to this as the "event envelope" as it tells us + something about the event but not the trace. + + We use the event id within this envelope to deduplicate messages to + avoid processing the same event twice, i.e. the event id should be + unique per request. + + The event.body.id is the ID of the actual trace and will be used for + updates and will be visible within the Langfuse App. + + I.e. if you want to update a trace, you'd use the same body id, but + separate event IDs. + + + Notes: + + - Introduction to data model: + https://langfuse.com/docs/observability/data-model + + - Batch sizes are limited to 3.5 MB in total. You need to adjust the + number of events per batch accordingly. 
+ + - The API does not return a 4xx status code for input errors. Instead, + it responds with a 207 status code, which includes a list of the + encountered errors. + operationId: ingestion_batch + tags: + - Ingestion + parameters: [] + responses: + '207': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/IngestionResponse' + examples: + Example1: + value: + successes: + - id: abcdef-1234-5678-90ab + status: 201 + errors: [] + Example2: + value: + successes: + - id: abcdef-1234-5678-90ab + status: 201 + errors: [] + Example3: + value: + successes: + - id: abcdef-1234-5678-90ab + status: 201 + errors: [] + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + batch: + type: array + items: + $ref: '#/components/schemas/IngestionEvent' + description: >- + Batch of tracing events to be ingested. Discriminated by + attribute `type`. + metadata: + nullable: true + description: >- + Optional. Metadata field used by the Langfuse SDKs for + debugging. + required: + - batch + examples: + Example1: + value: + batch: + - id: abcdef-1234-5678-90ab + timestamp: '2022-01-01T00:00:00.000Z' + type: trace-create + body: + id: abcdef-1234-5678-90ab + timestamp: '2022-01-01T00:00:00.000Z' + environment: production + name: My Trace + userId: 1234-5678-90ab-cdef + input: My input + output: My output + sessionId: 1234-5678-90ab-cdef + release: 1.0.0 + version: 1.0.0 + metadata: My metadata + tags: + - tag1 + - tag2 + public: true + Example2: + value: + batch: + - id: abcdef-1234-5678-90ab + timestamp: '2022-01-01T00:00:00.000Z' + type: span-create + body: + id: abcdef-1234-5678-90ab + traceId: 1234-5678-90ab-cdef + startTime: '2022-01-01T00:00:00.000Z' + environment: test + Example3: + value: + batch: + - id: abcdef-1234-5678-90ab + timestamp: '2022-01-01T00:00:00.000Z' + type: score-create + body: + id: abcdef-1234-5678-90ab + traceId: 1234-5678-90ab-cdef + name: My Score + value: 0.9 + environment: default + /api/public/llm-connections: + get: + description: Get all LLM connections in a project + operationId: llmConnections_list + tags: + - LlmConnections + parameters: + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedLlmConnections' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + put: + description: >- + Create or update an LLM connection. The connection is upserted on + provider. 
+ operationId: llmConnections_upsert + tags: + - LlmConnections + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/LlmConnection' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpsertLlmConnectionRequest' + /api/public/media/{mediaId}: + get: + description: Get a media record + operationId: media_get + tags: + - Media + parameters: + - name: mediaId + in: path + description: The unique langfuse identifier of a media record + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/GetMediaResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + patch: + description: Patch a media record + operationId: media_patch + tags: + - Media + parameters: + - name: mediaId + in: path + description: The unique langfuse identifier of a media record + required: true + schema: + type: string + responses: + '204': + description: '' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PatchMediaBody' + /api/public/media: + post: + description: Get a presigned upload URL for a media record + operationId: media_getUploadUrl + tags: + - Media + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/GetMediaUploadUrlResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/GetMediaUploadUrlRequest' + /api/public/metrics: + get: + description: >- + Get metrics from the Langfuse project using a query object. + + + For more details, see the [Metrics API + documentation](https://langfuse.com/docs/metrics/features/metrics-api). 
+ operationId: metrics_metrics + tags: + - Metrics + parameters: + - name: query + in: query + description: >- + JSON string containing the query parameters with the following + structure: + + ```json + + { + "view": string, // Required. One of "traces", "observations", "scores-numeric", "scores-categorical" + "dimensions": [ // Optional. Default: [] + { + "field": string // Field to group by, e.g. "name", "userId", "sessionId" + } + ], + "metrics": [ // Required. At least one metric must be provided + { + "measure": string, // What to measure, e.g. "count", "latency", "value" + "aggregation": string // How to aggregate, e.g. "count", "sum", "avg", "p95", "histogram" + } + ], + "filters": [ // Optional. Default: [] + { + "column": string, // Column to filter on + "operator": string, // Operator, e.g. "=", ">", "<", "contains" + "value": any, // Value to compare against + "type": string, // Data type, e.g. "string", "number", "stringObject" + "key": string // Required only when filtering on metadata + } + ], + "timeDimension": { // Optional. Default: null. If provided, results will be grouped by time + "granularity": string // One of "minute", "hour", "day", "week", "month", "auto" + }, + "fromTimestamp": string, // Required. ISO datetime string for start of time range + "toTimestamp": string, // Required. ISO datetime string for end of time range + "orderBy": [ // Optional. Default: null + { + "field": string, // Field to order by + "direction": string // "asc" or "desc" + } + ], + "config": { // Optional. Query-specific configuration + "bins": number, // Optional. Number of bins for histogram (1-100), default: 10 + "row_limit": number // Optional. Row limit for results (1-1000) + } + } + + ``` + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/MetricsResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/models: + post: + description: Create a model + operationId: models_create + tags: + - Models + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Model' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateModelRequest' + get: + description: Get all models + operationId: models_list + tags: + - Models + parameters: + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedModels' + '400': + description: '' + content: + 
application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/models/{id}: + get: + description: Get a model + operationId: models_get + tags: + - Models + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Model' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + delete: + description: >- + Delete a model. Cannot delete models managed by Langfuse. You can create + your own definition with the same modelName to override the definition + though. + operationId: models_delete + tags: + - Models + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '204': + description: '' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/observations/{observationId}: + get: + description: Get a observation + operationId: observations_get + tags: + - Observations + parameters: + - name: observationId + in: path + description: >- + The unique langfuse identifier of an observation, can be an event, + span or generation + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ObservationsView' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/observations: + get: + description: Get a list of observations + operationId: observations_getMany + tags: + - Observations + parameters: + - name: page + in: query + description: Page number, starts at 1. + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: >- + Limit of items per page. If you encounter api issues due to too + large page sizes, try to reduce the limit. 
+ required: false + schema: + type: integer + nullable: true + - name: name + in: query + required: false + schema: + type: string + nullable: true + - name: userId + in: query + required: false + schema: + type: string + nullable: true + - name: type + in: query + required: false + schema: + type: string + nullable: true + - name: traceId + in: query + required: false + schema: + type: string + nullable: true + - name: level + in: query + description: >- + Optional filter for observations with a specific level (e.g. + "DEBUG", "DEFAULT", "WARNING", "ERROR"). + required: false + schema: + $ref: '#/components/schemas/ObservationLevel' + nullable: true + - name: parentObservationId + in: query + required: false + schema: + type: string + nullable: true + - name: environment + in: query + description: >- + Optional filter for observations where the environment is one of the + provided values. + required: false + schema: + type: array + items: + type: string + nullable: true + - name: fromStartTime + in: query + description: >- + Retrieve only observations with a start_time on or after this + datetime (ISO 8601). + required: false + schema: + type: string + format: date-time + nullable: true + - name: toStartTime + in: query + description: >- + Retrieve only observations with a start_time before this datetime + (ISO 8601). + required: false + schema: + type: string + format: date-time + nullable: true + - name: version + in: query + description: Optional filter to only include observations with a certain version. + required: false + schema: + type: string + nullable: true + - name: filter + in: query + description: >- + JSON string containing an array of filter conditions. When provided, + this takes precedence over query parameter filters (userId, name, + type, level, environment, fromStartTime, ...). + + + ## Filter Structure + + Each filter condition has the following structure: + + ```json + + [ + { + "type": string, // Required. One of: "datetime", "string", "number", "stringOptions", "categoryOptions", "arrayOptions", "stringObject", "numberObject", "boolean", "null" + "column": string, // Required. Column to filter on (see available columns below) + "operator": string, // Required. Operator based on type: + // - datetime: ">", "<", ">=", "<=" + // - string: "=", "contains", "does not contain", "starts with", "ends with" + // - stringOptions: "any of", "none of" + // - categoryOptions: "any of", "none of" + // - arrayOptions: "any of", "none of", "all of" + // - number: "=", ">", "<", ">=", "<=" + // - stringObject: "=", "contains", "does not contain", "starts with", "ends with" + // - numberObject: "=", ">", "<", ">=", "<=" + // - boolean: "=", "<>" + // - null: "is null", "is not null" + "value": any, // Required (except for null type). Value to compare against. 
Type depends on filter type + "key": string // Required only for stringObject, numberObject, and categoryOptions types when filtering on nested fields like metadata + } + ] + + ``` + + + ## Available Columns + + + ### Core Observation Fields + + - `id` (string) - Observation ID + + - `type` (string) - Observation type (SPAN, GENERATION, EVENT) + + - `name` (string) - Observation name + + - `traceId` (string) - Associated trace ID + + - `startTime` (datetime) - Observation start time + + - `endTime` (datetime) - Observation end time + + - `environment` (string) - Environment tag + + - `level` (string) - Log level (DEBUG, DEFAULT, WARNING, ERROR) + + - `statusMessage` (string) - Status message + + - `version` (string) - Version tag + + + ### Performance Metrics + + - `latency` (number) - Latency in seconds (calculated: end_time - + start_time) + + - `timeToFirstToken` (number) - Time to first token in seconds + + - `tokensPerSecond` (number) - Output tokens per second + + + ### Token Usage + + - `inputTokens` (number) - Number of input tokens + + - `outputTokens` (number) - Number of output tokens + + - `totalTokens` (number) - Total tokens (alias: `tokens`) + + + ### Cost Metrics + + - `inputCost` (number) - Input cost in USD + + - `outputCost` (number) - Output cost in USD + + - `totalCost` (number) - Total cost in USD + + + ### Model Information + + - `model` (string) - Provided model name + + - `promptName` (string) - Associated prompt name + + - `promptVersion` (number) - Associated prompt version + + + ### Structured Data + + - `metadata` (stringObject/numberObject/categoryOptions) - Metadata + key-value pairs. Use `key` parameter to filter on specific metadata + keys. + + + ### Associated Trace Fields (requires join with traces table) + + - `userId` (string) - User ID from associated trace + + - `traceName` (string) - Name from associated trace + + - `traceEnvironment` (string) - Environment from associated trace + + - `traceTags` (arrayOptions) - Tags from associated trace + + + ## Filter Examples + + ```json + + [ + { + "type": "string", + "column": "type", + "operator": "=", + "value": "GENERATION" + }, + { + "type": "number", + "column": "latency", + "operator": ">=", + "value": 2.5 + }, + { + "type": "stringObject", + "column": "metadata", + "key": "environment", + "operator": "=", + "value": "production" + } + ] + + ``` + required: false + schema: + type: string + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ObservationsViews' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/otel/v1/traces: + post: + description: >- + **OpenTelemetry Traces Ingestion Endpoint** + + + This endpoint implements the OTLP/HTTP specification for trace + ingestion, providing native OpenTelemetry integration for Langfuse + Observability. 
+ + + **Supported Formats:** + + - Binary Protobuf: `Content-Type: application/x-protobuf` + + - JSON Protobuf: `Content-Type: application/json` + + - Supports gzip compression via `Content-Encoding: gzip` header + + + **Specification Compliance:** + + - Conforms to [OTLP/HTTP Trace + Export](https://opentelemetry.io/docs/specs/otlp/#otlphttp) + + - Implements `ExportTraceServiceRequest` message format + + + **Documentation:** + + - Integration guide: + https://langfuse.com/integrations/native/opentelemetry + + - Data model: https://langfuse.com/docs/observability/data-model + operationId: opentelemetry_exportTraces + tags: + - Opentelemetry + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/OtelTraceResponse' + examples: + BasicTraceExport: + value: {} + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + resourceSpans: + type: array + items: + $ref: '#/components/schemas/OtelResourceSpan' + description: >- + Array of resource spans containing trace data as defined in + the OTLP specification + required: + - resourceSpans + examples: + BasicTraceExport: + value: + resourceSpans: + - resource: + attributes: + - key: service.name + value: + stringValue: my-service + - key: service.version + value: + stringValue: 1.0.0 + scopeSpans: + - scope: + name: langfuse-sdk + version: 2.60.3 + spans: + - traceId: 0123456789abcdef0123456789abcdef + spanId: 0123456789abcdef + name: my-operation + kind: 1 + startTimeUnixNano: '1747872000000000000' + endTimeUnixNano: '1747872001000000000' + attributes: + - key: langfuse.observation.type + value: + stringValue: generation + status: {} + /api/public/organizations/memberships: + get: + description: >- + Get all memberships for the organization associated with the API key + (requires organization-scoped API key) + operationId: organizations_getOrganizationMemberships + tags: + - Organizations + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipsResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + put: + description: >- + Create or update a membership for the organization associated with the + API key (requires organization-scoped API key) + operationId: organizations_updateOrganizationMembership + tags: + - Organizations + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + 
schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipRequest' + delete: + description: >- + Delete a membership from the organization associated with the API key + (requires organization-scoped API key) + operationId: organizations_deleteOrganizationMembership + tags: + - Organizations + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipDeletionResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteMembershipRequest' + /api/public/projects/{projectId}/memberships: + get: + description: >- + Get all memberships for a specific project (requires organization-scoped + API key) + operationId: organizations_getProjectMemberships + tags: + - Organizations + parameters: + - name: projectId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipsResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + put: + description: >- + Create or update a membership for a specific project (requires + organization-scoped API key). The user must already be a member of the + organization. + operationId: organizations_updateProjectMembership + tags: + - Organizations + parameters: + - name: projectId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipRequest' + delete: + description: >- + Delete a membership from a specific project (requires + organization-scoped API key). The user must be a member of the + organization. 
+ operationId: organizations_deleteProjectMembership + tags: + - Organizations + parameters: + - name: projectId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/MembershipDeletionResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteMembershipRequest' + /api/public/organizations/projects: + get: + description: >- + Get all projects for the organization associated with the API key + (requires organization-scoped API key) + operationId: organizations_getOrganizationProjects + tags: + - Organizations + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/OrganizationProjectsResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/organizations/apiKeys: + get: + description: >- + Get all API keys for the organization associated with the API key + (requires organization-scoped API key) + operationId: organizations_getOrganizationApiKeys + tags: + - Organizations + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/OrganizationApiKeysResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/projects: + get: + description: Get Project associated with API key + operationId: projects_get + tags: + - Projects + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Projects' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + post: + description: Create a new project (requires organization-scoped API key) + operationId: projects_create + tags: + - Projects + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + 
description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + type: string + metadata: + type: object + additionalProperties: true + nullable: true + description: Optional metadata for the project + retention: + type: integer + description: >- + Number of days to retain data. Must be 0 or at least 3 days. + Requires data-retention entitlement for non-zero values. + Optional. + required: + - name + - retention + /api/public/projects/{projectId}: + put: + description: Update a project by ID (requires organization-scoped API key). + operationId: projects_update + tags: + - Projects + parameters: + - name: projectId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + type: string + metadata: + type: object + additionalProperties: true + nullable: true + description: Optional metadata for the project + retention: + type: integer + description: >- + Number of days to retain data. Must be 0 or at least 3 days. + Requires data-retention entitlement for non-zero values. + Optional. + required: + - name + - retention + delete: + description: >- + Delete a project by ID (requires organization-scoped API key). Project + deletion is processed asynchronously. 
+ operationId: projects_delete + tags: + - Projects + parameters: + - name: projectId + in: path + required: true + schema: + type: string + responses: + '202': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectDeletionResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/projects/{projectId}/apiKeys: + get: + description: Get all API keys for a project (requires organization-scoped API key) + operationId: projects_getApiKeys + tags: + - Projects + parameters: + - name: projectId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyList' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + post: + description: >- + Create a new API key for a project (requires organization-scoped API + key) + operationId: projects_createApiKey + tags: + - Projects + parameters: + - name: projectId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + note: + type: string + nullable: true + description: Optional note for the API key + publicKey: + type: string + nullable: true + description: >- + Optional predefined public key. Must start with 'pk-lf-'. If + provided, secretKey must also be provided. + secretKey: + type: string + nullable: true + description: >- + Optional predefined secret key. Must start with 'sk-lf-'. If + provided, publicKey must also be provided. 
+ /api/public/projects/{projectId}/apiKeys/{apiKeyId}: + delete: + description: Delete an API key for a project (requires organization-scoped API key) + operationId: projects_deleteApiKey + tags: + - Projects + parameters: + - name: projectId + in: path + required: true + schema: + type: string + - name: apiKeyId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ApiKeyDeletionResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/v2/prompts/{name}/versions/{version}: + patch: + description: Update labels for a specific prompt version + operationId: promptVersion_update + tags: + - PromptVersion + parameters: + - name: name + in: path + description: >- + The name of the prompt. If the prompt is in a folder (e.g., + "folder/subfolder/prompt-name"), + + the folder path must be URL encoded. + required: true + schema: + type: string + - name: version + in: path + description: Version of the prompt to update + required: true + schema: + type: integer + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Prompt' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + newLabels: + type: array + items: + type: string + description: >- + New labels for the prompt version. Labels are unique across + versions. The "latest" label is reserved and managed by + Langfuse. + required: + - newLabels + /api/public/v2/prompts/{promptName}: + get: + description: Get a prompt + operationId: prompts_get + tags: + - Prompts + parameters: + - name: promptName + in: path + description: >- + The name of the prompt. If the prompt is in a folder (e.g., + "folder/subfolder/prompt-name"), + + the folder path must be URL encoded. + required: true + schema: + type: string + - name: version + in: query + description: Version of the prompt to be retrieved. + required: false + schema: + type: integer + nullable: true + - name: label + in: query + description: >- + Label of the prompt to be retrieved. Defaults to "production" if no + label or version is set. 
+ required: false + schema: + type: string + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Prompt' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/v2/prompts: + get: + description: Get a list of prompt names with versions and labels + operationId: prompts_list + tags: + - Prompts + parameters: + - name: name + in: query + required: false + schema: + type: string + nullable: true + - name: label + in: query + required: false + schema: + type: string + nullable: true + - name: tag + in: query + required: false + schema: + type: string + nullable: true + - name: page + in: query + description: page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: limit of items per page + required: false + schema: + type: integer + nullable: true + - name: fromUpdatedAt + in: query + description: >- + Optional filter to only include prompt versions created/updated on + or after a certain datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + - name: toUpdatedAt + in: query + description: >- + Optional filter to only include prompt versions created/updated + before a certain datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PromptMetaListResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + post: + description: Create a new version for the prompt with the given `name` + operationId: prompts_create + tags: + - Prompts + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Prompt' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreatePromptRequest' + /api/public/scim/ServiceProviderConfig: + get: + description: >- + Get SCIM Service Provider Configuration (requires organization-scoped + API key) + operationId: scim_getServiceProviderConfig + tags: + - Scim + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ServiceProviderConfig' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + 
content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/scim/ResourceTypes: + get: + description: Get SCIM Resource Types (requires organization-scoped API key) + operationId: scim_getResourceTypes + tags: + - Scim + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ResourceTypesResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/scim/Schemas: + get: + description: Get SCIM Schemas (requires organization-scoped API key) + operationId: scim_getSchemas + tags: + - Scim + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/SchemasResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/scim/Users: + get: + description: List users in the organization (requires organization-scoped API key) + operationId: scim_listUsers + tags: + - Scim + parameters: + - name: filter + in: query + description: Filter expression (e.g. 
userName eq "value") + required: false + schema: + type: string + nullable: true + - name: startIndex + in: query + description: 1-based index of the first result to return (default 1) + required: false + schema: + type: integer + nullable: true + - name: count + in: query + description: Maximum number of results to return (default 100) + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ScimUsersListResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + post: + description: >- + Create a new user in the organization (requires organization-scoped API + key) + operationId: scim_createUser + tags: + - Scim + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ScimUser' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + userName: + type: string + description: User's email address (required) + name: + $ref: '#/components/schemas/ScimName' + description: User's name information + emails: + type: array + items: + $ref: '#/components/schemas/ScimEmail' + nullable: true + description: User's email addresses + active: + type: boolean + nullable: true + description: Whether the user is active + password: + type: string + nullable: true + description: Initial password for the user + required: + - userName + - name + /api/public/scim/Users/{userId}: + get: + description: Get a specific user by ID (requires organization-scoped API key) + operationId: scim_getUser + tags: + - Scim + parameters: + - name: userId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ScimUser' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + delete: + description: >- + Remove a user from the organization (requires organization-scoped API + key). Note that this only removes the user from the organization but + does not delete the user entity itself. 
+ operationId: scim_deleteUser + tags: + - Scim + parameters: + - name: userId + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/EmptyResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/score-configs: + post: + description: >- + Create a score configuration (config). Score configs are used to define + the structure of scores + operationId: scoreConfigs_create + tags: + - ScoreConfigs + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreConfig' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateScoreConfigRequest' + get: + description: Get all score configs + operationId: scoreConfigs_get + tags: + - ScoreConfigs + parameters: + - name: page + in: query + description: Page number, starts at 1. + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: >- + Limit of items per page. 
If you encounter api issues due to too + large page sizes, try to reduce the limit + required: false + schema: + type: integer + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreConfigs' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/score-configs/{configId}: + get: + description: Get a score config + operationId: scoreConfigs_get-by-id + tags: + - ScoreConfigs + parameters: + - name: configId + in: path + description: The unique langfuse identifier of a score config + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreConfig' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + patch: + description: Update a score config + operationId: scoreConfigs_update + tags: + - ScoreConfigs + parameters: + - name: configId + in: path + description: The unique langfuse identifier of a score config + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreConfig' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateScoreConfigRequest' + /api/public/v2/scores: + get: + description: Get a list of scores (supports both trace and session scores) + operationId: scoreV2_get + tags: + - ScoreV2 + parameters: + - name: page + in: query + description: Page number, starts at 1. + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: >- + Limit of items per page. If you encounter api issues due to too + large page sizes, try to reduce the limit. + required: false + schema: + type: integer + nullable: true + - name: userId + in: query + description: Retrieve only scores with this userId associated to the trace. + required: false + schema: + type: string + nullable: true + - name: name + in: query + description: Retrieve only scores with this name. 
+ required: false + schema: + type: string + nullable: true + - name: fromTimestamp + in: query + description: >- + Optional filter to only include scores created on or after a certain + datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + - name: toTimestamp + in: query + description: >- + Optional filter to only include scores created before a certain + datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + - name: environment + in: query + description: >- + Optional filter for scores where the environment is one of the + provided values. + required: false + schema: + type: array + items: + type: string + nullable: true + - name: source + in: query + description: Retrieve only scores from a specific source. + required: false + schema: + $ref: '#/components/schemas/ScoreSource' + nullable: true + - name: operator + in: query + description: Retrieve only scores with value. + required: false + schema: + type: string + nullable: true + - name: value + in: query + description: Retrieve only scores with value. + required: false + schema: + type: number + format: double + nullable: true + - name: scoreIds + in: query + description: Comma-separated list of score IDs to limit the results to. + required: false + schema: + type: string + nullable: true + - name: configId + in: query + description: Retrieve only scores with a specific configId. + required: false + schema: + type: string + nullable: true + - name: sessionId + in: query + description: Retrieve only scores with a specific sessionId. + required: false + schema: + type: string + nullable: true + - name: datasetRunId + in: query + description: Retrieve only scores with a specific datasetRunId. + required: false + schema: + type: string + nullable: true + - name: traceId + in: query + description: Retrieve only scores with a specific traceId. + required: false + schema: + type: string + nullable: true + - name: queueId + in: query + description: Retrieve only scores with a specific annotation queueId. + required: false + schema: + type: string + nullable: true + - name: dataType + in: query + description: Retrieve only scores with a specific dataType. + required: false + schema: + $ref: '#/components/schemas/ScoreDataType' + nullable: true + - name: traceTags + in: query + description: >- + Only scores linked to traces that include all of these tags will be + returned. 
+ required: false + schema: + type: array + items: + type: string + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/GetScoresResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/v2/scores/{scoreId}: + get: + description: Get a score (supports both trace and session scores) + operationId: scoreV2_get-by-id + tags: + - ScoreV2 + parameters: + - name: scoreId + in: path + description: The unique langfuse identifier of a score + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Score' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/scores: + post: + description: Create a score (supports both trace and session scores) + operationId: score_create + tags: + - Score + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CreateScoreResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateScoreRequest' + /api/public/scores/{scoreId}: + delete: + description: Delete a score (supports both trace and session scores) + operationId: score_delete + tags: + - Score + parameters: + - name: scoreId + in: path + description: The unique langfuse identifier of a score + required: true + schema: + type: string + responses: + '204': + description: '' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/sessions: + get: + description: Get sessions + operationId: sessions_list + tags: + - Sessions + parameters: + - name: page + in: query + description: Page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: >- + Limit of items per page. If you encounter api issues due to too + large page sizes, try to reduce the limit. 
+ required: false + schema: + type: integer + nullable: true + - name: fromTimestamp + in: query + description: >- + Optional filter to only include sessions created on or after a + certain datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + - name: toTimestamp + in: query + description: >- + Optional filter to only include sessions created before a certain + datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + - name: environment + in: query + description: >- + Optional filter for sessions where the environment is one of the + provided values. + required: false + schema: + type: array + items: + type: string + nullable: true + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedSessions' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/sessions/{sessionId}: + get: + description: >- + Get a session. Please note that `traces` on this endpoint are not + paginated, if you plan to fetch large sessions, consider `GET + /api/public/traces?sessionId=` + operationId: sessions_get + tags: + - Sessions + parameters: + - name: sessionId + in: path + description: The unique id of a session + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/SessionWithTraces' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/traces/{traceId}: + get: + description: Get a specific trace + operationId: trace_get + tags: + - Trace + parameters: + - name: traceId + in: path + description: The unique langfuse identifier of a trace + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/TraceWithFullDetails' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + delete: + description: Delete a specific trace + operationId: trace_delete + tags: + - Trace + parameters: + - name: traceId + in: path + description: The unique langfuse identifier of the trace to delete + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteTraceResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + 
description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + /api/public/traces: + get: + description: Get list of traces + operationId: trace_list + tags: + - Trace + parameters: + - name: page + in: query + description: Page number, starts at 1 + required: false + schema: + type: integer + nullable: true + - name: limit + in: query + description: >- + Limit of items per page. If you encounter api issues due to too + large page sizes, try to reduce the limit. + required: false + schema: + type: integer + nullable: true + - name: userId + in: query + required: false + schema: + type: string + nullable: true + - name: name + in: query + required: false + schema: + type: string + nullable: true + - name: sessionId + in: query + required: false + schema: + type: string + nullable: true + - name: fromTimestamp + in: query + description: >- + Optional filter to only include traces with a trace.timestamp on or + after a certain datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + - name: toTimestamp + in: query + description: >- + Optional filter to only include traces with a trace.timestamp before + a certain datetime (ISO 8601) + required: false + schema: + type: string + format: date-time + nullable: true + - name: orderBy + in: query + description: >- + Format of the string [field].[asc/desc]. Fields: id, timestamp, + name, userId, release, version, public, bookmarked, sessionId. + Example: timestamp.asc + required: false + schema: + type: string + nullable: true + - name: tags + in: query + description: Only traces that include all of these tags will be returned. + required: false + schema: + type: array + items: + type: string + nullable: true + - name: version + in: query + description: Optional filter to only include traces with a certain version. + required: false + schema: + type: string + nullable: true + - name: release + in: query + description: Optional filter to only include traces with a certain release. + required: false + schema: + type: string + nullable: true + - name: environment + in: query + description: >- + Optional filter for traces where the environment is one of the + provided values. + required: false + schema: + type: array + items: + type: string + nullable: true + - name: fields + in: query + description: >- + Comma-separated list of fields to include in the response. Available + field groups: 'core' (always included), 'io' (input, output, + metadata), 'scores', 'observations', 'metrics'. If not specified, + all fields are returned. Example: 'core,scores,metrics'. Note: + Excluded 'observations' or 'scores' fields return empty arrays; + excluded 'metrics' returns -1 for 'totalCost' and 'latency'. + required: false + schema: + type: string + nullable: true + - name: filter + in: query + description: >- + JSON string containing an array of filter conditions. When provided, + this takes precedence over query parameter filters (userId, name, + sessionId, tags, version, release, environment, fromTimestamp, + toTimestamp). + + + ## Filter Structure + + Each filter condition has the following structure: + + ```json + + [ + { + "type": string, // Required. One of: "datetime", "string", "number", "stringOptions", "categoryOptions", "arrayOptions", "stringObject", "numberObject", "boolean", "null" + "column": string, // Required. Column to filter on (see available columns below) + "operator": string, // Required. 
Operator based on type: + // - datetime: ">", "<", ">=", "<=" + // - string: "=", "contains", "does not contain", "starts with", "ends with" + // - stringOptions: "any of", "none of" + // - categoryOptions: "any of", "none of" + // - arrayOptions: "any of", "none of", "all of" + // - number: "=", ">", "<", ">=", "<=" + // - stringObject: "=", "contains", "does not contain", "starts with", "ends with" + // - numberObject: "=", ">", "<", ">=", "<=" + // - boolean: "=", "<>" + // - null: "is null", "is not null" + "value": any, // Required (except for null type). Value to compare against. Type depends on filter type + "key": string // Required only for stringObject, numberObject, and categoryOptions types when filtering on nested fields like metadata + } + ] + + ``` + + + ## Available Columns + + + ### Core Trace Fields + + - `id` (string) - Trace ID + + - `name` (string) - Trace name + + - `timestamp` (datetime) - Trace timestamp + + - `userId` (string) - User ID + + - `sessionId` (string) - Session ID + + - `environment` (string) - Environment tag + + - `version` (string) - Version tag + + - `release` (string) - Release tag + + - `tags` (arrayOptions) - Array of tags + + - `bookmarked` (boolean) - Bookmark status + + + ### Structured Data + + - `metadata` (stringObject/numberObject/categoryOptions) - Metadata + key-value pairs. Use `key` parameter to filter on specific metadata + keys. + + + ### Aggregated Metrics (from observations) + + These metrics are aggregated from all observations within the trace: + + - `latency` (number) - Latency in seconds (time from first + observation start to last observation end) + + - `inputTokens` (number) - Total input tokens across all + observations + + - `outputTokens` (number) - Total output tokens across all + observations + + - `totalTokens` (number) - Total tokens (alias: `tokens`) + + - `inputCost` (number) - Total input cost in USD + + - `outputCost` (number) - Total output cost in USD + + - `totalCost` (number) - Total cost in USD + + + ### Observation Level Aggregations + + These fields aggregate observation levels within the trace: + + - `level` (string) - Highest severity level (ERROR > WARNING > + DEFAULT > DEBUG) + + - `warningCount` (number) - Count of WARNING level observations + + - `errorCount` (number) - Count of ERROR level observations + + - `defaultCount` (number) - Count of DEFAULT level observations + + - `debugCount` (number) - Count of DEBUG level observations + + + ### Scores (requires join with scores table) + + - `scores_avg` (number) - Average of numeric scores (alias: + `scores`) + + - `score_categories` (categoryOptions) - Categorical score values + + + ## Filter Examples + + ```json + + [ + { + "type": "datetime", + "column": "timestamp", + "operator": ">=", + "value": "2024-01-01T00:00:00Z" + }, + { + "type": "string", + "column": "userId", + "operator": "=", + "value": "user-123" + }, + { + "type": "number", + "column": "totalCost", + "operator": ">=", + "value": 0.01 + }, + { + "type": "arrayOptions", + "column": "tags", + "operator": "all of", + "value": ["production", "critical"] + }, + { + "type": "stringObject", + "column": "metadata", + "key": "customer_tier", + "operator": "=", + "value": "enterprise" + } + ] + + ``` + + + ## Performance Notes + + - Filtering on `userId`, `sessionId`, or `metadata` may enable skip + indexes for better query performance + + - Score filters require a join with the scores table and may impact + query performance + required: false + schema: + type: string + nullable: true + responses: 
+ '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/Traces' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + delete: + description: Delete multiple traces + operationId: trace_deleteMultiple + tags: + - Trace + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteTraceResponse' + '400': + description: '' + content: + application/json: + schema: {} + '401': + description: '' + content: + application/json: + schema: {} + '403': + description: '' + content: + application/json: + schema: {} + '404': + description: '' + content: + application/json: + schema: {} + '405': + description: '' + content: + application/json: + schema: {} + security: + - BasicAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + traceIds: + type: array + items: + type: string + description: List of trace IDs to delete + required: + - traceIds +components: + schemas: + AnnotationQueueStatus: + title: AnnotationQueueStatus + type: string + enum: + - PENDING + - COMPLETED + AnnotationQueueObjectType: + title: AnnotationQueueObjectType + type: string + enum: + - TRACE + - OBSERVATION + - SESSION + AnnotationQueue: + title: AnnotationQueue + type: object + properties: + id: + type: string + name: + type: string + description: + type: string + nullable: true + scoreConfigIds: + type: array + items: + type: string + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - name + - scoreConfigIds + - createdAt + - updatedAt + AnnotationQueueItem: + title: AnnotationQueueItem + type: object + properties: + id: + type: string + queueId: + type: string + objectId: + type: string + objectType: + $ref: '#/components/schemas/AnnotationQueueObjectType' + status: + $ref: '#/components/schemas/AnnotationQueueStatus' + completedAt: + type: string + format: date-time + nullable: true + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - queueId + - objectId + - objectType + - status + - createdAt + - updatedAt + PaginatedAnnotationQueues: + title: PaginatedAnnotationQueues + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/AnnotationQueue' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + PaginatedAnnotationQueueItems: + title: PaginatedAnnotationQueueItems + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/AnnotationQueueItem' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + CreateAnnotationQueueRequest: + title: CreateAnnotationQueueRequest + type: object + properties: + name: + type: string + description: + type: string + nullable: true + scoreConfigIds: + type: array + items: + type: string + required: + - name + - scoreConfigIds + CreateAnnotationQueueItemRequest: + title: CreateAnnotationQueueItemRequest + type: object + properties: + objectId: + type: string + objectType: + $ref: '#/components/schemas/AnnotationQueueObjectType' + status: + 
$ref: '#/components/schemas/AnnotationQueueStatus' + nullable: true + description: Defaults to PENDING for new queue items + required: + - objectId + - objectType + UpdateAnnotationQueueItemRequest: + title: UpdateAnnotationQueueItemRequest + type: object + properties: + status: + $ref: '#/components/schemas/AnnotationQueueStatus' + nullable: true + DeleteAnnotationQueueItemResponse: + title: DeleteAnnotationQueueItemResponse + type: object + properties: + success: + type: boolean + message: + type: string + required: + - success + - message + AnnotationQueueAssignmentRequest: + title: AnnotationQueueAssignmentRequest + type: object + properties: + userId: + type: string + required: + - userId + DeleteAnnotationQueueAssignmentResponse: + title: DeleteAnnotationQueueAssignmentResponse + type: object + properties: + success: + type: boolean + required: + - success + CreateAnnotationQueueAssignmentResponse: + title: CreateAnnotationQueueAssignmentResponse + type: object + properties: + userId: + type: string + queueId: + type: string + projectId: + type: string + required: + - userId + - queueId + - projectId + BlobStorageIntegrationType: + title: BlobStorageIntegrationType + type: string + enum: + - S3 + - S3_COMPATIBLE + - AZURE_BLOB_STORAGE + BlobStorageIntegrationFileType: + title: BlobStorageIntegrationFileType + type: string + enum: + - JSON + - CSV + - JSONL + BlobStorageExportMode: + title: BlobStorageExportMode + type: string + enum: + - FULL_HISTORY + - FROM_TODAY + - FROM_CUSTOM_DATE + BlobStorageExportFrequency: + title: BlobStorageExportFrequency + type: string + enum: + - hourly + - daily + - weekly + CreateBlobStorageIntegrationRequest: + title: CreateBlobStorageIntegrationRequest + type: object + properties: + projectId: + type: string + description: ID of the project in which to configure the blob storage integration + type: + $ref: '#/components/schemas/BlobStorageIntegrationType' + bucketName: + type: string + description: Name of the storage bucket + endpoint: + type: string + nullable: true + description: Custom endpoint URL (required for S3_COMPATIBLE type) + region: + type: string + description: Storage region + accessKeyId: + type: string + nullable: true + description: Access key ID for authentication + secretAccessKey: + type: string + nullable: true + description: Secret access key for authentication (will be encrypted when stored) + prefix: + type: string + nullable: true + description: >- + Path prefix for exported files (must end with forward slash if + provided) + exportFrequency: + $ref: '#/components/schemas/BlobStorageExportFrequency' + enabled: + type: boolean + description: Whether the integration is active + forcePathStyle: + type: boolean + description: Use path-style URLs for S3 requests + fileType: + $ref: '#/components/schemas/BlobStorageIntegrationFileType' + exportMode: + $ref: '#/components/schemas/BlobStorageExportMode' + exportStartDate: + type: string + format: date-time + nullable: true + description: >- + Custom start date for exports (required when exportMode is + FROM_CUSTOM_DATE) + required: + - projectId + - type + - bucketName + - region + - exportFrequency + - enabled + - forcePathStyle + - fileType + - exportMode + BlobStorageIntegrationResponse: + title: BlobStorageIntegrationResponse + type: object + properties: + id: + type: string + projectId: + type: string + type: + $ref: '#/components/schemas/BlobStorageIntegrationType' + bucketName: + type: string + endpoint: + type: string + nullable: true + region: + type: string + accessKeyId: 
+ type: string + nullable: true + prefix: + type: string + exportFrequency: + $ref: '#/components/schemas/BlobStorageExportFrequency' + enabled: + type: boolean + forcePathStyle: + type: boolean + fileType: + $ref: '#/components/schemas/BlobStorageIntegrationFileType' + exportMode: + $ref: '#/components/schemas/BlobStorageExportMode' + exportStartDate: + type: string + format: date-time + nullable: true + nextSyncAt: + type: string + format: date-time + nullable: true + lastSyncAt: + type: string + format: date-time + nullable: true + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - projectId + - type + - bucketName + - region + - prefix + - exportFrequency + - enabled + - forcePathStyle + - fileType + - exportMode + - createdAt + - updatedAt + BlobStorageIntegrationsResponse: + title: BlobStorageIntegrationsResponse + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/BlobStorageIntegrationResponse' + required: + - data + BlobStorageIntegrationDeletionResponse: + title: BlobStorageIntegrationDeletionResponse + type: object + properties: + message: + type: string + required: + - message + CreateCommentRequest: + title: CreateCommentRequest + type: object + properties: + projectId: + type: string + description: The id of the project to attach the comment to. + objectType: + type: string + description: >- + The type of the object to attach the comment to (trace, observation, + session, prompt). + objectId: + type: string + description: >- + The id of the object to attach the comment to. If this does not + reference a valid existing object, an error will be thrown. + content: + type: string + description: >- + The content of the comment. May include markdown. Currently limited + to 5000 characters. + authorUserId: + type: string + nullable: true + description: The id of the user who created the comment. + required: + - projectId + - objectType + - objectId + - content + CreateCommentResponse: + title: CreateCommentResponse + type: object + properties: + id: + type: string + description: The id of the created object in Langfuse + required: + - id + GetCommentsResponse: + title: GetCommentsResponse + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Comment' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + Trace: + title: Trace + type: object + properties: + id: + type: string + description: The unique identifier of a trace + timestamp: + type: string + format: date-time + description: The timestamp when the trace was created + name: + type: string + nullable: true + description: The name of the trace + input: + nullable: true + description: The input data of the trace. Can be any JSON. + output: + nullable: true + description: The output data of the trace. Can be any JSON. + sessionId: + type: string + nullable: true + description: The session identifier associated with the trace + release: + type: string + nullable: true + description: The release version of the application when the trace was created + version: + type: string + nullable: true + description: The version of the trace + userId: + type: string + nullable: true + description: The user identifier associated with the trace + metadata: + nullable: true + description: The metadata associated with the trace. Can be any JSON. + tags: + type: array + items: + type: string + nullable: true + description: >- + The tags associated with the trace. 
Can be an array of strings or + null. + public: + type: boolean + nullable: true + description: Public traces are accessible via url without login + environment: + type: string + nullable: true + description: >- + The environment from which this trace originated. Can be any + lowercase alphanumeric string with hyphens and underscores that does + not start with 'langfuse'. + required: + - id + - timestamp + TraceWithDetails: + title: TraceWithDetails + type: object + properties: + htmlPath: + type: string + description: Path of trace in Langfuse UI + latency: + type: number + format: double + description: Latency of trace in seconds + totalCost: + type: number + format: double + description: Cost of trace in USD + observations: + type: array + items: + type: string + description: List of observation ids + scores: + type: array + items: + type: string + description: List of score ids + required: + - htmlPath + - latency + - totalCost + - observations + - scores + allOf: + - $ref: '#/components/schemas/Trace' + TraceWithFullDetails: + title: TraceWithFullDetails + type: object + properties: + htmlPath: + type: string + description: Path of trace in Langfuse UI + latency: + type: number + format: double + description: Latency of trace in seconds + totalCost: + type: number + format: double + description: Cost of trace in USD + observations: + type: array + items: + $ref: '#/components/schemas/ObservationsView' + description: List of observations + scores: + type: array + items: + $ref: '#/components/schemas/ScoreV1' + description: List of scores + required: + - htmlPath + - latency + - totalCost + - observations + - scores + allOf: + - $ref: '#/components/schemas/Trace' + Session: + title: Session + type: object + properties: + id: + type: string + createdAt: + type: string + format: date-time + projectId: + type: string + environment: + type: string + nullable: true + description: The environment from which this session originated. + required: + - id + - createdAt + - projectId + SessionWithTraces: + title: SessionWithTraces + type: object + properties: + traces: + type: array + items: + $ref: '#/components/schemas/Trace' + required: + - traces + allOf: + - $ref: '#/components/schemas/Session' + Observation: + title: Observation + type: object + properties: + id: + type: string + description: The unique identifier of the observation + traceId: + type: string + nullable: true + description: The trace ID associated with the observation + type: + type: string + description: The type of the observation + name: + type: string + nullable: true + description: The name of the observation + startTime: + type: string + format: date-time + description: The start time of the observation + endTime: + type: string + format: date-time + nullable: true + description: The end time of the observation. 
+ completionStartTime: + type: string + format: date-time + nullable: true + description: The completion start time of the observation + model: + type: string + nullable: true + description: The model used for the observation + modelParameters: + type: object + additionalProperties: + $ref: '#/components/schemas/MapValue' + nullable: true + description: The parameters of the model used for the observation + input: + nullable: true + description: The input data of the observation + version: + type: string + nullable: true + description: The version of the observation + metadata: + nullable: true + description: Additional metadata of the observation + output: + nullable: true + description: The output data of the observation + usage: + $ref: '#/components/schemas/Usage' + nullable: true + description: >- + (Deprecated. Use usageDetails and costDetails instead.) The usage + data of the observation + level: + $ref: '#/components/schemas/ObservationLevel' + description: The level of the observation + statusMessage: + type: string + nullable: true + description: The status message of the observation + parentObservationId: + type: string + nullable: true + description: The parent observation ID + promptId: + type: string + nullable: true + description: The prompt ID associated with the observation + usageDetails: + type: object + additionalProperties: + type: integer + nullable: true + description: >- + The usage details of the observation. Key is the name of the usage + metric, value is the number of units consumed. The total key is the + sum of all (non-total) usage metrics or the total value ingested. + costDetails: + type: object + additionalProperties: + type: number + format: double + nullable: true + description: >- + The cost details of the observation. Key is the name of the cost + metric, value is the cost in USD. The total key is the sum of all + (non-total) cost metrics or the total value ingested. + environment: + type: string + nullable: true + description: >- + The environment from which this observation originated. Can be any + lowercase alphanumeric string with hyphens and underscores that does + not start with 'langfuse'. + required: + - id + - type + - startTime + - level + ObservationsView: + title: ObservationsView + type: object + properties: + promptName: + type: string + nullable: true + description: The name of the prompt associated with the observation + promptVersion: + type: integer + nullable: true + description: The version of the prompt associated with the observation + modelId: + type: string + nullable: true + description: The unique identifier of the model + inputPrice: + type: number + format: double + nullable: true + description: The price of the input in USD + outputPrice: + type: number + format: double + nullable: true + description: The price of the output in USD. + totalPrice: + type: number + format: double + nullable: true + description: The total price in USD. + calculatedInputCost: + type: number + format: double + nullable: true + description: >- + (Deprecated. Use usageDetails and costDetails instead.) The + calculated cost of the input in USD + calculatedOutputCost: + type: number + format: double + nullable: true + description: >- + (Deprecated. Use usageDetails and costDetails instead.) The + calculated cost of the output in USD + calculatedTotalCost: + type: number + format: double + nullable: true + description: >- + (Deprecated. Use usageDetails and costDetails instead.) 
The + calculated total cost in USD + latency: + type: number + format: double + nullable: true + description: The latency in seconds. + timeToFirstToken: + type: number + format: double + nullable: true + description: The time to the first token in seconds + allOf: + - $ref: '#/components/schemas/Observation' + Usage: + title: Usage + type: object + description: >- + (Deprecated. Use usageDetails and costDetails instead.) Standard + interface for usage and cost + properties: + input: + type: integer + nullable: true + description: Number of input units (e.g. tokens) + output: + type: integer + nullable: true + description: Number of output units (e.g. tokens) + total: + type: integer + nullable: true + description: Defaults to input+output if not set + unit: + $ref: '#/components/schemas/ModelUsageUnit' + nullable: true + inputCost: + type: number + format: double + nullable: true + description: USD input cost + outputCost: + type: number + format: double + nullable: true + description: USD output cost + totalCost: + type: number + format: double + nullable: true + description: USD total cost, defaults to input+output + ScoreConfig: + title: ScoreConfig + type: object + description: Configuration for a score + properties: + id: + type: string + name: + type: string + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + projectId: + type: string + dataType: + $ref: '#/components/schemas/ScoreDataType' + isArchived: + type: boolean + description: Whether the score config is archived. Defaults to false + minValue: + type: number + format: double + nullable: true + description: >- + Sets minimum value for numerical scores. If not set, the minimum + value defaults to -∞ + maxValue: + type: number + format: double + nullable: true + description: >- + Sets maximum value for numerical scores. If not set, the maximum + value defaults to +∞ + categories: + type: array + items: + $ref: '#/components/schemas/ConfigCategory' + nullable: true + description: Configures custom categories for categorical scores + description: + type: string + nullable: true + required: + - id + - name + - createdAt + - updatedAt + - projectId + - dataType + - isArchived + ConfigCategory: + title: ConfigCategory + type: object + properties: + value: + type: number + format: double + label: + type: string + required: + - value + - label + BaseScoreV1: + title: BaseScoreV1 + type: object + properties: + id: + type: string + traceId: + type: string + name: + type: string + source: + $ref: '#/components/schemas/ScoreSource' + observationId: + type: string + nullable: true + timestamp: + type: string + format: date-time + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + authorUserId: + type: string + nullable: true + comment: + type: string + nullable: true + metadata: + nullable: true + configId: + type: string + nullable: true + description: >- + Reference a score config on a score. When set, config and score name + must be equal and value must comply to optionally defined numerical + range + queueId: + type: string + nullable: true + description: >- + The annotation queue referenced by the score. Indicates if score was + initially created while processing annotation queue. + environment: + type: string + nullable: true + description: >- + The environment from which this score originated. Can be any + lowercase alphanumeric string with hyphens and underscores that does + not start with 'langfuse'. 
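The ScoreConfig schema above states that an unset minValue/maxValue defaults to -∞/+∞ and that categorical configs carry ConfigCategory entries. A minimal client-side check could mirror that; this is a sketch, not the Langfuse SDK, and the example configs are invented.

```python
# Sketch: validating score values against a ScoreConfig as described above.
import math
from typing import Any

def check_numeric_score(value: float, config: dict[str, Any]) -> None:
    lo = config.get("minValue")
    hi = config.get("maxValue")
    lo = -math.inf if lo is None else lo   # unset minValue -> -inf
    hi = math.inf if hi is None else hi    # unset maxValue -> +inf
    if not (lo <= value <= hi):
        raise ValueError(f"score {value} outside [{lo}, {hi}] for config {config['name']!r}")

def check_categorical_score(string_value: str, config: dict[str, Any]) -> float:
    # Must map to one of the config's ConfigCategory entries; returns the numeric mapping.
    for cat in config.get("categories") or []:
        if cat["label"] == string_value:
            return cat["value"]
    raise ValueError(f"{string_value!r} is not a category of config {config['name']!r}")

check_numeric_score(0.7, {"name": "relevance", "dataType": "NUMERIC", "minValue": 0, "maxValue": 1})
print(check_categorical_score("good", {"name": "quality", "dataType": "CATEGORICAL",
                                       "categories": [{"value": 1, "label": "good"},
                                                      {"value": 0, "label": "bad"}]}))
```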
+ required: + - id + - traceId + - name + - source + - timestamp + - createdAt + - updatedAt + NumericScoreV1: + title: NumericScoreV1 + type: object + properties: + value: + type: number + format: double + description: The numeric value of the score + required: + - value + allOf: + - $ref: '#/components/schemas/BaseScoreV1' + BooleanScoreV1: + title: BooleanScoreV1 + type: object + properties: + value: + type: number + format: double + description: >- + The numeric value of the score. Equals 1 for "True" and 0 for + "False" + stringValue: + type: string + description: >- + The string representation of the score value. Is inferred from the + numeric value and equals "True" or "False" + required: + - value + - stringValue + allOf: + - $ref: '#/components/schemas/BaseScoreV1' + CategoricalScoreV1: + title: CategoricalScoreV1 + type: object + properties: + value: + type: number + format: double + description: >- + Represents the numeric category mapping of the stringValue. If no + config is linked, defaults to 0. + stringValue: + type: string + description: >- + The string representation of the score value. If no config is + linked, can be any string. Otherwise, must map to a config category + required: + - value + - stringValue + allOf: + - $ref: '#/components/schemas/BaseScoreV1' + ScoreV1: + title: ScoreV1 + oneOf: + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - NUMERIC + - $ref: '#/components/schemas/NumericScoreV1' + required: + - dataType + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - CATEGORICAL + - $ref: '#/components/schemas/CategoricalScoreV1' + required: + - dataType + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - BOOLEAN + - $ref: '#/components/schemas/BooleanScoreV1' + required: + - dataType + BaseScore: + title: BaseScore + type: object + properties: + id: + type: string + traceId: + type: string + nullable: true + sessionId: + type: string + nullable: true + observationId: + type: string + nullable: true + datasetRunId: + type: string + nullable: true + name: + type: string + source: + $ref: '#/components/schemas/ScoreSource' + timestamp: + type: string + format: date-time + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + authorUserId: + type: string + nullable: true + comment: + type: string + nullable: true + metadata: + nullable: true + configId: + type: string + nullable: true + description: >- + Reference a score config on a score. When set, config and score name + must be equal and value must comply to optionally defined numerical + range + queueId: + type: string + nullable: true + description: >- + The annotation queue referenced by the score. Indicates if score was + initially created while processing annotation queue. + environment: + type: string + nullable: true + description: >- + The environment from which this score originated. Can be any + lowercase alphanumeric string with hyphens and underscores that does + not start with 'langfuse'. 
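ScoreV1 above is a oneOf union discriminated by dataType, and the Boolean/Categorical variants carry both a numeric value and a stringValue with the semantics spelled out in their descriptions. A small sketch of consuming that union; the payloads are illustrative, the field names are from the spec.

```python
# Sketch of reading the ScoreV1 oneOf union above: `dataType` selects the variant.
from typing import Any

def score_display_value(score: dict[str, Any]) -> str:
    data_type = score["dataType"]
    if data_type == "NUMERIC":
        return str(score["value"])
    if data_type == "BOOLEAN":
        # value is 1 or 0; stringValue is the inferred "True"/"False"
        return score["stringValue"]
    if data_type == "CATEGORICAL":
        # stringValue is the category label, value its numeric mapping (0 if no config)
        return f'{score["stringValue"]} ({score["value"]})'
    raise ValueError(f"unknown dataType: {data_type}")

print(score_display_value({"dataType": "BOOLEAN", "value": 1, "stringValue": "True"}))
print(score_display_value({"dataType": "CATEGORICAL", "value": 2, "stringValue": "excellent"}))
```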
+ required: + - id + - name + - source + - timestamp + - createdAt + - updatedAt + NumericScore: + title: NumericScore + type: object + properties: + value: + type: number + format: double + description: The numeric value of the score + required: + - value + allOf: + - $ref: '#/components/schemas/BaseScore' + BooleanScore: + title: BooleanScore + type: object + properties: + value: + type: number + format: double + description: >- + The numeric value of the score. Equals 1 for "True" and 0 for + "False" + stringValue: + type: string + description: >- + The string representation of the score value. Is inferred from the + numeric value and equals "True" or "False" + required: + - value + - stringValue + allOf: + - $ref: '#/components/schemas/BaseScore' + CategoricalScore: + title: CategoricalScore + type: object + properties: + value: + type: number + format: double + description: >- + Represents the numeric category mapping of the stringValue. If no + config is linked, defaults to 0. + stringValue: + type: string + description: >- + The string representation of the score value. If no config is + linked, can be any string. Otherwise, must map to a config category + required: + - value + - stringValue + allOf: + - $ref: '#/components/schemas/BaseScore' + Score: + title: Score + oneOf: + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - NUMERIC + - $ref: '#/components/schemas/NumericScore' + required: + - dataType + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - CATEGORICAL + - $ref: '#/components/schemas/CategoricalScore' + required: + - dataType + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - BOOLEAN + - $ref: '#/components/schemas/BooleanScore' + required: + - dataType + CreateScoreValue: + title: CreateScoreValue + oneOf: + - type: number + format: double + - type: string + description: >- + The value of the score. 
Must be passed as string for categorical scores, + and numeric for boolean and numeric scores + Comment: + title: Comment + type: object + properties: + id: + type: string + projectId: + type: string + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + objectType: + $ref: '#/components/schemas/CommentObjectType' + objectId: + type: string + content: + type: string + authorUserId: + type: string + nullable: true + required: + - id + - projectId + - createdAt + - updatedAt + - objectType + - objectId + - content + Dataset: + title: Dataset + type: object + properties: + id: + type: string + name: + type: string + description: + type: string + nullable: true + metadata: + nullable: true + inputSchema: + nullable: true + description: JSON Schema for validating dataset item inputs + expectedOutputSchema: + nullable: true + description: JSON Schema for validating dataset item expected outputs + projectId: + type: string + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - name + - projectId + - createdAt + - updatedAt + DatasetItem: + title: DatasetItem + type: object + properties: + id: + type: string + status: + $ref: '#/components/schemas/DatasetStatus' + input: + nullable: true + expectedOutput: + nullable: true + metadata: + nullable: true + sourceTraceId: + type: string + nullable: true + sourceObservationId: + type: string + nullable: true + datasetId: + type: string + datasetName: + type: string + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - status + - datasetId + - datasetName + - createdAt + - updatedAt + DatasetRunItem: + title: DatasetRunItem + type: object + properties: + id: + type: string + datasetRunId: + type: string + datasetRunName: + type: string + datasetItemId: + type: string + traceId: + type: string + observationId: + type: string + nullable: true + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - datasetRunId + - datasetRunName + - datasetItemId + - traceId + - createdAt + - updatedAt + DatasetRun: + title: DatasetRun + type: object + properties: + id: + type: string + description: Unique identifier of the dataset run + name: + type: string + description: Name of the dataset run + description: + type: string + nullable: true + description: Description of the run + metadata: + nullable: true + description: Metadata of the dataset run + datasetId: + type: string + description: Id of the associated dataset + datasetName: + type: string + description: Name of the associated dataset + createdAt: + type: string + format: date-time + description: The date and time when the dataset run was created + updatedAt: + type: string + format: date-time + description: The date and time when the dataset run was last updated + required: + - id + - name + - datasetId + - datasetName + - createdAt + - updatedAt + DatasetRunWithItems: + title: DatasetRunWithItems + type: object + properties: + datasetRunItems: + type: array + items: + $ref: '#/components/schemas/DatasetRunItem' + required: + - datasetRunItems + allOf: + - $ref: '#/components/schemas/DatasetRun' + Model: + title: Model + type: object + description: >- + Model definition used for transforming usage into USD cost and/or + tokenization. 
+ + + Models can have either simple flat pricing or tiered pricing: + + - Flat pricing: Single price per usage type (legacy, but still + supported) + + - Tiered pricing: Multiple pricing tiers with conditional matching based + on usage patterns + + + The pricing tiers approach is recommended for models with usage-based + pricing variations. + + When using tiered pricing, the flat price fields (inputPrice, + outputPrice, prices) are populated + + from the default tier for backward compatibility. + properties: + id: + type: string + modelName: + type: string + description: >- + Name of the model definition. If multiple with the same name exist, + they are applied in the following order: (1) custom over built-in, + (2) newest according to startTime where + model.startTime<observation.startTime + matchPattern: + type: string + description: >- + Regex pattern which matches this model definition to + generation.model. Useful in case of fine-tuned models. If you want + to exact match, use `(?i)^modelname$` + startDate: + type: string + format: date-time + nullable: true + description: Apply only to generations which are newer than this ISO date. + unit: + $ref: '#/components/schemas/ModelUsageUnit' + nullable: true + description: Unit used by this model. + inputPrice: + type: number + format: double + nullable: true + description: Deprecated. See 'prices' instead. Price (USD) per input unit + outputPrice: + type: number + format: double + nullable: true + description: Deprecated. See 'prices' instead. Price (USD) per output unit + totalPrice: + type: number + format: double + nullable: true + description: >- + Deprecated. See 'prices' instead. Price (USD) per total unit. Cannot + be set if input or output price is set. + tokenizerId: + type: string + nullable: true + description: >- + Optional. Tokenizer to be applied to observations which match to + this model. See docs for more details. + tokenizerConfig: + nullable: true + description: >- + Optional. Configuration for the selected tokenizer. Needs to be + JSON. See docs for more details. + isLangfuseManaged: + type: boolean + createdAt: + type: string + format: date-time + description: Timestamp when the model was created + prices: + type: object + additionalProperties: + $ref: '#/components/schemas/ModelPrice' + description: >- + Deprecated. Use 'pricingTiers' instead for models with usage-based + pricing variations. + + + This field shows prices by usage type from the default pricing tier. + Maintained for backward compatibility. + + If the model uses tiered pricing, this field will be populated from + the default tier's prices. + pricingTiers: + type: array + items: + $ref: '#/components/schemas/PricingTier' + description: >- + Array of pricing tiers with conditional pricing based on usage + thresholds. + + + Pricing tiers enable accurate cost tracking for models that charge + different rates based on usage patterns + + (e.g., different rates for high-volume usage, large context windows, + or cached tokens). + + + Each model must have exactly one default tier (isDefault=true, + priority=0) that serves as a fallback. + + Additional conditional tiers can be defined with specific matching + criteria. + + + If this array is empty, the model uses legacy flat pricing from the + inputPrice/outputPrice/totalPrice fields.
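The tier semantics spelled out for PricingTier and PricingTierCondition in this schema (sum the usage details whose keys match a regex, compare with an operator, require all conditions in a tier, evaluate by ascending priority, fall back to the default tier) can be mirrored client-side. A minimal sketch, not Langfuse's actual cost engine; the example tiers and usage details are invented.

```python
# Sketch of the PricingTier / PricingTierCondition selection rules described here.
import operator
import re
from typing import Any

_OPS = {"gt": operator.gt, "gte": operator.ge, "lt": operator.lt,
        "lte": operator.le, "eq": operator.eq, "neq": operator.ne}

def _condition_matches(cond: dict[str, Any], usage_details: dict[str, int]) -> bool:
    flags = 0 if cond.get("caseSensitive") else re.IGNORECASE  # case-insensitive by default
    pattern = re.compile(cond["usageDetailPattern"], flags)
    matched_sum = sum(v for k, v in usage_details.items() if pattern.search(k))
    return _OPS[cond["operator"]](matched_sum, cond["value"])

def select_pricing_tier(tiers: list[dict[str, Any]], usage_details: dict[str, int]) -> dict[str, Any]:
    conditional = sorted((t for t in tiers if not t["isDefault"]), key=lambda t: t["priority"])
    for tier in conditional:
        # all conditions must hold (AND logic)
        if all(_condition_matches(c, usage_details) for c in tier["conditions"]):
            return tier
    # exactly one default tier (priority 0, no conditions) acts as the fallback
    return next(t for t in tiers if t["isDefault"])

tiers = [
    {"name": "Standard", "isDefault": True, "priority": 0, "conditions": [],
     "prices": {"input": 3e-06, "output": 1.5e-05}},
    {"name": "Large Context", "isDefault": False, "priority": 1,
     "conditions": [{"usageDetailPattern": "^input", "operator": "gt",
                     "value": 200_000, "caseSensitive": False}],
     "prices": {"input": 6e-06, "output": 3e-05}},
]
print(select_pricing_tier(tiers, {"input_tokens": 250_000, "output_tokens": 1_000})["name"])
```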
+ required: + - id + - modelName + - matchPattern + - isLangfuseManaged + - createdAt + - prices + - pricingTiers + ModelPrice: + title: ModelPrice + type: object + properties: + price: + type: number + format: double + required: + - price + PricingTierCondition: + title: PricingTierCondition + type: object + description: >- + Condition for matching a pricing tier based on usage details. Used to + implement tiered pricing models where costs vary based on usage + thresholds. + + + How it works: + + 1. The regex pattern matches against usage detail keys (e.g., + "input_tokens", "input_cached") + + 2. Values of all matching keys are summed together + + 3. The sum is compared against the threshold value using the specified + operator + + 4. All conditions in a tier must be met (AND logic) for the tier to + match + + + Common use cases: + + - Threshold-based pricing: Match when accumulated usage exceeds a + certain amount + + - Usage-type-specific pricing: Different rates for cached vs non-cached + tokens, or input vs output + + - Volume-based pricing: Different rates based on total request or token + count + properties: + usageDetailPattern: + type: string + description: >- + Regex pattern to match against usage detail keys. All matching keys' + values are summed for threshold comparison. + + + Examples: + + - "^input" matches "input", "input_tokens", "input_cached", etc. + + - "^(input|prompt)" matches both "input_tokens" and "prompt_tokens" + + - "_cache$" matches "input_cache", "output_cache", etc. + + + The pattern is case-insensitive by default. If no keys match, the + sum is treated as zero. + operator: + $ref: '#/components/schemas/PricingTierOperator' + description: >- + Comparison operator to apply between the summed value and the + threshold. + + + - gt: greater than (sum > threshold) + + - gte: greater than or equal (sum >= threshold) + + - lt: less than (sum < threshold) + + - lte: less than or equal (sum <= threshold) + + - eq: equal (sum == threshold) + + - neq: not equal (sum != threshold) + value: + type: number + format: double + description: >- + Threshold value for comparison. For token-based pricing, this is + typically the token count threshold (e.g., 200000 for a 200K token + threshold). + caseSensitive: + type: boolean + description: >- + Whether the regex pattern matching is case-sensitive. Default is + false (case-insensitive matching). + required: + - usageDetailPattern + - operator + - value + - caseSensitive + PricingTier: + title: PricingTier + type: object + description: >- + Pricing tier definition with conditional pricing based on usage + thresholds. + + + Pricing tiers enable accurate cost tracking for LLM providers that + charge different rates based on usage patterns. + + For example, some providers charge higher rates when context size + exceeds certain thresholds. + + + How tier matching works: + + 1. Tiers are evaluated in ascending priority order (priority 1 before + priority 2, etc.) + + 2. The first tier where ALL conditions match is selected + + 3. If no conditional tiers match, the default tier is used as a fallback + + 4. 
The default tier has priority 0 and no conditions + + + Why priorities matter: + + - Lower priority numbers are evaluated first, allowing you to define + specific cases before general ones + + - Example: Priority 1 for "high usage" (>200K tokens), Priority 2 for + "medium usage" (>100K tokens), Priority 0 for default + + - Without proper ordering, a less specific condition might match before + a more specific one + + + Every model must have exactly one default tier to ensure cost + calculation always succeeds. + properties: + id: + type: string + description: Unique identifier for the pricing tier + name: + type: string + description: >- + Name of the pricing tier for display and identification purposes. + + + Examples: "Standard", "High Volume Tier", "Large Context", + "Extended Context Tier" + isDefault: + type: boolean + description: >- + Whether this is the default tier. Every model must have exactly one + default tier with priority 0 and no conditions. + + + The default tier serves as a fallback when no conditional tiers + match, ensuring cost calculation always succeeds. + + It typically represents the base pricing for standard usage + patterns. + priority: + type: integer + description: >- + Priority for tier matching evaluation. Lower numbers = higher + priority (evaluated first). + + + The default tier must always have priority 0. Conditional tiers + should have priority 1, 2, 3, etc. + + + Example ordering: + + - Priority 0: Default tier (no conditions, always matches as + fallback) + + - Priority 1: High usage tier (e.g., >200K tokens) + + - Priority 2: Medium usage tier (e.g., >100K tokens) + + + This ensures more specific conditions are checked before general + ones. + conditions: + type: array + items: + $ref: '#/components/schemas/PricingTierCondition' + description: >- + Array of conditions that must ALL be met for this tier to match (AND + logic). + + + The default tier must have an empty conditions array. Conditional + tiers should have one or more conditions + + that define when this tier's pricing applies. + + + Multiple conditions enable complex matching scenarios (e.g., "high + input tokens AND low output tokens"). + prices: + type: object + additionalProperties: + type: number + format: double + description: >- + Prices (USD) by usage type for this tier. + + + Common usage types: "input", "output", "total", "request", "image" + + Prices are specified in USD per unit (e.g., per token, per request, + per second). + + + Example: {"input": 0.000003, "output": 0.000015} means $3 per + million input tokens and $15 per million output tokens. + required: + - id + - name + - isDefault + - priority + - conditions + - prices + PricingTierInput: + title: PricingTierInput + type: object + description: >- + Input schema for creating a pricing tier. The tier ID will be + automatically generated server-side. + + + When creating a model with pricing tiers: + + - Exactly one tier must have isDefault=true (the fallback tier) + + - The default tier must have priority=0 and conditions=[] + + - All tier names and priorities must be unique within the model + + - Each tier must define at least one price + + + See PricingTier for detailed information about how tiers work and why + they're useful. + properties: + name: + type: string + description: >- + Name of the pricing tier for display and identification purposes. + + + Must be unique within the model. 
Common patterns: "Standard + Pricing", "High Volume Tier", "Extended Context" + isDefault: + type: boolean + description: >- + Whether this is the default tier. Exactly one tier per model must be + marked as default. + + + Requirements for default tier: + + - Must have isDefault=true + + - Must have priority=0 + + - Must have empty conditions array (conditions=[]) + + + The default tier acts as a fallback when no conditional tiers match. + priority: + type: integer + description: >- + Priority for tier matching evaluation. Lower numbers = higher + priority (evaluated first). + + + Must be unique within the model. The default tier must have + priority=0. + + Conditional tiers should use priority 1, 2, 3, etc. based on their + specificity. + conditions: + type: array + items: + $ref: '#/components/schemas/PricingTierCondition' + description: >- + Array of conditions that must ALL be met for this tier to match (AND + logic). + + + The default tier must have an empty array (conditions=[]). + + Conditional tiers should define one or more conditions that specify + when this tier's pricing applies. + + + Each condition specifies a regex pattern, operator, and threshold + value for matching against usage details. + prices: + type: object + additionalProperties: + type: number + format: double + description: >- + Prices (USD) by usage type for this tier. At least one price must be + defined. + + + Common usage types: "input", "output", "total", "request", "image" + + Prices are in USD per unit (e.g., per token). + + + Example: {"input": 0.000003, "output": 0.000015} represents $3 per + million input tokens and $15 per million output tokens. + required: + - name + - isDefault + - priority + - conditions + - prices + PricingTierOperator: + title: PricingTierOperator + type: string + enum: + - gt + - gte + - lt + - lte + - eq + - neq + description: Comparison operators for pricing tier conditions + ModelUsageUnit: + title: ModelUsageUnit + type: string + enum: + - CHARACTERS + - TOKENS + - MILLISECONDS + - SECONDS + - IMAGES + - REQUESTS + description: Unit of usage in Langfuse + ObservationLevel: + title: ObservationLevel + type: string + enum: + - DEBUG + - DEFAULT + - WARNING + - ERROR + MapValue: + title: MapValue + oneOf: + - type: string + nullable: true + - type: integer + nullable: true + - type: boolean + nullable: true + - type: array + items: + type: string + nullable: true + CommentObjectType: + title: CommentObjectType + type: string + enum: + - TRACE + - OBSERVATION + - SESSION + - PROMPT + DatasetStatus: + title: DatasetStatus + type: string + enum: + - ACTIVE + - ARCHIVED + ScoreSource: + title: ScoreSource + type: string + enum: + - ANNOTATION + - API + - EVAL + ScoreDataType: + title: ScoreDataType + type: string + enum: + - NUMERIC + - BOOLEAN + - CATEGORICAL + DeleteDatasetItemResponse: + title: DeleteDatasetItemResponse + type: object + properties: + message: + type: string + description: Success message after deletion + required: + - message + CreateDatasetItemRequest: + title: CreateDatasetItemRequest + type: object + properties: + datasetName: + type: string + input: + nullable: true + expectedOutput: + nullable: true + metadata: + nullable: true + sourceTraceId: + type: string + nullable: true + sourceObservationId: + type: string + nullable: true + id: + type: string + nullable: true + description: >- + Dataset items are upserted on their id. Id needs to be unique + (project-level) and cannot be reused across datasets. 
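CreateDatasetItemRequest above upserts on the item id, and the id cannot be reused across datasets. One convenient consequence is that deriving a stable id from the dataset name and input lets re-runs update items in place rather than duplicating them; the hashing convention below is just one possibility, not something the API prescribes.

```python
# Illustrative CreateDatasetItemRequest payload with a deterministic id for upserts.
import hashlib
import json

def build_dataset_item(dataset_name: str, input_data: dict, expected_output: dict) -> dict:
    digest = hashlib.sha256(
        json.dumps([dataset_name, input_data], sort_keys=True).encode()
    ).hexdigest()[:32]
    return {
        "datasetName": dataset_name,          # required
        "id": f"item-{digest}",               # stable -> upsert instead of duplicate
        "input": input_data,
        "expectedOutput": expected_output,
        "metadata": {"source": "regression-suite"},
    }

print(json.dumps(build_dataset_item("qa-eval", {"question": "2+2?"}, {"answer": "4"}), indent=2))
```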
+ status: + $ref: '#/components/schemas/DatasetStatus' + nullable: true + description: Defaults to ACTIVE for newly created items + required: + - datasetName + PaginatedDatasetItems: + title: PaginatedDatasetItems + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/DatasetItem' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + CreateDatasetRunItemRequest: + title: CreateDatasetRunItemRequest + type: object + properties: + runName: + type: string + runDescription: + type: string + nullable: true + description: Description of the run. If run exists, description will be updated. + metadata: + nullable: true + description: Metadata of the dataset run, updates run if run already exists + datasetItemId: + type: string + observationId: + type: string + nullable: true + traceId: + type: string + nullable: true + description: >- + traceId should always be provided. For compatibility with older SDK + versions it can also be inferred from the provided observationId. + required: + - runName + - datasetItemId + PaginatedDatasetRunItems: + title: PaginatedDatasetRunItems + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/DatasetRunItem' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + PaginatedDatasets: + title: PaginatedDatasets + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Dataset' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + CreateDatasetRequest: + title: CreateDatasetRequest + type: object + properties: + name: + type: string + description: + type: string + nullable: true + metadata: + nullable: true + inputSchema: + nullable: true + description: >- + JSON Schema for validating dataset item inputs. When set, all new + and existing dataset items will be validated against this schema. + expectedOutputSchema: + nullable: true + description: >- + JSON Schema for validating dataset item expected outputs. When set, + all new and existing dataset items will be validated against this + schema. 
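CreateDatasetRequest above can attach inputSchema / expectedOutputSchema JSON Schemas that the server then enforces on all items. A sketch of such a request, plus an optional local pre-check using the third-party `jsonschema` package; the local check is purely a convenience, not something the API requires.

```python
# Sketch of a CreateDatasetRequest with an input JSON Schema and a local pre-check.
import json
from jsonschema import validate  # pip install jsonschema

input_schema = {
    "type": "object",
    "properties": {"question": {"type": "string"}},
    "required": ["question"],
    "additionalProperties": False,
}

create_dataset_request = {
    "name": "qa-eval",                 # required
    "description": "QA regression set",
    "inputSchema": input_schema,       # items will be validated against this
}

# Check an item locally before sending it, mirroring what the server will enforce.
validate(instance={"question": "What is 2+2?"}, schema=input_schema)
print(json.dumps(create_dataset_request, indent=2))
```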
+ required: + - name + PaginatedDatasetRuns: + title: PaginatedDatasetRuns + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/DatasetRun' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + DeleteDatasetRunResponse: + title: DeleteDatasetRunResponse + type: object + properties: + message: + type: string + required: + - message + HealthResponse: + title: HealthResponse + type: object + properties: + version: + type: string + description: Langfuse server version + example: 1.25.0 + status: + type: string + example: OK + required: + - version + - status + IngestionEvent: + title: IngestionEvent + oneOf: + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - trace-create + - $ref: '#/components/schemas/TraceEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - score-create + - $ref: '#/components/schemas/ScoreEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - span-create + - $ref: '#/components/schemas/CreateSpanEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - span-update + - $ref: '#/components/schemas/UpdateSpanEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - generation-create + - $ref: '#/components/schemas/CreateGenerationEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - generation-update + - $ref: '#/components/schemas/UpdateGenerationEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - event-create + - $ref: '#/components/schemas/CreateEventEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - sdk-log + - $ref: '#/components/schemas/SDKLogEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - observation-create + - $ref: '#/components/schemas/CreateObservationEvent' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - observation-update + - $ref: '#/components/schemas/UpdateObservationEvent' + required: + - type + ObservationType: + title: ObservationType + type: string + enum: + - SPAN + - GENERATION + - EVENT + - AGENT + - TOOL + - CHAIN + - RETRIEVER + - EVALUATOR + - EMBEDDING + - GUARDRAIL + IngestionUsage: + title: IngestionUsage + oneOf: + - $ref: '#/components/schemas/Usage' + - $ref: '#/components/schemas/OpenAIUsage' + OpenAIUsage: + title: OpenAIUsage + type: object + description: Usage interface of OpenAI for improved compatibility. 
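IngestionUsage above accepts either the generic Usage shape or the OpenAI-style OpenAIUsage shape. A small normalizer can map one onto the other; the "total defaults to input+output" rule comes from the Usage schema earlier in this spec, and the mapping itself is a sketch rather than SDK behavior.

```python
# Sketch: normalizing an OpenAIUsage payload onto the generic Usage fields.
from typing import Any

def normalize_openai_usage(usage: dict[str, Any]) -> dict[str, Any]:
    input_units = usage.get("promptTokens")
    output_units = usage.get("completionTokens")
    total = usage.get("totalTokens")
    if total is None and input_units is not None and output_units is not None:
        total = input_units + output_units   # Usage: total defaults to input+output
    return {"input": input_units, "output": output_units, "total": total, "unit": "TOKENS"}

print(normalize_openai_usage({"promptTokens": 12, "completionTokens": 30, "totalTokens": 42}))
```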
+ properties: + promptTokens: + type: integer + nullable: true + completionTokens: + type: integer + nullable: true + totalTokens: + type: integer + nullable: true + OptionalObservationBody: + title: OptionalObservationBody + type: object + properties: + traceId: + type: string + nullable: true + name: + type: string + nullable: true + startTime: + type: string + format: date-time + nullable: true + metadata: + nullable: true + input: + nullable: true + output: + nullable: true + level: + $ref: '#/components/schemas/ObservationLevel' + nullable: true + statusMessage: + type: string + nullable: true + parentObservationId: + type: string + nullable: true + version: + type: string + nullable: true + environment: + type: string + nullable: true + CreateEventBody: + title: CreateEventBody + type: object + properties: + id: + type: string + nullable: true + allOf: + - $ref: '#/components/schemas/OptionalObservationBody' + UpdateEventBody: + title: UpdateEventBody + type: object + properties: + id: + type: string + required: + - id + allOf: + - $ref: '#/components/schemas/OptionalObservationBody' + CreateSpanBody: + title: CreateSpanBody + type: object + properties: + endTime: + type: string + format: date-time + nullable: true + allOf: + - $ref: '#/components/schemas/CreateEventBody' + UpdateSpanBody: + title: UpdateSpanBody + type: object + properties: + endTime: + type: string + format: date-time + nullable: true + allOf: + - $ref: '#/components/schemas/UpdateEventBody' + CreateGenerationBody: + title: CreateGenerationBody + type: object + properties: + completionStartTime: + type: string + format: date-time + nullable: true + model: + type: string + nullable: true + modelParameters: + type: object + additionalProperties: + $ref: '#/components/schemas/MapValue' + nullable: true + usage: + $ref: '#/components/schemas/IngestionUsage' + nullable: true + usageDetails: + $ref: '#/components/schemas/UsageDetails' + nullable: true + costDetails: + type: object + additionalProperties: + type: number + format: double + nullable: true + promptName: + type: string + nullable: true + promptVersion: + type: integer + nullable: true + allOf: + - $ref: '#/components/schemas/CreateSpanBody' + UpdateGenerationBody: + title: UpdateGenerationBody + type: object + properties: + completionStartTime: + type: string + format: date-time + nullable: true + model: + type: string + nullable: true + modelParameters: + type: object + additionalProperties: + $ref: '#/components/schemas/MapValue' + nullable: true + usage: + $ref: '#/components/schemas/IngestionUsage' + nullable: true + promptName: + type: string + nullable: true + usageDetails: + $ref: '#/components/schemas/UsageDetails' + nullable: true + costDetails: + type: object + additionalProperties: + type: number + format: double + nullable: true + promptVersion: + type: integer + nullable: true + allOf: + - $ref: '#/components/schemas/UpdateSpanBody' + ObservationBody: + title: ObservationBody + type: object + properties: + id: + type: string + nullable: true + traceId: + type: string + nullable: true + type: + $ref: '#/components/schemas/ObservationType' + name: + type: string + nullable: true + startTime: + type: string + format: date-time + nullable: true + endTime: + type: string + format: date-time + nullable: true + completionStartTime: + type: string + format: date-time + nullable: true + model: + type: string + nullable: true + modelParameters: + type: object + additionalProperties: + $ref: '#/components/schemas/MapValue' + nullable: true + input: + 
nullable: true + version: + type: string + nullable: true + metadata: + nullable: true + output: + nullable: true + usage: + $ref: '#/components/schemas/Usage' + nullable: true + level: + $ref: '#/components/schemas/ObservationLevel' + nullable: true + statusMessage: + type: string + nullable: true + parentObservationId: + type: string + nullable: true + environment: + type: string + nullable: true + required: + - type + TraceBody: + title: TraceBody + type: object + properties: + id: + type: string + nullable: true + timestamp: + type: string + format: date-time + nullable: true + name: + type: string + nullable: true + userId: + type: string + nullable: true + input: + nullable: true + output: + nullable: true + sessionId: + type: string + nullable: true + release: + type: string + nullable: true + version: + type: string + nullable: true + metadata: + nullable: true + tags: + type: array + items: + type: string + nullable: true + environment: + type: string + nullable: true + public: + type: boolean + nullable: true + description: Make trace publicly accessible via url + SDKLogBody: + title: SDKLogBody + type: object + properties: + log: {} + required: + - log + ScoreBody: + title: ScoreBody + type: object + properties: + id: + type: string + nullable: true + traceId: + type: string + nullable: true + sessionId: + type: string + nullable: true + observationId: + type: string + nullable: true + datasetRunId: + type: string + nullable: true + name: + type: string + example: novelty + environment: + type: string + nullable: true + queueId: + type: string + nullable: true + description: >- + The annotation queue referenced by the score. Indicates if score was + initially created while processing annotation queue. + value: + $ref: '#/components/schemas/CreateScoreValue' + description: >- + The value of the score. Must be passed as string for categorical + scores, and numeric for boolean and numeric scores. Boolean score + values must equal either 1 or 0 (true or false) + comment: + type: string + nullable: true + metadata: + nullable: true + dataType: + $ref: '#/components/schemas/ScoreDataType' + nullable: true + description: >- + When set, must match the score value's type. If not set, will be + inferred from the score value or config + configId: + type: string + nullable: true + description: >- + Reference a score config on a score. When set, the score name must + equal the config name and scores must comply with the config's range + and data type. For categorical scores, the value must map to a + config category. Numeric scores might be constrained by the score + config's max and min values + required: + - name + - value + BaseEvent: + title: BaseEvent + type: object + properties: + id: + type: string + description: UUID v4 that identifies the event + timestamp: + type: string + description: >- + Datetime (ISO 8601) of event creation in client. Should be as close + to actual event creation in client as possible, this timestamp will + be used for ordering of events in future release. Resolution: + milliseconds (required), microseconds (optimal). + metadata: + nullable: true + description: Optional. Metadata field used by the Langfuse SDKs for debugging. 
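Each IngestionEvent wraps a body in the BaseEvent envelope described above: a UUID v4 id and an ISO 8601 timestamp with at least millisecond resolution, plus the type discriminator. A sketch of building a `trace-create` event; the batch wrapper and the ingestion endpoint path mentioned in the comment are assumptions, not part of the schemas shown in this file.

```python
# Sketch of a `trace-create` IngestionEvent using the BaseEvent envelope above.
import json
import uuid
from datetime import datetime, timezone

def trace_create_event(trace_body: dict) -> dict:
    return {
        "type": "trace-create",                                   # discriminator from IngestionEvent
        "id": str(uuid.uuid4()),                                  # UUID v4 identifying the event
        "timestamp": datetime.now(timezone.utc).isoformat(timespec="milliseconds"),
        "body": trace_body,                                        # TraceBody fields
    }

batch = {"batch": [trace_create_event({"name": "checkout", "userId": "user-123",
                                       "tags": ["prod"], "public": False})]}
# Assumption: POST this to the public ingestion endpoint with basic auth.
print(json.dumps(batch, indent=2))
```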
+ required: + - id + - timestamp + TraceEvent: + title: TraceEvent + type: object + properties: + body: + $ref: '#/components/schemas/TraceBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + CreateObservationEvent: + title: CreateObservationEvent + type: object + properties: + body: + $ref: '#/components/schemas/ObservationBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + UpdateObservationEvent: + title: UpdateObservationEvent + type: object + properties: + body: + $ref: '#/components/schemas/ObservationBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + ScoreEvent: + title: ScoreEvent + type: object + properties: + body: + $ref: '#/components/schemas/ScoreBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + SDKLogEvent: + title: SDKLogEvent + type: object + properties: + body: + $ref: '#/components/schemas/SDKLogBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + CreateGenerationEvent: + title: CreateGenerationEvent + type: object + properties: + body: + $ref: '#/components/schemas/CreateGenerationBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + UpdateGenerationEvent: + title: UpdateGenerationEvent + type: object + properties: + body: + $ref: '#/components/schemas/UpdateGenerationBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + CreateSpanEvent: + title: CreateSpanEvent + type: object + properties: + body: + $ref: '#/components/schemas/CreateSpanBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + UpdateSpanEvent: + title: UpdateSpanEvent + type: object + properties: + body: + $ref: '#/components/schemas/UpdateSpanBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + CreateEventEvent: + title: CreateEventEvent + type: object + properties: + body: + $ref: '#/components/schemas/CreateEventBody' + required: + - body + allOf: + - $ref: '#/components/schemas/BaseEvent' + IngestionSuccess: + title: IngestionSuccess + type: object + properties: + id: + type: string + status: + type: integer + required: + - id + - status + IngestionError: + title: IngestionError + type: object + properties: + id: + type: string + status: + type: integer + message: + type: string + nullable: true + error: + nullable: true + required: + - id + - status + IngestionResponse: + title: IngestionResponse + type: object + properties: + successes: + type: array + items: + $ref: '#/components/schemas/IngestionSuccess' + errors: + type: array + items: + $ref: '#/components/schemas/IngestionError' + required: + - successes + - errors + OpenAICompletionUsageSchema: + title: OpenAICompletionUsageSchema + type: object + description: OpenAI Usage schema from (Chat-)Completion APIs + properties: + prompt_tokens: + type: integer + completion_tokens: + type: integer + total_tokens: + type: integer + prompt_tokens_details: + type: object + additionalProperties: + type: integer + nullable: true + nullable: true + completion_tokens_details: + type: object + additionalProperties: + type: integer + nullable: true + nullable: true + required: + - prompt_tokens + - completion_tokens + - total_tokens + OpenAIResponseUsageSchema: + title: OpenAIResponseUsageSchema + type: object + description: OpenAI Usage schema from Response API + properties: + input_tokens: + type: integer + output_tokens: + type: integer + total_tokens: + type: integer + input_tokens_details: + type: object + 
additionalProperties: + type: integer + nullable: true + nullable: true + output_tokens_details: + type: object + additionalProperties: + type: integer + nullable: true + nullable: true + required: + - input_tokens + - output_tokens + - total_tokens + UsageDetails: + title: UsageDetails + oneOf: + - type: object + additionalProperties: + type: integer + - $ref: '#/components/schemas/OpenAICompletionUsageSchema' + - $ref: '#/components/schemas/OpenAIResponseUsageSchema' + LlmConnection: + title: LlmConnection + type: object + description: LLM API connection configuration (secrets excluded) + properties: + id: + type: string + provider: + type: string + description: >- + Provider name (e.g., 'openai', 'my-gateway'). Must be unique in + project, used for upserting. + adapter: + type: string + description: The adapter used to interface with the LLM + displaySecretKey: + type: string + description: Masked version of the secret key for display purposes + baseURL: + type: string + nullable: true + description: Custom base URL for the LLM API + customModels: + type: array + items: + type: string + description: List of custom model names available for this connection + withDefaultModels: + type: boolean + description: Whether to include default models for this adapter + extraHeaderKeys: + type: array + items: + type: string + description: >- + Keys of extra headers sent with requests (values excluded for + security) + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - provider + - adapter + - displaySecretKey + - customModels + - withDefaultModels + - extraHeaderKeys + - createdAt + - updatedAt + PaginatedLlmConnections: + title: PaginatedLlmConnections + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/LlmConnection' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + UpsertLlmConnectionRequest: + title: UpsertLlmConnectionRequest + type: object + description: Request to create or update an LLM connection (upsert) + properties: + provider: + type: string + description: >- + Provider name (e.g., 'openai', 'my-gateway'). Must be unique in + project, used for upserting. + adapter: + $ref: '#/components/schemas/LlmAdapter' + description: The adapter used to interface with the LLM + secretKey: + type: string + description: Secret key for the LLM API. + baseURL: + type: string + nullable: true + description: Custom base URL for the LLM API + customModels: + type: array + items: + type: string + nullable: true + description: List of custom model names + withDefaultModels: + type: boolean + nullable: true + description: Whether to include default models. Default is true. 
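UpsertLlmConnectionRequest above uses `provider` as the project-level upsert key, requires an adapter and a secretKey, and never echoes the secret back (responses only carry a masked displaySecretKey). An illustrative payload; the provider name, base URL and model names are invented, and the secret shown is a placeholder.

```python
# Illustrative UpsertLlmConnectionRequest payload (secrets here are fake).
import json
import os

upsert_llm_connection = {
    "provider": "my-gateway",                      # unique per project, used for upserting
    "adapter": "openai",                           # must be an LlmAdapter enum value
    "secretKey": os.environ.get("LLM_API_KEY", "sk-placeholder"),
    "baseURL": "https://gateway.example.com/v1",   # optional custom base URL
    "customModels": ["my-finetune-v2"],
    "withDefaultModels": True,                     # defaults to true when omitted
}
print(json.dumps({**upsert_llm_connection, "secretKey": "***"}, indent=2))
```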
+ extraHeaders: + type: object + additionalProperties: + type: string + nullable: true + description: Extra headers to send with requests + required: + - provider + - adapter + - secretKey + LlmAdapter: + title: LlmAdapter + type: string + enum: + - anthropic + - openai + - azure + - bedrock + - google-vertex-ai + - google-ai-studio + GetMediaResponse: + title: GetMediaResponse + type: object + properties: + mediaId: + type: string + description: The unique langfuse identifier of a media record + contentType: + type: string + description: The MIME type of the media record + contentLength: + type: integer + description: The size of the media record in bytes + uploadedAt: + type: string + format: date-time + description: The date and time when the media record was uploaded + url: + type: string + description: The download URL of the media record + urlExpiry: + type: string + description: The expiry date and time of the media record download URL + required: + - mediaId + - contentType + - contentLength + - uploadedAt + - url + - urlExpiry + PatchMediaBody: + title: PatchMediaBody + type: object + properties: + uploadedAt: + type: string + format: date-time + description: The date and time when the media record was uploaded + uploadHttpStatus: + type: integer + description: The HTTP status code of the upload + uploadHttpError: + type: string + nullable: true + description: The HTTP error message of the upload + uploadTimeMs: + type: integer + nullable: true + description: The time in milliseconds it took to upload the media record + required: + - uploadedAt + - uploadHttpStatus + GetMediaUploadUrlRequest: + title: GetMediaUploadUrlRequest + type: object + properties: + traceId: + type: string + description: The trace ID associated with the media record + observationId: + type: string + nullable: true + description: >- + The observation ID associated with the media record. If the media + record is associated directly with a trace, this will be null. + contentType: + $ref: '#/components/schemas/MediaContentType' + contentLength: + type: integer + description: The size of the media record in bytes + sha256Hash: + type: string + description: The SHA-256 hash of the media record + field: + type: string + description: >- + The trace / observation field the media record is associated with. + This can be one of `input`, `output`, `metadata` + required: + - traceId + - contentType + - contentLength + - sha256Hash + - field + GetMediaUploadUrlResponse: + title: GetMediaUploadUrlResponse + type: object + properties: + uploadUrl: + type: string + nullable: true + description: >- + The presigned upload URL. 
If the asset is already uploaded, this + will be null + mediaId: + type: string + description: The unique langfuse identifier of a media record + required: + - mediaId + MediaContentType: + title: MediaContentType + type: string + enum: + - image/png + - image/jpeg + - image/jpg + - image/webp + - image/gif + - image/svg+xml + - image/tiff + - image/bmp + - image/avif + - image/heic + - audio/mpeg + - audio/mp3 + - audio/wav + - audio/ogg + - audio/oga + - audio/aac + - audio/mp4 + - audio/flac + - audio/opus + - audio/webm + - video/mp4 + - video/webm + - video/ogg + - video/mpeg + - video/quicktime + - video/x-msvideo + - video/x-matroska + - text/plain + - text/html + - text/css + - text/csv + - text/markdown + - text/x-python + - application/javascript + - text/x-typescript + - application/x-yaml + - application/pdf + - application/msword + - application/vnd.ms-excel + - application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + - application/zip + - application/json + - application/xml + - application/octet-stream + - >- + application/vnd.openxmlformats-officedocument.wordprocessingml.document + - >- + application/vnd.openxmlformats-officedocument.presentationml.presentation + - application/rtf + - application/x-ndjson + - application/vnd.apache.parquet + - application/gzip + - application/x-tar + - application/x-7z-compressed + description: The MIME type of the media record + MetricsResponse: + title: MetricsResponse + type: object + properties: + data: + type: array + items: + type: object + additionalProperties: true + description: >- + The metrics data. Each item in the list contains the metric values + and dimensions requested in the query. + + Format varies based on the query parameters. + + Histograms will return an array with [lower, upper, height] tuples. + required: + - data + PaginatedModels: + title: PaginatedModels + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Model' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + CreateModelRequest: + title: CreateModelRequest + type: object + properties: + modelName: + type: string + description: >- + Name of the model definition. If multiple with the same name exist, + they are applied in the following order: (1) custom over built-in, + (2) newest according to startTime where + model.startTime<observation.startTime + matchPattern: + type: string + description: >- + Regex pattern which matches this model definition to + generation.model. Useful in case of fine-tuned models. If you want + to exact match, use `(?i)^modelname$` + startDate: + type: string + format: date-time + nullable: true + description: Apply only to generations which are newer than this ISO date. + unit: + $ref: '#/components/schemas/ModelUsageUnit' + nullable: true + description: Unit used by this model. + inputPrice: + type: number + format: double + nullable: true + description: >- + Deprecated. Use 'pricingTiers' instead. Price (USD) per input unit. + Creates a default tier if pricingTiers not provided. + outputPrice: + type: number + format: double + nullable: true + description: >- + Deprecated. Use 'pricingTiers' instead. Price (USD) per output unit. + Creates a default tier if pricingTiers not provided. + totalPrice: + type: number + format: double + nullable: true + description: >- + Deprecated. Use 'pricingTiers' instead. Price (USD) per total units. + Cannot be set if input or output price is set. Creates a default + tier if pricingTiers not provided.
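The media schemas above imply a two-step flow: request a presigned URL via GetMediaUploadUrlRequest (a null uploadUrl in the response means the content is already stored), upload the bytes, then report the result via PatchMediaBody. A sketch of building those payloads; encoding the SHA-256 as base64 is an assumption, since this excerpt only says the field carries the SHA-256 hash.

```python
# Sketch of GetMediaUploadUrlRequest and PatchMediaBody payloads for the media flow above.
import base64
import hashlib
from datetime import datetime, timezone

def media_upload_url_request(trace_id: str, content: bytes, content_type: str, field: str) -> dict:
    return {
        "traceId": trace_id,
        "contentType": content_type,          # must be a MediaContentType value
        "contentLength": len(content),
        "sha256Hash": base64.b64encode(hashlib.sha256(content).digest()).decode(),
        "field": field,                        # "input", "output" or "metadata"
    }

def media_patch_body(status_code: int, elapsed_ms: int) -> dict:
    return {
        "uploadedAt": datetime.now(timezone.utc).isoformat(timespec="milliseconds"),
        "uploadHttpStatus": status_code,
        "uploadTimeMs": elapsed_ms,
    }

png_bytes = b"\x89PNG..."  # placeholder payload
print(media_upload_url_request("tr-123", png_bytes, "image/png", "input"))
print(media_patch_body(200, 87))
```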
+ pricingTiers: + type: array + items: + $ref: '#/components/schemas/PricingTierInput' + nullable: true + description: >- + Optional. Array of pricing tiers for this model. + + + Use pricing tiers for all models - both those with threshold-based + pricing variations and those with simple flat pricing: + + + - For models with standard flat pricing: Create a single default + tier with your prices + (e.g., one tier with isDefault=true, priority=0, conditions=[], and your standard prices) + + - For models with threshold-based pricing: Create a default tier + plus additional conditional tiers + (e.g., default tier for standard usage + high-volume tier for usage above certain thresholds) + + Requirements: + + - Cannot be provided with flat prices + (inputPrice/outputPrice/totalPrice) - use one approach or the other + + - Must include exactly one default tier with isDefault=true, + priority=0, and conditions=[] + + - All tier names and priorities must be unique within the model + + - Each tier must define at least one price + + + If omitted, you must provide flat prices instead + (inputPrice/outputPrice/totalPrice), + + which will automatically create a single default tier named + "Standard". + tokenizerId: + type: string + nullable: true + description: >- + Optional. Tokenizer to be applied to observations which match to + this model. See docs for more details. + tokenizerConfig: + nullable: true + description: >- + Optional. Configuration for the selected tokenizer. Needs to be + JSON. See docs for more details. + required: + - modelName + - matchPattern + Observations: + title: Observations + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Observation' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + ObservationsViews: + title: ObservationsViews + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/ObservationsView' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + OtelResourceSpan: + title: OtelResourceSpan + type: object + description: >- + Represents a collection of spans from a single resource as per OTLP + specification + properties: + resource: + $ref: '#/components/schemas/OtelResource' + nullable: true + description: Resource information + scopeSpans: + type: array + items: + $ref: '#/components/schemas/OtelScopeSpan' + nullable: true + description: Array of scope spans + OtelResource: + title: OtelResource + type: object + description: Resource attributes identifying the source of telemetry + properties: + attributes: + type: array + items: + $ref: '#/components/schemas/OtelAttribute' + nullable: true + description: Resource attributes like service.name, service.version, etc. 
+ OtelScopeSpan: + title: OtelScopeSpan + type: object + description: Collection of spans from a single instrumentation scope + properties: + scope: + $ref: '#/components/schemas/OtelScope' + nullable: true + description: Instrumentation scope information + spans: + type: array + items: + $ref: '#/components/schemas/OtelSpan' + nullable: true + description: Array of spans + OtelScope: + title: OtelScope + type: object + description: Instrumentation scope information + properties: + name: + type: string + nullable: true + description: Instrumentation scope name + version: + type: string + nullable: true + description: Instrumentation scope version + attributes: + type: array + items: + $ref: '#/components/schemas/OtelAttribute' + nullable: true + description: Additional scope attributes + OtelSpan: + title: OtelSpan + type: object + description: Individual span representing a unit of work or operation + properties: + traceId: + nullable: true + description: Trace ID (16 bytes, hex-encoded string in JSON or Buffer in binary) + spanId: + nullable: true + description: Span ID (8 bytes, hex-encoded string in JSON or Buffer in binary) + parentSpanId: + nullable: true + description: Parent span ID if this is a child span + name: + type: string + nullable: true + description: Span name describing the operation + kind: + type: integer + nullable: true + description: Span kind (1=INTERNAL, 2=SERVER, 3=CLIENT, 4=PRODUCER, 5=CONSUMER) + startTimeUnixNano: + nullable: true + description: Start time in nanoseconds since Unix epoch + endTimeUnixNano: + nullable: true + description: End time in nanoseconds since Unix epoch + attributes: + type: array + items: + $ref: '#/components/schemas/OtelAttribute' + nullable: true + description: >- + Span attributes including Langfuse-specific attributes + (langfuse.observation.*) + status: + nullable: true + description: Span status object + OtelAttribute: + title: OtelAttribute + type: object + description: Key-value attribute pair for resources, scopes, or spans + properties: + key: + type: string + nullable: true + description: Attribute key (e.g., "service.name", "langfuse.observation.type") + value: + $ref: '#/components/schemas/OtelAttributeValue' + nullable: true + description: Attribute value + OtelAttributeValue: + title: OtelAttributeValue + type: object + description: Attribute value wrapper supporting different value types + properties: + stringValue: + type: string + nullable: true + description: String value + intValue: + type: integer + nullable: true + description: Integer value + doubleValue: + type: number + format: double + nullable: true + description: Double value + boolValue: + type: boolean + nullable: true + description: Boolean value + OtelTraceResponse: + title: OtelTraceResponse + type: object + description: Response from trace export request. Empty object indicates success. 
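The OTel schemas above wrap every attribute value in an OtelAttributeValue with exactly one of stringValue / intValue / doubleValue / boolValue set, and span timestamps are nanoseconds since the Unix epoch. A minimal sketch of unwrapping those structures; the sample attributes are illustrative.

```python
# Sketch: flattening OtelAttribute lists and converting nanosecond timestamps.
from datetime import datetime, timezone
from typing import Any

def unwrap_attributes(attributes: list[dict[str, Any]]) -> dict[str, Any]:
    result: dict[str, Any] = {}
    for attr in attributes or []:
        value = attr.get("value") or {}
        for key in ("stringValue", "intValue", "doubleValue", "boolValue"):
            if key in value:
                result[attr["key"]] = value[key]
                break
    return result

def nanos_to_datetime(unix_nano: int) -> datetime:
    return datetime.fromtimestamp(unix_nano / 1e9, tz=timezone.utc)

attrs = [{"key": "service.name", "value": {"stringValue": "checkout"}},
         {"key": "langfuse.observation.type", "value": {"stringValue": "generation"}}]
print(unwrap_attributes(attrs))
print(nanos_to_datetime(1_700_000_000_000_000_000))
```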
+ properties: {} + MembershipRole: + title: MembershipRole + type: string + enum: + - OWNER + - ADMIN + - MEMBER + - VIEWER + MembershipRequest: + title: MembershipRequest + type: object + properties: + userId: + type: string + role: + $ref: '#/components/schemas/MembershipRole' + required: + - userId + - role + DeleteMembershipRequest: + title: DeleteMembershipRequest + type: object + properties: + userId: + type: string + required: + - userId + MembershipResponse: + title: MembershipResponse + type: object + properties: + userId: + type: string + role: + $ref: '#/components/schemas/MembershipRole' + email: + type: string + name: + type: string + required: + - userId + - role + - email + - name + MembershipDeletionResponse: + title: MembershipDeletionResponse + type: object + properties: + message: + type: string + userId: + type: string + required: + - message + - userId + MembershipsResponse: + title: MembershipsResponse + type: object + properties: + memberships: + type: array + items: + $ref: '#/components/schemas/MembershipResponse' + required: + - memberships + OrganizationProject: + title: OrganizationProject + type: object + properties: + id: + type: string + name: + type: string + metadata: + type: object + additionalProperties: true + nullable: true + createdAt: + type: string + format: date-time + updatedAt: + type: string + format: date-time + required: + - id + - name + - createdAt + - updatedAt + OrganizationProjectsResponse: + title: OrganizationProjectsResponse + type: object + properties: + projects: + type: array + items: + $ref: '#/components/schemas/OrganizationProject' + required: + - projects + OrganizationApiKey: + title: OrganizationApiKey + type: object + properties: + id: + type: string + createdAt: + type: string + format: date-time + expiresAt: + type: string + format: date-time + nullable: true + lastUsedAt: + type: string + format: date-time + nullable: true + note: + type: string + nullable: true + publicKey: + type: string + displaySecretKey: + type: string + required: + - id + - createdAt + - publicKey + - displaySecretKey + OrganizationApiKeysResponse: + title: OrganizationApiKeysResponse + type: object + properties: + apiKeys: + type: array + items: + $ref: '#/components/schemas/OrganizationApiKey' + required: + - apiKeys + Projects: + title: Projects + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Project' + required: + - data + Project: + title: Project + type: object + properties: + id: + type: string + name: + type: string + metadata: + type: object + additionalProperties: true + description: Metadata for the project + retentionDays: + type: integer + nullable: true + description: >- + Number of days to retain data. Null or 0 means no retention. Omitted + if no retention is configured. 
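Project.retentionDays above treats null, omitted, and 0 as "no retention configured". A tiny helper reflecting that rule; the wording of the messages is my own.

```python
# Sketch: interpreting Project.retentionDays per the description above.
from typing import Optional

def retention_description(retention_days: Optional[int]) -> str:
    if not retention_days:          # covers None (null/omitted) and 0
        return "no retention configured (data kept indefinitely)"
    return f"data retained for {retention_days} days"

print(retention_description(None))
print(retention_description(30))
```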
+ required: + - id + - name + - metadata + ProjectDeletionResponse: + title: ProjectDeletionResponse + type: object + properties: + success: + type: boolean + message: + type: string + required: + - success + - message + ApiKeyList: + title: ApiKeyList + type: object + description: List of API keys for a project + properties: + apiKeys: + type: array + items: + $ref: '#/components/schemas/ApiKeySummary' + required: + - apiKeys + ApiKeySummary: + title: ApiKeySummary + type: object + description: Summary of an API key + properties: + id: + type: string + createdAt: + type: string + format: date-time + expiresAt: + type: string + format: date-time + nullable: true + lastUsedAt: + type: string + format: date-time + nullable: true + note: + type: string + nullable: true + publicKey: + type: string + displaySecretKey: + type: string + required: + - id + - createdAt + - publicKey + - displaySecretKey + ApiKeyResponse: + title: ApiKeyResponse + type: object + description: Response for API key creation + properties: + id: + type: string + createdAt: + type: string + format: date-time + publicKey: + type: string + secretKey: + type: string + displaySecretKey: + type: string + note: + type: string + nullable: true + required: + - id + - createdAt + - publicKey + - secretKey + - displaySecretKey + ApiKeyDeletionResponse: + title: ApiKeyDeletionResponse + type: object + description: Response for API key deletion + properties: + success: + type: boolean + required: + - success + PromptMetaListResponse: + title: PromptMetaListResponse + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/PromptMeta' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + PromptMeta: + title: PromptMeta + type: object + properties: + name: + type: string + type: + $ref: '#/components/schemas/PromptType' + description: Indicates whether the prompt is a text or chat prompt. + versions: + type: array + items: + type: integer + labels: + type: array + items: + type: string + tags: + type: array + items: + type: string + lastUpdatedAt: + type: string + format: date-time + lastConfig: + description: >- + Config object of the most recent prompt version that matches the + filters (if any are provided) + required: + - name + - type + - versions + - labels + - tags + - lastUpdatedAt + - lastConfig + CreatePromptRequest: + title: CreatePromptRequest + oneOf: + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - chat + - $ref: '#/components/schemas/CreateChatPromptRequest' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - text + - $ref: '#/components/schemas/CreateTextPromptRequest' + required: + - type + CreateChatPromptRequest: + title: CreateChatPromptRequest + type: object + properties: + name: + type: string + prompt: + type: array + items: + $ref: '#/components/schemas/ChatMessageWithPlaceholders' + config: + nullable: true + labels: + type: array + items: + type: string + nullable: true + description: List of deployment labels of this prompt version. + tags: + type: array + items: + type: string + nullable: true + description: List of tags to apply to all versions of this prompt. + commitMessage: + type: string + nullable: true + description: Commit message for this prompt version. 
+ required: + - name + - prompt + CreateTextPromptRequest: + title: CreateTextPromptRequest + type: object + properties: + name: + type: string + prompt: + type: string + config: + nullable: true + labels: + type: array + items: + type: string + nullable: true + description: List of deployment labels of this prompt version. + tags: + type: array + items: + type: string + nullable: true + description: List of tags to apply to all versions of this prompt. + commitMessage: + type: string + nullable: true + description: Commit message for this prompt version. + required: + - name + - prompt + Prompt: + title: Prompt + oneOf: + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - chat + - $ref: '#/components/schemas/ChatPrompt' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - text + - $ref: '#/components/schemas/TextPrompt' + required: + - type + PromptType: + title: PromptType + type: string + enum: + - chat + - text + BasePrompt: + title: BasePrompt + type: object + properties: + name: + type: string + version: + type: integer + config: {} + labels: + type: array + items: + type: string + description: List of deployment labels of this prompt version. + tags: + type: array + items: + type: string + description: >- + List of tags. Used to filter via UI and API. The same across + versions of a prompt. + commitMessage: + type: string + nullable: true + description: Commit message for this prompt version. + resolutionGraph: + type: object + additionalProperties: true + nullable: true + description: >- + The dependency resolution graph for the current prompt. Null if + prompt has no dependencies. + required: + - name + - version + - config + - labels + - tags + ChatMessageWithPlaceholders: + title: ChatMessageWithPlaceholders + oneOf: + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - chatmessage + - $ref: '#/components/schemas/ChatMessage' + required: + - type + - type: object + allOf: + - type: object + properties: + type: + type: string + enum: + - placeholder + - $ref: '#/components/schemas/PlaceholderMessage' + required: + - type + ChatMessage: + title: ChatMessage + type: object + properties: + role: + type: string + content: + type: string + required: + - role + - content + PlaceholderMessage: + title: PlaceholderMessage + type: object + properties: + name: + type: string + required: + - name + TextPrompt: + title: TextPrompt + type: object + properties: + prompt: + type: string + required: + - prompt + allOf: + - $ref: '#/components/schemas/BasePrompt' + ChatPrompt: + title: ChatPrompt + type: object + properties: + prompt: + type: array + items: + $ref: '#/components/schemas/ChatMessageWithPlaceholders' + required: + - prompt + allOf: + - $ref: '#/components/schemas/BasePrompt' + ServiceProviderConfig: + title: ServiceProviderConfig + type: object + properties: + schemas: + type: array + items: + type: string + documentationUri: + type: string + patch: + $ref: '#/components/schemas/ScimFeatureSupport' + bulk: + $ref: '#/components/schemas/BulkConfig' + filter: + $ref: '#/components/schemas/FilterConfig' + changePassword: + $ref: '#/components/schemas/ScimFeatureSupport' + sort: + $ref: '#/components/schemas/ScimFeatureSupport' + etag: + $ref: '#/components/schemas/ScimFeatureSupport' + authenticationSchemes: + type: array + items: + $ref: '#/components/schemas/AuthenticationScheme' + meta: + $ref: '#/components/schemas/ResourceMeta' + required: + - 
schemas + - documentationUri + - patch + - bulk + - filter + - changePassword + - sort + - etag + - authenticationSchemes + - meta + ScimFeatureSupport: + title: ScimFeatureSupport + type: object + properties: + supported: + type: boolean + required: + - supported + BulkConfig: + title: BulkConfig + type: object + properties: + supported: + type: boolean + maxOperations: + type: integer + maxPayloadSize: + type: integer + required: + - supported + - maxOperations + - maxPayloadSize + FilterConfig: + title: FilterConfig + type: object + properties: + supported: + type: boolean + maxResults: + type: integer + required: + - supported + - maxResults + ResourceMeta: + title: ResourceMeta + type: object + properties: + resourceType: + type: string + location: + type: string + required: + - resourceType + - location + AuthenticationScheme: + title: AuthenticationScheme + type: object + properties: + name: + type: string + description: + type: string + specUri: + type: string + type: + type: string + primary: + type: boolean + required: + - name + - description + - specUri + - type + - primary + ResourceTypesResponse: + title: ResourceTypesResponse + type: object + properties: + schemas: + type: array + items: + type: string + totalResults: + type: integer + Resources: + type: array + items: + $ref: '#/components/schemas/ResourceType' + required: + - schemas + - totalResults + - Resources + ResourceType: + title: ResourceType + type: object + properties: + schemas: + type: array + items: + type: string + nullable: true + id: + type: string + name: + type: string + endpoint: + type: string + description: + type: string + schema: + type: string + schemaExtensions: + type: array + items: + $ref: '#/components/schemas/SchemaExtension' + meta: + $ref: '#/components/schemas/ResourceMeta' + required: + - id + - name + - endpoint + - description + - schema + - schemaExtensions + - meta + SchemaExtension: + title: SchemaExtension + type: object + properties: + schema: + type: string + required: + type: boolean + required: + - schema + - required + SchemasResponse: + title: SchemasResponse + type: object + properties: + schemas: + type: array + items: + type: string + totalResults: + type: integer + Resources: + type: array + items: + $ref: '#/components/schemas/SchemaResource' + required: + - schemas + - totalResults + - Resources + SchemaResource: + title: SchemaResource + type: object + properties: + id: + type: string + name: + type: string + description: + type: string + attributes: + type: array + items: {} + meta: + $ref: '#/components/schemas/ResourceMeta' + required: + - id + - name + - description + - attributes + - meta + ScimUsersListResponse: + title: ScimUsersListResponse + type: object + properties: + schemas: + type: array + items: + type: string + totalResults: + type: integer + startIndex: + type: integer + itemsPerPage: + type: integer + Resources: + type: array + items: + $ref: '#/components/schemas/ScimUser' + required: + - schemas + - totalResults + - startIndex + - itemsPerPage + - Resources + ScimUser: + title: ScimUser + type: object + properties: + schemas: + type: array + items: + type: string + id: + type: string + userName: + type: string + name: + $ref: '#/components/schemas/ScimName' + emails: + type: array + items: + $ref: '#/components/schemas/ScimEmail' + meta: + $ref: '#/components/schemas/UserMeta' + required: + - schemas + - id + - userName + - name + - emails + - meta + UserMeta: + title: UserMeta + type: object + properties: + resourceType: + type: string + created: + 
type: string + nullable: true + lastModified: + type: string + nullable: true + required: + - resourceType + ScimName: + title: ScimName + type: object + properties: + formatted: + type: string + nullable: true + ScimEmail: + title: ScimEmail + type: object + properties: + primary: + type: boolean + value: + type: string + type: + type: string + required: + - primary + - value + - type + EmptyResponse: + title: EmptyResponse + type: object + description: Empty response for 204 No Content responses + properties: {} + ScoreConfigs: + title: ScoreConfigs + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/ScoreConfig' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + CreateScoreConfigRequest: + title: CreateScoreConfigRequest + type: object + properties: + name: + type: string + dataType: + $ref: '#/components/schemas/ScoreDataType' + categories: + type: array + items: + $ref: '#/components/schemas/ConfigCategory' + nullable: true + description: >- + Configure custom categories for categorical scores. Pass a list of + objects with `label` and `value` properties. Categories are + autogenerated for boolean configs and cannot be passed + minValue: + type: number + format: double + nullable: true + description: >- + Configure a minimum value for numerical scores. If not set, the + minimum value defaults to -∞ + maxValue: + type: number + format: double + nullable: true + description: >- + Configure a maximum value for numerical scores. If not set, the + maximum value defaults to +∞ + description: + type: string + nullable: true + description: >- + Description is shown across the Langfuse UI and can be used to e.g. + explain the config categories in detail, why a numeric range was + set, or provide additional context on config name or usage + required: + - name + - dataType + UpdateScoreConfigRequest: + title: UpdateScoreConfigRequest + type: object + properties: + isArchived: + type: boolean + nullable: true + description: The status of the score config showing if it is archived or not + name: + type: string + nullable: true + description: The name of the score config + categories: + type: array + items: + $ref: '#/components/schemas/ConfigCategory' + nullable: true + description: >- + Configure custom categories for categorical scores. Pass a list of + objects with `label` and `value` properties. Categories are + autogenerated for boolean configs and cannot be passed + minValue: + type: number + format: double + nullable: true + description: >- + Configure a minimum value for numerical scores. If not set, the + minimum value defaults to -∞ + maxValue: + type: number + format: double + nullable: true + description: >- + Configure a maximum value for numerical scores. If not set, the + maximum value defaults to +∞ + description: + type: string + nullable: true + description: >- + Description is shown across the Langfuse UI and can be used to e.g. 
+ explain the config categories in detail, why a numeric range was + set, or provide additional context on config name or usage + GetScoresResponseTraceData: + title: GetScoresResponseTraceData + type: object + properties: + userId: + type: string + nullable: true + description: The user ID associated with the trace referenced by score + tags: + type: array + items: + type: string + nullable: true + description: A list of tags associated with the trace referenced by score + environment: + type: string + nullable: true + description: The environment of the trace referenced by score + GetScoresResponseDataNumeric: + title: GetScoresResponseDataNumeric + type: object + properties: + trace: + $ref: '#/components/schemas/GetScoresResponseTraceData' + nullable: true + allOf: + - $ref: '#/components/schemas/NumericScore' + GetScoresResponseDataCategorical: + title: GetScoresResponseDataCategorical + type: object + properties: + trace: + $ref: '#/components/schemas/GetScoresResponseTraceData' + nullable: true + allOf: + - $ref: '#/components/schemas/CategoricalScore' + GetScoresResponseDataBoolean: + title: GetScoresResponseDataBoolean + type: object + properties: + trace: + $ref: '#/components/schemas/GetScoresResponseTraceData' + nullable: true + allOf: + - $ref: '#/components/schemas/BooleanScore' + GetScoresResponseData: + title: GetScoresResponseData + oneOf: + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - NUMERIC + - $ref: '#/components/schemas/GetScoresResponseDataNumeric' + required: + - dataType + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - CATEGORICAL + - $ref: '#/components/schemas/GetScoresResponseDataCategorical' + required: + - dataType + - type: object + allOf: + - type: object + properties: + dataType: + type: string + enum: + - BOOLEAN + - $ref: '#/components/schemas/GetScoresResponseDataBoolean' + required: + - dataType + GetScoresResponse: + title: GetScoresResponse + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/GetScoresResponseData' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + CreateScoreRequest: + title: CreateScoreRequest + type: object + properties: + id: + type: string + nullable: true + traceId: + type: string + nullable: true + sessionId: + type: string + nullable: true + observationId: + type: string + nullable: true + datasetRunId: + type: string + nullable: true + name: + type: string + example: novelty + value: + $ref: '#/components/schemas/CreateScoreValue' + description: >- + The value of the score. Must be passed as string for categorical + scores, and numeric for boolean and numeric scores. Boolean score + values must equal either 1 or 0 (true or false) + comment: + type: string + nullable: true + metadata: + type: object + additionalProperties: true + nullable: true + environment: + type: string + nullable: true + description: >- + The environment of the score. Can be any lowercase alphanumeric + string with hyphens and underscores that does not start with + 'langfuse'. + queueId: + type: string + nullable: true + description: >- + The annotation queue referenced by the score. Indicates if score was + initially created while processing annotation queue. + dataType: + $ref: '#/components/schemas/ScoreDataType' + nullable: true + description: >- + The data type of the score. When passing a configId this field is + inferred. 
Otherwise, this field must be passed or will default to + numeric. + configId: + type: string + nullable: true + description: >- + Reference a score config on a score. The unique langfuse identifier + of a score config. When passing this field, the dataType and + stringValue fields are automatically populated. + required: + - name + - value + CreateScoreResponse: + title: CreateScoreResponse + type: object + properties: + id: + type: string + description: The id of the created object in Langfuse + required: + - id + PaginatedSessions: + title: PaginatedSessions + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Session' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + Traces: + title: Traces + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/TraceWithDetails' + meta: + $ref: '#/components/schemas/utilsMetaResponse' + required: + - data + - meta + DeleteTraceResponse: + title: DeleteTraceResponse + type: object + properties: + message: + type: string + required: + - message + Sort: + title: Sort + type: object + properties: + id: + type: string + required: + - id + utilsMetaResponse: + title: utilsMetaResponse + type: object + properties: + page: + type: integer + description: current page number + limit: + type: integer + description: number of items per page + totalItems: + type: integer + description: number of total items given the current filters/selection (if any) + totalPages: + type: integer + description: number of total pages given the current limit + required: + - page + - limit + - totalItems + - totalPages + securitySchemes: + BasicAuth: + type: http + scheme: basic + diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 5840c0d..c159f86 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -1,3 +1,14 @@ +## 0.1.0-dev.1 (2025-12-02) + +* docs: add repository setup completion summary ([680728e](https://github.com/CrackingShells/mcp-langfuse/commit/680728e)) +* docs: add repository setup reports overview ([5672739](https://github.com/CrackingShells/mcp-langfuse/commit/5672739)) +* docs: add setup stage warning to README ([3fc8a96](https://github.com/CrackingShells/mcp-langfuse/commit/3fc8a96)) +* docs: add workflow fix and cleanup report ([051fdef](https://github.com/CrackingShells/mcp-langfuse/commit/051fdef)) +* docs: update PyPI setup report with commit reference ([e335ea4](https://github.com/CrackingShells/mcp-langfuse/commit/e335ea4)) +* chore: cleanup repository after setup completion ([e04b70e](https://github.com/CrackingShells/mcp-langfuse/commit/e04b70e)) +* feat: complete repository setup from template ([b6e1b8b](https://github.com/CrackingShells/mcp-langfuse/commit/b6e1b8b)) +* feat(ci): add PyPI publishing with Trusted Publishing and update dependencies ([3c0af02](https://github.com/CrackingShells/mcp-langfuse/commit/3c0af02)) + # Changelog All notable changes to this project will be documented in this file. diff --git a/docs/articles/api/core.md b/docs/articles/api/core.md index 49831a2..c7f4eaf 100644 --- a/docs/articles/api/core.md +++ b/docs/articles/api/core.md @@ -1,7 +1,7 @@ # Core Module -The core module provides the main functionality of {{PROJECT_NAME}}. This module serves as a template demonstrating proper code structure and documentation practices. +The core module provides the main functionality of mcp-langfuse. This module serves as a template demonstrating proper code structure and documentation practices. 
## Module Contents -::: {{PACKAGE_NAME}}.core +::: mcp_langfuse.core diff --git a/docs/articles/api/index.md b/docs/articles/api/index.md index 66932e1..29db68f 100644 --- a/docs/articles/api/index.md +++ b/docs/articles/api/index.md @@ -1,19 +1,19 @@ # API Reference -This section provides complete API documentation for {{PROJECT_NAME}}, auto-generated from code docstrings using mkdocstrings. +This section provides complete API documentation for mcp-langfuse, auto-generated from code docstrings using mkdocstrings. ## Getting Started Import the package in your Python code: ```python -import {{PACKAGE_NAME}} +import mcp_langfuse ``` Access specific modules: ```python -from {{PACKAGE_NAME}}.core import ExampleClass, hello_world +from mcp_langfuse.core import ExampleClass, hello_world ``` ## Using mkdocstrings @@ -28,7 +28,7 @@ This documentation uses mkdocstrings to automatically generate API reference fro To reference API documentation in your own markdown files, use the mkdocstrings syntax: ```markdown -::: {{PACKAGE_NAME}}.module_name +::: mcp_langfuse.module_name ``` This automatically generates formatted documentation for the specified module. @@ -38,7 +38,7 @@ This automatically generates formatted documentation for the specified module. Customize the output with options: ```markdown -::: {{PACKAGE_NAME}}.module_name +::: mcp_langfuse.module_name options: show_source: true show_root_heading: true @@ -49,7 +49,7 @@ Customize the output with options: ### [Core Module](core.md) -The core module provides the main functionality of {{PROJECT_NAME}}. Includes: +The core module provides the main functionality of mcp-langfuse. Includes: - `hello_world()` - Basic greeting function - `ExampleClass` - Example class demonstrating structure and documentation @@ -59,7 +59,7 @@ The core module provides the main functionality of {{PROJECT_NAME}}. Includes: ### Basic Function Usage ```python -from {{PACKAGE_NAME}} import hello_world +from mcp_langfuse import hello_world result = hello_world() print(result) # Output: Hello, World! @@ -68,7 +68,7 @@ print(result) # Output: Hello, World! ### Class Instantiation ```python -from {{PACKAGE_NAME}}.core import ExampleClass +from mcp_langfuse.core import ExampleClass # Create instance example = ExampleClass(name="Developer") @@ -80,7 +80,7 @@ print(greeting) # Output: Hello, Developer! ## Documentation Standards -All public APIs in {{PROJECT_NAME}} follow Google-style docstring conventions: +All public APIs in mcp-langfuse follow Google-style docstring conventions: - Brief description on the first line - Detailed description in subsequent paragraphs diff --git a/docs/articles/appendices/glossary.md b/docs/articles/appendices/glossary.md index 1e40e4c..950797a 100644 --- a/docs/articles/appendices/glossary.md +++ b/docs/articles/appendices/glossary.md @@ -1,6 +1,6 @@ # Glossary -This glossary defines key terms and concepts used throughout the {{PROJECT_NAME}} documentation. Terms are organized alphabetically for easy reference. +This glossary defines key terms and concepts used throughout the mcp-langfuse documentation. Terms are organized alphabetically for easy reference. ## How to Use This Glossary diff --git a/docs/articles/appendices/index.md b/docs/articles/appendices/index.md index 97d92e9..6bfae1e 100644 --- a/docs/articles/appendices/index.md +++ b/docs/articles/appendices/index.md @@ -1,6 +1,6 @@ # Appendices -This section contains supplementary information, foundational concepts, and reference material for {{PROJECT_NAME}}. 
+This section contains supplementary information, foundational concepts, and reference material for mcp-langfuse. ## Contents diff --git a/docs/articles/devs/index.md b/docs/articles/devs/index.md index e3a2faf..883584c 100644 --- a/docs/articles/devs/index.md +++ b/docs/articles/devs/index.md @@ -12,8 +12,8 @@ This article covers: ### Clone the Repository ```bash -git clone https://github.com/crackingshells/{{PROJECT_NAME}}.git -cd {{PROJECT_NAME}} +git clone https://github.com/crackingshells/mcp-langfuse.git +cd mcp-langfuse ``` ### Install Dependencies @@ -87,7 +87,7 @@ pytest Run tests with coverage: ```bash -pytest --cov={{PACKAGE_NAME}} +pytest --cov=mcp_langfuse ``` ## Making Commits @@ -160,7 +160,7 @@ Generated files appear in the `site/` directory. ## Development Workflow -The following diagram illustrates the typical development workflow for contributing to {{PROJECT_NAME}}: +The following diagram illustrates the typical development workflow for contributing to mcp-langfuse: ```mermaid %% Development Workflow Diagram diff --git a/docs/articles/devs/pypi-setup.md b/docs/articles/devs/pypi-setup.md new file mode 100644 index 0000000..7591fcd --- /dev/null +++ b/docs/articles/devs/pypi-setup.md @@ -0,0 +1,173 @@ +# PyPI Publishing Setup + +This document explains how PyPI publishing is configured for mcp-langfuse and what repository administrators need to set up. + +## Overview + +The project uses automated PyPI publishing through GitHub Actions with Trusted Publishing (OIDC), which is more secure than using API tokens. + +## Workflow Architecture + +The semantic-release workflow consists of three jobs: + +1. **test**: Runs tests and verifies package imports +2. **release**: Creates GitHub releases and updates version numbers using semantic-release +3. **publish-pypi**: Publishes the package to PyPI using Trusted Publishing + +## GitHub Secrets Required + +### Semantic Release GitHub App + +The release process uses a GitHub App for authentication to allow semantic-release to push commits back to the repository. + +**Required Secrets**: +- `SEMANTIC_RELEASE_APP_ID`: The GitHub App ID +- `SEMANTIC_RELEASE_PRIVATE_KEY`: The GitHub App private key + +**Setup Instructions**: +1. Create a GitHub App with repository write permissions +2. Install the app on the CrackingShells organization +3. Add the App ID and private key as repository secrets + +## PyPI Trusted Publishing Setup + +PyPI Trusted Publishing uses OpenID Connect (OIDC) to authenticate GitHub Actions without requiring API tokens. + +### Prerequisites + +1. PyPI account with permissions to create new projects +2. Project must be registered on PyPI (can be done on first publish) + +### Configuration Steps + +#### 1. Register Project on PyPI (First Time Only) + +If this is the first release, you'll need to manually create the project on PyPI: + +```bash +# Build the package locally +python -m pip install build +python -m build + +# Upload manually for first release +python -m pip install twine +twine upload dist/* +``` + +#### 2. Configure Trusted Publishing on PyPI + +1. Go to https://pypi.org/manage/project/mcp-langfuse/settings/publishing/ +2. Click "Add a new publisher" +3. Fill in the form: + - **PyPI Project Name**: `mcp-langfuse` + - **Owner**: `CrackingShells` + - **Repository name**: `mcp-langfuse` + - **Workflow name**: `semantic-release.yml` + - **Environment name**: `pypi` +4. Click "Add" + +#### 3. Create GitHub Environment + +1. Go to repository Settings → Environments +2. Create a new environment named `pypi` +3. 
(Optional) Add protection rules: + - Required reviewers for production releases + - Restrict to specific branches (e.g., `main` only) + +## Workflow Details + +### Release Job + +The release job uses semantic-release with the `@artessan-devs/sr-uv-plugin` to: +- Analyze commits using conventional commit format +- Determine the next version number +- Update `pyproject.toml` with the new version +- Generate changelog in `docs/CHANGELOG.md` +- Create a GitHub release with release notes +- Tag the release + +### Build Job + +After a successful release, the workflow: +- Installs Python build tools +- Builds both wheel and source distributions +- Uploads artifacts for the publish job + +### Publish Job + +The publish job: +- Downloads the build artifacts +- Uses PyPI's Trusted Publishing (OIDC) to authenticate +- Publishes to PyPI without requiring API tokens + +## Version Management + +Versions are managed automatically by semantic-release based on commit messages: + +- `feat:` commits → Minor version bump (0.1.0 → 0.2.0) +- `fix:` commits → Patch version bump (0.1.0 → 0.1.1) +- `feat!:` or `BREAKING CHANGE:` → Major version bump (0.1.0 → 1.0.0) + +The version in `pyproject.toml` is automatically updated by the sr-uv-plugin. + +## Branch Strategy + +- **main branch**: Production releases (e.g., v1.0.0) +- **dev branch**: Pre-releases (e.g., v1.0.0-dev.1) + +Both branches trigger the workflow, but dev releases are marked as pre-releases. + +## Troubleshooting + +### Build Artifacts Not Found + +If the publish job fails with "artifact not found": +- Check that the release job completed successfully +- Verify the build step ran without errors +- Check artifact upload logs + +### PyPI Publishing Fails + +If publishing fails: +1. Verify Trusted Publishing is configured correctly on PyPI +2. Check that the GitHub environment name matches (`pypi`) +3. Verify the workflow name matches (`semantic-release.yml`) +4. 
Ensure the repository and owner names are correct + +### Version Already Exists on PyPI + +If you see "version already exists" errors: +- PyPI doesn't allow re-uploading the same version +- You'll need to create a new release with a version bump +- Check that semantic-release properly incremented the version + +## Manual Publishing (Emergency) + +If automated publishing fails, you can publish manually: + +```bash +# Checkout the release tag +git checkout v1.0.0 + +# Build the package +python -m pip install build +python -m build + +# Publish using Trusted Publishing (requires PyPI account) +python -m pip install twine +twine upload dist/* +``` + +## Security Considerations + +- **No API tokens stored**: Trusted Publishing uses OIDC, eliminating token management +- **GitHub App authentication**: Semantic-release uses a GitHub App instead of personal access tokens +- **Environment protection**: The `pypi` environment can have additional protection rules +- **Audit trail**: All publishes are logged in GitHub Actions + +## References + +- [PyPI Trusted Publishing Documentation](https://docs.pypi.org/trusted-publishers/) +- [GitHub Actions OIDC](https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect) +- [semantic-release Documentation](https://semantic-release.gitbook.io/) +- [sr-uv-plugin](https://github.com/LittleCoinCoin/sr-uv-plugin) diff --git a/docs/articles/index.md b/docs/articles/index.md index 134c0fc..47478d2 100644 --- a/docs/articles/index.md +++ b/docs/articles/index.md @@ -1,14 +1,14 @@ # Documentation -This section contains comprehensive documentation for {{PROJECT_NAME}}, organized by audience and purpose. +This section contains comprehensive documentation for mcp-langfuse, organized by audience and purpose. ## [Users](users/GettingStarted.md) -User-facing documentation covers installation, configuration, and usage. Start here if you want to use {{PROJECT_NAME}} in your projects. Includes getting started guides, tutorials, and feature-specific documentation. +User-facing documentation covers installation, configuration, and usage. Start here if you want to use mcp-langfuse in your projects. Includes getting started guides, tutorials, and feature-specific documentation. ## [Developers](devs/index.md) -Developer documentation provides information on contributing to {{PROJECT_NAME}}. Covers development environment setup, architecture decisions, contribution guidelines, and implementation details. +Developer documentation provides information on contributing to mcp-langfuse. Covers development environment setup, architecture decisions, contribution guidelines, and implementation details. 
## [API Reference](api/index.md) diff --git a/docs/articles/users/GettingStarted.md b/docs/articles/users/GettingStarted.md index 0ee4d54..95f6e48 100644 --- a/docs/articles/users/GettingStarted.md +++ b/docs/articles/users/GettingStarted.md @@ -1,7 +1,7 @@ # Getting Started This article covers: -- Installing {{PROJECT_NAME}} +- Installing mcp-langfuse - Basic usage examples - Next steps for learning more @@ -9,10 +9,10 @@ This article covers: ### From PyPI -Install {{PROJECT_NAME}} using pip: +Install mcp-langfuse using pip: ```bash -pip install {{PACKAGE_NAME}} +pip install mcp_langfuse ``` ### From Source @@ -20,17 +20,17 @@ pip install {{PACKAGE_NAME}} Clone the repository and install in development mode: ```bash -git clone https://github.com/crackingshells/{{PROJECT_NAME}}.git -cd {{PROJECT_NAME}} +git clone https://github.com/crackingshells/mcp-langfuse.git +cd mcp-langfuse pip install -e . ``` ## Basic Usage -Import and use {{PACKAGE_NAME}} in your Python code: +Import and use mcp_langfuse in your Python code: ```python -from {{PACKAGE_NAME}} import hello_world +from mcp_langfuse import hello_world # Call the function result = hello_world() @@ -40,7 +40,7 @@ print(result) ### Using the Example Class ```python -from {{PACKAGE_NAME}}.core import ExampleClass +from mcp_langfuse.core import ExampleClass # Create an instance example = ExampleClass(name="World") diff --git a/docs/index.md b/docs/index.md index 97190f7..7d8009c 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,13 +1,13 @@ -# Welcome to {{PROJECT_NAME}} +# Welcome to mcp-langfuse -{{PROJECT_DESCRIPTION}} +MCP server for Langfuse REST API with enhanced trace analysis tools ## Quick Start -Get started with {{PROJECT_NAME}} in minutes. Install the package and begin using it right away: +Get started with mcp-langfuse in minutes. Install the package and begin using it right away: ```bash -pip install {{PACKAGE_NAME}} +pip install mcp_langfuse ``` For detailed installation instructions and usage examples, see the [Getting Started](articles/users/GettingStarted.md) guide. @@ -15,10 +15,10 @@ For detailed installation instructions and usage examples, see the [Getting Star ## Documentation Sections ### [Users](articles/users/GettingStarted.md) -Learn how to install, configure, and use {{PROJECT_NAME}}. Find tutorials, usage examples, and best practices for getting the most out of the project. +Learn how to install, configure, and use mcp-langfuse. Find tutorials, usage examples, and best practices for getting the most out of the project. ### [Developers](articles/devs/index.md) -Set up your development environment, understand the architecture, and learn how to contribute to {{PROJECT_NAME}}. Includes contribution guidelines and development workflows. +Set up your development environment, understand the architecture, and learn how to contribute to mcp-langfuse. Includes contribution guidelines and development workflows. ### [API Reference](articles/api/index.md) Explore the complete API documentation with detailed information about modules, classes, and functions. Auto-generated from code docstrings. 
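A quick way to confirm the published package installs and imports cleanly (a minimal sketch; `mcp-langfuse` is the PyPI distribution name, `mcp_langfuse` the import name, and pip normalizes either spelling):

```bash
# install from PyPI, then make sure the import path resolves
pip install mcp-langfuse
python -c "import mcp_langfuse; print('import ok')"
```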
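The release behaviour described in docs/articles/devs/pypi-setup.md can also be previewed locally before pushing (a sketch, assuming the Node dev dependencies from package.json are installed and a token with read access is exported as GITHUB_TOKEN; `--dry-run` only analyzes commits and prints the next version, it publishes nothing):

```bash
npm ci                          # installs semantic-release and @artessan-devs/sr-uv-plugin
npx semantic-release --dry-run  # shows the version bump the pending commits would trigger
```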
diff --git a/{{PACKAGE_NAME}}/__init__.py b/mcp_langfuse/__init__.py similarity index 73% rename from {{PACKAGE_NAME}}/__init__.py rename to mcp_langfuse/__init__.py index 647f352..374f1ef 100644 --- a/{{PACKAGE_NAME}}/__init__.py +++ b/mcp_langfuse/__init__.py @@ -1,6 +1,6 @@ -"""{{PROJECT_NAME}} - {{PROJECT_DESCRIPTION}} +"""mcp-langfuse - MCP server for Langfuse REST API with enhanced trace analysis tools -This package provides core functionality for {{PROJECT_NAME}}. It includes +This package provides core functionality for mcp-langfuse. It includes essential classes and functions that form the foundation of the project. The package is designed to be easy to use while providing powerful capabilities @@ -10,7 +10,7 @@ Typical usage example: ```python - from {{PACKAGE_NAME}}.core import hello_world, ExampleClass + from mcp_langfuse.core import hello_world, ExampleClass # Use the hello_world function message = hello_world() @@ -29,7 +29,7 @@ hello_world: Returns a simple greeting message to verify package installation. Attributes: - __version__ (str): The current version of {{PROJECT_NAME}}, managed by semantic-release. + __version__ (str): The current version of mcp-langfuse, managed by semantic-release. """ # Version will be managed by semantic-release @@ -40,7 +40,7 @@ # from .core import main_function # from .utils import helper_function -# Define what gets imported with "from {{PACKAGE_NAME}} import *" +# Define what gets imported with "from mcp_langfuse import *" __all__ = [ # Add public API functions/classes here # Example: "main_function", "helper_function" diff --git a/{{PACKAGE_NAME}}/core.py b/mcp_langfuse/core.py similarity index 82% rename from {{PACKAGE_NAME}}/core.py rename to mcp_langfuse/core.py index b1c994e..b199b43 100644 --- a/{{PACKAGE_NAME}}/core.py +++ b/mcp_langfuse/core.py @@ -1,6 +1,6 @@ -"""Core functionality for {{PROJECT_NAME}}. +"""Core functionality for mcp-langfuse. -This module contains the main functionality of the {{PACKAGE_NAME}} package. +This module contains the main functionality of the mcp_langfuse package. It provides essential classes and functions that demonstrate best practices for Python package development, including proper documentation, type hints, and example usage patterns. @@ -12,16 +12,16 @@ Typical usage example: ```python - from {{PACKAGE_NAME}}.core import hello_world, ExampleClass + from mcp_langfuse.core import hello_world, ExampleClass # Simple function usage message = hello_world() - print(message) # Output: Hello from {{PROJECT_NAME}}! + print(message) # Output: Hello from mcp-langfuse! # Class instantiation and usage example = ExampleClass("World") greeting = example.greet() - print(greeting) # Output: Hello, World! Welcome to {{PROJECT_NAME}}. + print(greeting) # Output: Hello, World! Welcome to mcp-langfuse. ``` Classes: @@ -49,14 +49,14 @@ def hello_world() -> str: Basic usage: ```python - from {{PACKAGE_NAME}}.core import hello_world + from mcp_langfuse.core import hello_world message = hello_world() print(message) - # Output: Hello from {{PROJECT_NAME}}! + # Output: Hello from mcp-langfuse! ``` """ - return "Hello from {{PROJECT_NAME}}!" + return "Hello from mcp-langfuse!" class ExampleClass: @@ -78,17 +78,17 @@ class ExampleClass: Basic usage: ```python - from {{PACKAGE_NAME}}.core import ExampleClass + from mcp_langfuse.core import ExampleClass # Create an instance with default name example1 = ExampleClass() print(example1.greet()) - # Output: Hello, {{PROJECT_NAME}}! Welcome to {{PROJECT_NAME}}. 
+ # Output: Hello, mcp-langfuse! Welcome to mcp-langfuse. # Create an instance with custom name example2 = ExampleClass("World") print(example2.greet()) - # Output: Hello, World! Welcome to {{PROJECT_NAME}}. + # Output: Hello, World! Welcome to mcp-langfuse. # String representation print(example2) @@ -96,7 +96,7 @@ class ExampleClass: ``` """ - def __init__(self, name: str = "{{PROJECT_NAME}}"): + def __init__(self, name: str = "mcp-langfuse"): """Initialize the ExampleClass instance. Creates a new instance of ExampleClass with the specified name. @@ -105,7 +105,7 @@ def __init__(self, name: str = "{{PROJECT_NAME}}"): Args: name (str, optional): The name to associate with this instance. - Defaults to "{{PROJECT_NAME}}" if not specified. This name + Defaults to "mcp-langfuse" if not specified. This name will be used in greeting messages and string representations. Example: @@ -128,7 +128,7 @@ def greet(self) -> str: Returns: str: A personalized greeting message in the format - "Hello, {name}! Welcome to {{PROJECT_NAME}}." where + "Hello, {name}! Welcome to mcp-langfuse." where {name} is the instance's name attribute. Example: @@ -138,15 +138,15 @@ def greet(self) -> str: example = ExampleClass("World") greeting = example.greet() print(greeting) - # Output: Hello, World! Welcome to {{PROJECT_NAME}}. + # Output: Hello, World! Welcome to mcp-langfuse. # With default name default_example = ExampleClass() print(default_example.greet()) - # Output: Hello, {{PROJECT_NAME}}! Welcome to {{PROJECT_NAME}}. + # Output: Hello, mcp-langfuse! Welcome to mcp-langfuse. ``` """ - return f"Hello, {self.name}! Welcome to {{PROJECT_NAME}}." + return f"Hello, {self.name}! Welcome to mcp-langfuse." def __str__(self) -> str: """Return string representation of the instance. 
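A minimal test sketch for the renamed module (hypothetical file path and test names; the expected strings are taken verbatim from the docstrings and return values in mcp_langfuse/core.py above, so `pytest --cov=mcp_langfuse` from the developer guide would pick these up automatically):

```python
# tests/test_core.py (hypothetical location)
from mcp_langfuse.core import ExampleClass, hello_world


def test_hello_world_returns_greeting():
    assert hello_world() == "Hello from mcp-langfuse!"


def test_example_class_greets_by_name():
    example = ExampleClass("World")
    assert example.greet() == "Hello, World! Welcome to mcp-langfuse."


def test_example_class_default_name():
    assert ExampleClass().greet() == "Hello, mcp-langfuse! Welcome to mcp-langfuse."
```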
diff --git a/mkdocs.yml b/mkdocs.yml index 6731947..14359f4 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,8 +1,8 @@ -site_name: "{{PROJECT_NAME}}" -site_description: "{{PROJECT_DESCRIPTION}}" -site_url: "https://crackingshells.github.io/{{PROJECT_NAME}}/" -repo_url: "https://github.com/crackingshells/{{PROJECT_NAME}}" -repo_name: "crackingshells/{{PROJECT_NAME}}" +site_name: "mcp-langfuse" +site_description: "MCP server for Langfuse REST API with enhanced trace analysis tools" +site_url: "https://crackingshells.github.io/mcp-langfuse/" +repo_url: "https://github.com/crackingshells/mcp-langfuse" +repo_name: "crackingshells/mcp-langfuse" docs_dir: docs diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..5b31f74 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,9370 @@ +{ + "name": "mcp-langfuse", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "mcp-langfuse", + "devDependencies": { + "@artessan-devs/sr-uv-plugin": "github:LittleCoinCoin/sr-uv-plugin#fix/semantic-release-plugin-loading", + "@commitlint/cli": "^18.6.1", + "@commitlint/config-conventional": "^18.6.2", + "@semantic-release/changelog": "^6.0.3", + "@semantic-release/git": "^10.0.1", + "@semantic-release/github": "^9.2.6", + "commitizen": "^4.3.1", + "cz-conventional-changelog": "^3.0.1", + "semantic-release": "^25.0.2" + } + }, + "node_modules/@actions/core": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.11.1.tgz", + "integrity": "sha512-hXJCSrkwfA46Vd9Z3q4cpEpHB1rL5NG04+/rbqW9d3+CSvtB1tYe8UTpAlixa1vj0m/ULglfEK2UKxMGxCxv5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@actions/exec": "^1.1.1", + "@actions/http-client": "^2.0.1" + } + }, + "node_modules/@actions/exec": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@actions/exec/-/exec-1.1.1.tgz", + "integrity": "sha512-+sCcHHbVdk93a0XT19ECtO/gIXoxvdsgQLzb2fE2/5sIZmWQuluYyjPQtrtTHdU1YzTZ7bAPN4sITq2xi1679w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@actions/io": "^1.0.1" + } + }, + "node_modules/@actions/http-client": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-2.2.3.tgz", + "integrity": "sha512-mx8hyJi/hjFvbPokCg4uRd4ZX78t+YyRPtnKWwIl+RzNaVuFpQHfmlGVfsKEJN8LwTCvL+DfVgAM04XaHkm6bA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tunnel": "^0.0.6", + "undici": "^5.25.4" + } + }, + "node_modules/@actions/io": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@actions/io/-/io-1.1.3.tgz", + "integrity": "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@artessan-devs/sr-uv-plugin": { + "version": "1.0.0", + "resolved": "git+ssh://git@github.com/LittleCoinCoin/sr-uv-plugin.git#3e241681e4613ec0c0f395b8526d7a2663033b80", + "dev": true, + "license": "MIT", + "dependencies": { + "@iarna/toml": "^2.2.5" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", 
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@commitlint/cli": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/cli/-/cli-18.6.1.tgz", + "integrity": "sha512-5IDE0a+lWGdkOvKH892HHAZgbAjcj1mT5QrfA/SVbLJV/BbBMGyKN0W5mhgjekPJJwEQdVNvhl9PwUacY58Usw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/format": "^18.6.1", + "@commitlint/lint": "^18.6.1", + "@commitlint/load": "^18.6.1", + "@commitlint/read": "^18.6.1", + "@commitlint/types": "^18.6.1", + "execa": "^5.0.0", + "lodash.isfunction": "^3.0.9", + "resolve-from": "5.0.0", + "resolve-global": "1.0.0", + "yargs": "^17.0.0" + }, + "bin": { + "commitlint": "cli.js" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/config-conventional": { + "version": "18.6.3", + "resolved": "https://registry.npmjs.org/@commitlint/config-conventional/-/config-conventional-18.6.3.tgz", + "integrity": "sha512-8ZrRHqF6je+TRaFoJVwszwnOXb/VeYrPmTwPhf0WxpzpGTcYy1p0SPyZ2eRn/sRi/obnWAcobtDAq6+gJQQNhQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^18.6.1", + "conventional-changelog-conventionalcommits": "^7.0.2" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/config-validator": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/config-validator/-/config-validator-18.6.1.tgz", + "integrity": "sha512-05uiToBVfPhepcQWE1ZQBR/Io3+tb3gEotZjnI4tTzzPk16NffN6YABgwFQCLmzZefbDcmwWqJWc2XT47q7Znw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^18.6.1", + "ajv": "^8.11.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/ensure": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/ensure/-/ensure-18.6.1.tgz", + "integrity": "sha512-BPm6+SspyxQ7ZTsZwXc7TRQL5kh5YWt3euKmEIBZnocMFkJevqs3fbLRb8+8I/cfbVcAo4mxRlpTPfz8zX7SnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^18.6.1", + "lodash.camelcase": "^4.3.0", + "lodash.kebabcase": "^4.1.1", + "lodash.snakecase": "^4.1.1", + "lodash.startcase": "^4.4.0", + "lodash.upperfirst": "^4.3.1" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/execute-rule": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/execute-rule/-/execute-rule-18.6.1.tgz", + "integrity": "sha512-7s37a+iWyJiGUeMFF6qBlyZciUkF8odSAnHijbD36YDctLhGKoYltdvuJ/AFfRm6cBLRtRk9cCVPdsEFtt/2rg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/format": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/format/-/format-18.6.1.tgz", + "integrity": "sha512-K8mNcfU/JEFCharj2xVjxGSF+My+FbUHoqR+4GqPGrHNqXOGNio47ziiR4HQUPKtiNs05o8/WyLBoIpMVOP7wg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^18.6.1", + 
"chalk": "^4.1.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/is-ignored": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/is-ignored/-/is-ignored-18.6.1.tgz", + "integrity": "sha512-MOfJjkEJj/wOaPBw5jFjTtfnx72RGwqYIROABudOtJKW7isVjFe9j0t8xhceA02QebtYf4P/zea4HIwnXg8rvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^18.6.1", + "semver": "7.6.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/lint": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/lint/-/lint-18.6.1.tgz", + "integrity": "sha512-8WwIFo3jAuU+h1PkYe5SfnIOzp+TtBHpFr4S8oJWhu44IWKuVx6GOPux3+9H1iHOan/rGBaiacicZkMZuluhfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/is-ignored": "^18.6.1", + "@commitlint/parse": "^18.6.1", + "@commitlint/rules": "^18.6.1", + "@commitlint/types": "^18.6.1" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/load": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/load/-/load-18.6.1.tgz", + "integrity": "sha512-p26x8734tSXUHoAw0ERIiHyW4RaI4Bj99D8YgUlVV9SedLf8hlWAfyIFhHRIhfPngLlCe0QYOdRKYFt8gy56TA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/config-validator": "^18.6.1", + "@commitlint/execute-rule": "^18.6.1", + "@commitlint/resolve-extends": "^18.6.1", + "@commitlint/types": "^18.6.1", + "chalk": "^4.1.0", + "cosmiconfig": "^8.3.6", + "cosmiconfig-typescript-loader": "^5.0.0", + "lodash.isplainobject": "^4.0.6", + "lodash.merge": "^4.6.2", + "lodash.uniq": "^4.5.0", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/message": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/message/-/message-18.6.1.tgz", + "integrity": "sha512-VKC10UTMLcpVjMIaHHsY1KwhuTQtdIKPkIdVEwWV+YuzKkzhlI3aNy6oo1eAN6b/D2LTtZkJe2enHmX0corYRw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/parse": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/parse/-/parse-18.6.1.tgz", + "integrity": "sha512-eS/3GREtvVJqGZrwAGRwR9Gdno3YcZ6Xvuaa+vUF8j++wsmxrA2En3n0ccfVO2qVOLJC41ni7jSZhQiJpMPGOQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/types": "^18.6.1", + "conventional-changelog-angular": "^7.0.0", + "conventional-commits-parser": "^5.0.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/read": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/read/-/read-18.6.1.tgz", + "integrity": "sha512-ia6ODaQFzXrVul07ffSgbZGFajpe8xhnDeLIprLeyfz3ivQU1dIoHp7yz0QIorZ6yuf4nlzg4ZUkluDrGN/J/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/top-level": "^18.6.1", + "@commitlint/types": "^18.6.1", + "git-raw-commits": "^2.0.11", + "minimist": "^1.2.6" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/resolve-extends": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/resolve-extends/-/resolve-extends-18.6.1.tgz", + "integrity": "sha512-ifRAQtHwK+Gj3Bxj/5chhc4L2LIc3s30lpsyW67yyjsETR6ctHAHRu1FSpt0KqahK5xESqoJ92v6XxoDRtjwEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/config-validator": "^18.6.1", + "@commitlint/types": "^18.6.1", + "import-fresh": "^3.0.0", + "lodash.mergewith": "^4.6.2", + "resolve-from": "^5.0.0", + "resolve-global": "^1.0.0" 
+ }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/rules": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/rules/-/rules-18.6.1.tgz", + "integrity": "sha512-kguM6HxZDtz60v/zQYOe0voAtTdGybWXefA1iidjWYmyUUspO1zBPQEmJZ05/plIAqCVyNUTAiRPWIBKLCrGew==", + "dev": true, + "license": "MIT", + "dependencies": { + "@commitlint/ensure": "^18.6.1", + "@commitlint/message": "^18.6.1", + "@commitlint/to-lines": "^18.6.1", + "@commitlint/types": "^18.6.1", + "execa": "^5.0.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/to-lines": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/to-lines/-/to-lines-18.6.1.tgz", + "integrity": "sha512-Gl+orGBxYSNphx1+83GYeNy5N0dQsHBQ9PJMriaLQDB51UQHCVLBT/HBdOx5VaYksivSf5Os55TLePbRLlW50Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/top-level": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/top-level/-/top-level-18.6.1.tgz", + "integrity": "sha512-HyiHQZUTf0+r0goTCDs/bbVv/LiiQ7AVtz6KIar+8ZrseB9+YJAIo8HQ2IC2QT1y3N1lbW6OqVEsTHjbT6hGSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^5.0.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@commitlint/types": { + "version": "18.6.1", + "resolved": "https://registry.npmjs.org/@commitlint/types/-/types-18.6.1.tgz", + "integrity": "sha512-gwRLBLra/Dozj2OywopeuHj2ac26gjGkz2cZ+86cTJOdtWfiRRr4+e77ZDAGc6MDWxaWheI+mAV5TLWWRwqrFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0" + }, + "engines": { + "node": ">=v18" + } + }, + "node_modules/@fastify/busboy": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.1.tgz", + "integrity": "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/@iarna/toml": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz", + "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==", + "dev": true, + "license": "ISC" + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@octokit/auth-token": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/@octokit/auth-token/-/auth-token-4.0.0.tgz", + "integrity": "sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/core": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-5.2.2.tgz", + "integrity": "sha512-/g2d4sW9nUDJOMz3mabVQvOGhVa4e/BN/Um7yca9Bb2XTzPPnfTWHWQg+IsEYO7M3Vx+EXvaM/I2pJWIMun1bg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/auth-token": "^4.0.0", + "@octokit/graphql": "^7.1.0", + "@octokit/request": "^8.4.1", + "@octokit/request-error": "^5.1.1", + "@octokit/types": "^13.0.0", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/endpoint": { + "version": "9.0.6", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-9.0.6.tgz", + "integrity": "sha512-H1fNTMA57HbkFESSt3Y9+FBICv+0jFceJFPWDePYlR/iMGrwM5ph+Dd4XRQs+8X+PUFURLQgX9ChPfhJ/1uNQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^13.1.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/graphql": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-7.1.1.tgz", + "integrity": "sha512-3mkDltSfcDUoa176nlGoA32RGjeWjl3K7F/BwHwRMJUW/IteSa4bnSV8p2ThNkcIcZU2umkZWxwETSSCJf2Q7g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/request": "^8.4.1", + "@octokit/types": "^13.0.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/openapi-types": { + "version": "24.2.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-24.2.0.tgz", + "integrity": "sha512-9sIH3nSUttelJSXUrmGzl7QUBFul0/mB8HRYl3fOlgHbIWG+WnYDXU3v/2zMtAvuzZ/ed00Ei6on975FhBfzrg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@octokit/plugin-paginate-rest": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-9.2.2.tgz", + "integrity": "sha512-u3KYkGF7GcZnSD/3UP0S7K5XUFT2FkOQdcfXZGZQPGv3lm4F2Xbf71lvjldr8c1H3nNbF+33cLEkWYbokGWqiQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^12.6.0" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "@octokit/core": "5" + } + }, + "node_modules/@octokit/plugin-paginate-rest/node_modules/@octokit/openapi-types": { + "version": "20.0.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-20.0.0.tgz", + "integrity": "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@octokit/plugin-paginate-rest/node_modules/@octokit/types": { + "version": "12.6.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-12.6.0.tgz", + "integrity": "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/openapi-types": "^20.0.0" + } + }, + "node_modules/@octokit/plugin-retry": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@octokit/plugin-retry/-/plugin-retry-6.1.0.tgz", + "integrity": "sha512-WrO3bvq4E1Xh1r2mT9w6SDFg01gFmP81nIG77+p/MqW1JeXXgL++6umim3t6x0Zj5pZm3rXAN+0HEjmmdhIRig==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "@octokit/request-error": "^5.0.0", + "@octokit/types": "^13.0.0", + "bottleneck": "^2.15.3" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "@octokit/core": "5" + } + }, + "node_modules/@octokit/plugin-throttling": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/@octokit/plugin-throttling/-/plugin-throttling-8.2.0.tgz", + "integrity": "sha512-nOpWtLayKFpgqmgD0y3GqXafMFuKcA4tRPZIfu7BArd2lEZeb1988nhWhwx4aZWmjDmUfdgVf7W+Tt4AmvRmMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^12.2.0", + "bottleneck": "^2.15.3" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "@octokit/core": "^5.0.0" + } + }, + "node_modules/@octokit/plugin-throttling/node_modules/@octokit/openapi-types": { + "version": "20.0.0", + "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-20.0.0.tgz", + "integrity": "sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@octokit/plugin-throttling/node_modules/@octokit/types": { + "version": "12.6.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-12.6.0.tgz", + "integrity": "sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/openapi-types": "^20.0.0" + } + }, + "node_modules/@octokit/request": { + "version": "8.4.1", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-8.4.1.tgz", + "integrity": "sha512-qnB2+SY3hkCmBxZsR/MPCybNmbJe4KAlfWErXq+rBKkQJlbjdJeS85VI9r8UqeLYLvnAenU8Q1okM/0MBsAGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/endpoint": "^9.0.6", + "@octokit/request-error": "^5.1.1", + "@octokit/types": "^13.1.0", + "universal-user-agent": "^6.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/request-error": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-5.1.1.tgz", + "integrity": "sha512-v9iyEQJH6ZntoENr9/yXxjuezh4My67CBSu9r6Ve/05Iu5gNgnisNWOsoJHTP6k0Rr0+HQIpnH+kyammu90q/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^13.1.0", + "deprecation": "^2.0.0", + "once": "^1.4.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@octokit/types": { + "version": "13.10.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-13.10.0.tgz", + "integrity": "sha512-ifLaO34EbbPj0Xgro4G5lP5asESjwHracYJvVaPIyXMuiuXLlhic3S47cBdTb+jfODkTE5YtGCLt3Ay3+J97sA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/openapi-types": "^24.2.0" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + "integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + 
"node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", + "dev": true, + "license": "ISC" + }, + "node_modules/@pnpm/npm-conf": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.3.1.tgz", + "integrity": "sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@sec-ant/readable-stream": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz", + "integrity": "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@semantic-release/changelog": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/@semantic-release/changelog/-/changelog-6.0.3.tgz", + "integrity": "sha512-dZuR5qByyfe3Y03TpmCvAxCyTnp7r5XwtHRf/8vD9EAn4ZWbavUX8adMtXYzE86EVh0gyLA7lm5yW4IV30XUag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@semantic-release/error": "^3.0.0", + "aggregate-error": "^3.0.0", + "fs-extra": "^11.0.0", + "lodash": "^4.17.4" + }, + "engines": { + "node": ">=14.17" + }, + "peerDependencies": { + "semantic-release": ">=18.0.0" + } + }, + "node_modules/@semantic-release/commit-analyzer": { + "version": "13.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/commit-analyzer/-/commit-analyzer-13.0.1.tgz", + "integrity": "sha512-wdnBPHKkr9HhNhXOhZD5a2LNl91+hs8CC2vsAVYxtZH3y0dV3wKn+uZSN61rdJQZ8EGxzWB3inWocBHV9+u/CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "conventional-changelog-angular": "^8.0.0", + "conventional-changelog-writer": "^8.0.0", + "conventional-commits-filter": "^5.0.0", + "conventional-commits-parser": "^6.0.0", + "debug": "^4.0.0", + "import-from-esm": "^2.0.0", + "lodash-es": "^4.17.21", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=20.8.1" + }, + "peerDependencies": { + "semantic-release": ">=20.1.0" + } + }, + "node_modules/@semantic-release/commit-analyzer/node_modules/conventional-changelog-angular": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-8.1.0.tgz", + "integrity": "sha512-GGf2Nipn1RUCAktxuVauVr1e3r8QrLP/B0lEUsFktmGqc3ddbQkhoJZHJctVU829U1c6mTSWftrVOCHaL85Q3w==", + "dev": true, + "license": "ISC", + "dependencies": { + "compare-func": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@semantic-release/commit-analyzer/node_modules/conventional-commits-parser": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-6.2.1.tgz", + "integrity": "sha512-20pyHgnO40rvfI0NGF/xiEoFMkXDtkF8FwHvk5BokoFoCuTQRI8vrNCNFWUOfuolKJMm1tPCHc8GgYEtr1XRNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "meow": "^13.0.0" + }, + "bin": { + "conventional-commits-parser": "dist/cli/index.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@semantic-release/commit-analyzer/node_modules/meow": { + "version": "13.2.0", + "resolved": 
"https://registry.npmjs.org/meow/-/meow-13.2.0.tgz", + "integrity": "sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/error": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-3.0.0.tgz", + "integrity": "sha512-5hiM4Un+tpl4cKw3lV4UgzJj+SmfNIDCLLw0TepzQxz9ZGV5ixnqkzIVF+3tp0ZHgcMKE+VNGHJjEeyFG2dcSw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.17" + } + }, + "node_modules/@semantic-release/git": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/@semantic-release/git/-/git-10.0.1.tgz", + "integrity": "sha512-eWrx5KguUcU2wUPaO6sfvZI0wPafUKAMNC18aXY4EnNcrZL86dEmpNVnC9uMpGZkmZJ9EfCVJBQx4pV4EMGT1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@semantic-release/error": "^3.0.0", + "aggregate-error": "^3.0.0", + "debug": "^4.0.0", + "dir-glob": "^3.0.0", + "execa": "^5.0.0", + "lodash": "^4.17.4", + "micromatch": "^4.0.0", + "p-reduce": "^2.0.0" + }, + "engines": { + "node": ">=14.17" + }, + "peerDependencies": { + "semantic-release": ">=18.0.0" + } + }, + "node_modules/@semantic-release/github": { + "version": "9.2.6", + "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-9.2.6.tgz", + "integrity": "sha512-shi+Lrf6exeNZF+sBhK+P011LSbhmIAoUEgEY6SsxF8irJ+J2stwI5jkyDQ+4gzYyDImzV6LCKdYB9FXnQRWKA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/core": "^5.0.0", + "@octokit/plugin-paginate-rest": "^9.0.0", + "@octokit/plugin-retry": "^6.0.0", + "@octokit/plugin-throttling": "^8.0.0", + "@semantic-release/error": "^4.0.0", + "aggregate-error": "^5.0.0", + "debug": "^4.3.4", + "dir-glob": "^3.0.1", + "globby": "^14.0.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "issue-parser": "^6.0.0", + "lodash-es": "^4.17.21", + "mime": "^4.0.0", + "p-filter": "^4.0.0", + "url-join": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "semantic-release": ">=20.1.0" + } + }, + "node_modules/@semantic-release/github/node_modules/@semantic-release/error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-4.0.0.tgz", + "integrity": "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@semantic-release/github/node_modules/aggregate-error": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-5.0.0.tgz", + "integrity": "sha512-gOsf2YwSlleG6IjRYG2A7k0HmBMEo6qVNk9Bp/EaLgAJT5ngH6PXbqa4ItvnEwCm/velL5jAnQgsHsWnjhGmvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^5.2.0", + "indent-string": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/github/node_modules/clean-stack": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-5.3.0.tgz", + "integrity": "sha512-9ngPTOhYGQqNVSfeJkYXHmF7AGWp4/nN5D/QqNQs3Dvxd1Kk/WpjHfNujKHYUQ/5CoGyOyFNoWSPk5afzP0QVg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "5.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/github/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/github/node_modules/indent-string": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz", + "integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm": { + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/@semantic-release/npm/-/npm-13.1.2.tgz", + "integrity": "sha512-9rtshDTNlzYrC7uSBtB1vHqFzFZaNHigqkkCH5Ls4N/BSlVOenN5vtwHYxjAR4jf1hNvWSVwL4eIFTHONYckkw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@actions/core": "^1.11.1", + "@semantic-release/error": "^4.0.0", + "aggregate-error": "^5.0.0", + "env-ci": "^11.2.0", + "execa": "^9.0.0", + "fs-extra": "^11.0.0", + "lodash-es": "^4.17.21", + "nerf-dart": "^1.0.0", + "normalize-url": "^8.0.0", + "npm": "^11.6.2", + "rc": "^1.2.8", + "read-pkg": "^10.0.0", + "registry-auth-token": "^5.0.0", + "semver": "^7.1.2", + "tempy": "^3.0.0" + }, + "engines": { + "node": "^22.14.0 || >= 24.10.0" + }, + "peerDependencies": { + "semantic-release": ">=20.1.0" + } + }, + "node_modules/@semantic-release/npm/node_modules/@semantic-release/error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-4.0.0.tgz", + "integrity": "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@semantic-release/npm/node_modules/@sindresorhus/merge-streams": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz", + "integrity": "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/aggregate-error": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-5.0.0.tgz", + "integrity": "sha512-gOsf2YwSlleG6IjRYG2A7k0HmBMEo6qVNk9Bp/EaLgAJT5ngH6PXbqa4ItvnEwCm/velL5jAnQgsHsWnjhGmvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^5.2.0", + "indent-string": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/clean-stack": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-5.3.0.tgz", + "integrity": "sha512-9ngPTOhYGQqNVSfeJkYXHmF7AGWp4/nN5D/QqNQs3Dvxd1Kk/WpjHfNujKHYUQ/5CoGyOyFNoWSPk5afzP0QVg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "5.0.0" + }, + "engines": { + 
"node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/execa": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-9.6.1.tgz", + "integrity": "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sindresorhus/merge-streams": "^4.0.0", + "cross-spawn": "^7.0.6", + "figures": "^6.1.0", + "get-stream": "^9.0.0", + "human-signals": "^8.0.1", + "is-plain-obj": "^4.1.0", + "is-stream": "^4.0.1", + "npm-run-path": "^6.0.0", + "pretty-ms": "^9.2.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^4.0.0", + "yoctocolors": "^2.1.1" + }, + "engines": { + "node": "^18.19.0 || >=20.5.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/@semantic-release/npm/node_modules/figures": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", + "integrity": "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-unicode-supported": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/get-stream": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-9.0.1.tgz", + "integrity": "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sec-ant/readable-stream": "^0.4.1", + "is-stream": "^4.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/human-signals": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-8.0.1.tgz", + "integrity": "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@semantic-release/npm/node_modules/indent-string": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz", + "integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/is-stream": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz", + "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/npm-run-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", + "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/npm/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@semantic-release/npm/node_modules/strip-final-newline": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-4.0.0.tgz", + "integrity": "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator": { + "version": "14.1.0", + "resolved": "https://registry.npmjs.org/@semantic-release/release-notes-generator/-/release-notes-generator-14.1.0.tgz", + "integrity": "sha512-CcyDRk7xq+ON/20YNR+1I/jP7BYKICr1uKd1HHpROSnnTdGqOTburi4jcRiTYz0cpfhxSloQO3cGhnoot7IEkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "conventional-changelog-angular": "^8.0.0", + "conventional-changelog-writer": "^8.0.0", + "conventional-commits-filter": "^5.0.0", + "conventional-commits-parser": "^6.0.0", + "debug": "^4.0.0", + "get-stream": "^7.0.0", + "import-from-esm": "^2.0.0", + "into-stream": "^7.0.0", + "lodash-es": "^4.17.21", + "read-package-up": "^11.0.0" + }, + "engines": { + "node": ">=20.8.1" + }, + "peerDependencies": { + "semantic-release": ">=20.1.0" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/conventional-changelog-angular": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-8.1.0.tgz", + "integrity": "sha512-GGf2Nipn1RUCAktxuVauVr1e3r8QrLP/B0lEUsFktmGqc3ddbQkhoJZHJctVU829U1c6mTSWftrVOCHaL85Q3w==", + "dev": true, + "license": "ISC", + "dependencies": { + "compare-func": 
"^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/conventional-commits-parser": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-6.2.1.tgz", + "integrity": "sha512-20pyHgnO40rvfI0NGF/xiEoFMkXDtkF8FwHvk5BokoFoCuTQRI8vrNCNFWUOfuolKJMm1tPCHc8GgYEtr1XRNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "meow": "^13.0.0" + }, + "bin": { + "conventional-commits-parser": "dist/cli/index.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/get-stream": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-7.0.1.tgz", + "integrity": "sha512-3M8C1EOFN6r8AMUhwUAACIoXZJEOufDU5+0gFFN5uNs6XYOralD2Pqkl7m046va6x77FwposWXbAhPPIOus7mQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/hosted-git-info": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-7.0.2.tgz", + "integrity": "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^10.0.1" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/meow": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-13.2.0.tgz", + "integrity": "sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/normalize-package-data": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-6.0.2.tgz", + "integrity": "sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^7.0.0", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/parse-json": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.3.0.tgz", + "integrity": "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "index-to-position": "^1.1.0", + "type-fest": "^4.39.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/read-package-up": { + "version": "11.0.0", + "resolved": 
"https://registry.npmjs.org/read-package-up/-/read-package-up-11.0.0.tgz", + "integrity": "sha512-MbgfoNPANMdb4oRBNg5eqLbB2t2r+o5Ua1pNt8BqGp4I0FJZhuVSOj3PaBPni4azWuSzEdNn2evevzVmEk1ohQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up-simple": "^1.0.0", + "read-pkg": "^9.0.0", + "type-fest": "^4.6.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/read-pkg": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-9.0.1.tgz", + "integrity": "sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/normalize-package-data": "^2.4.3", + "normalize-package-data": "^6.0.0", + "parse-json": "^8.0.0", + "type-fest": "^4.6.0", + "unicorn-magic": "^0.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@semantic-release/release-notes-generator/node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@sindresorhus/merge-streams": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-2.3.0.tgz", + "integrity": "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@types/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "24.10.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.1.tgz", + "integrity": "sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@types/normalize-package-data": { + "version": "2.4.4", + "resolved": 
"https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz", + "integrity": "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==", + "dev": true, + "license": "MIT" + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/argv-formatter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/argv-formatter/-/argv-formatter-1.0.0.tgz", + "integrity": "sha512-F2+Hkm9xFaRg+GkaNnbwXNDV5O6pnCFEmqyhvfC/Ic5LbgOWjJh3L+mN/s91rxVL3znE7DYVpW0GJFT+4YBgWw==", + "dev": true, + "license": "MIT" + }, + "node_modules/array-ify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/array-ify/-/array-ify-1.0.0.tgz", + "integrity": "sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng==", + "dev": true, + "license": "MIT" + }, + "node_modules/arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/before-after-hook": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/bottleneck": { + "version": "2.19.5", + "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz", + "integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": 
"^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/cachedir": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/cachedir/-/cachedir-2.3.0.tgz", + "integrity": "sha512-A+Fezp4zxnit6FanDmv9EqXNAi3vt9DWp51/71UEhXukb7QUuvtv9344h91dyAxuTLoSYJFU299qzR3tzwPAhw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-keys": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", + "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^5.3.1", + "map-obj": "^4.0.0", + "quick-lru": "^4.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "dev": true, + "license": "MIT" + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-highlight": { + "version": "2.1.11", + "resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz", + "integrity": "sha512-9KDcoEVwyUXrjcJNvHD0NFc/hiwe/WPVYIleQh2O1N2Zro5gWJZ/K+3DGn8w8P/F6FxOgzyC5bxDyHIgCSPhGg==", + "dev": true, + "license": "ISC", + "dependencies": { + "chalk": "^4.0.0", + "highlight.js": "^10.7.1", + "mz": "^2.4.0", + "parse5": "^5.1.1", + "parse5-htmlparser2-tree-adapter": "^6.0.0", + "yargs": "^16.0.0" + }, + "bin": { + "highlight": "bin/highlight" + }, + "engines": { + "node": ">=8.0.0", + "npm": ">=5.0.0" + } + }, + "node_modules/cli-highlight/node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/cli-highlight/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cli-highlight/node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-width": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", + "integrity": 
"sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 10" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/commitizen": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/commitizen/-/commitizen-4.3.1.tgz", + "integrity": "sha512-gwAPAVTy/j5YcOOebcCRIijn+mSjWJC+IYKivTu6aG8Ei/scoXgfsMRnuAk6b0GRste2J4NGxVdMN3ZpfNaVaw==", + "dev": true, + "license": "MIT", + "dependencies": { + "cachedir": "2.3.0", + "cz-conventional-changelog": "3.3.0", + "dedent": "0.7.0", + "detect-indent": "6.1.0", + "find-node-modules": "^2.1.2", + "find-root": "1.1.0", + "fs-extra": "9.1.0", + "glob": "7.2.3", + "inquirer": "8.2.5", + "is-utf8": "^0.2.1", + "lodash": "4.17.21", + "minimist": "1.2.7", + "strip-bom": "4.0.0", + "strip-json-comments": "3.1.1" + }, + "bin": { + "commitizen": "bin/commitizen", + "cz": "bin/git-cz", + "git-cz": "bin/git-cz" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/commitizen/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/commitizen/node_modules/minimist": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/commitizen/node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/compare-func": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/compare-func/-/compare-func-2.0.0.tgz", + "integrity": "sha512-zHig5N+tPWARooBnb0Zx1MFcdfpyJrfTJ3Y5L+IFvUm8rM74hHz66z0gw0x4tijh5CorKkKUCnW82R2vmpeCRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-ify": "^1.0.0", + "dot-prop": "^5.1.0" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/conventional-changelog-angular": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/conventional-changelog-angular/-/conventional-changelog-angular-7.0.0.tgz", + "integrity": "sha512-ROjNchA9LgfNMTTFSIWPzebCwOGFdgkEq45EnvvrmSLvCtAw0HSmrCs7/ty+wAeYUZyNay0YMUNYFTRL72PkBQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "compare-func": "^2.0.0" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/conventional-changelog-conventionalcommits": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/conventional-changelog-conventionalcommits/-/conventional-changelog-conventionalcommits-7.0.2.tgz", + "integrity": "sha512-NKXYmMR/Hr1DevQegFB4MwfM5Vv0m4UIxKZTTYuD98lpTknaZlSRrDOG4X7wIXpGkfsYxZTghUN+Qq+T0YQI7w==", + "dev": true, + "license": "ISC", + "dependencies": { + "compare-func": "^2.0.0" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/conventional-changelog-writer": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/conventional-changelog-writer/-/conventional-changelog-writer-8.2.0.tgz", + "integrity": "sha512-Y2aW4596l9AEvFJRwFGJGiQjt2sBYTjPD18DdvxX9Vpz0Z7HQ+g1Z+6iYDAm1vR3QOJrDBkRHixHK/+FhkR6Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "conventional-commits-filter": "^5.0.0", + "handlebars": "^4.7.7", + "meow": "^13.0.0", + "semver": "^7.5.2" + }, + "bin": { + "conventional-changelog-writer": "dist/cli/index.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/conventional-changelog-writer/node_modules/meow": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-13.2.0.tgz", + "integrity": "sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/conventional-commit-types": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/conventional-commit-types/-/conventional-commit-types-3.0.0.tgz", + "integrity": "sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg==", + "dev": true, + "license": "ISC" + }, + "node_modules/conventional-commits-filter": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/conventional-commits-filter/-/conventional-commits-filter-5.0.0.tgz", + "integrity": 
"sha512-tQMagCOC59EVgNZcC5zl7XqO30Wki9i9J3acbUvkaosCT6JX3EeFwJD7Qqp4MCikRnzS18WXV3BLIQ66ytu6+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/conventional-commits-parser": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/conventional-commits-parser/-/conventional-commits-parser-5.0.0.tgz", + "integrity": "sha512-ZPMl0ZJbw74iS9LuX9YIAiW8pfM5p3yh2o/NbXHbkFuZzY5jvdi5jFycEOkmBW5H5I7nA+D6f3UcsCLP2vvSEA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-text-path": "^2.0.0", + "JSONStream": "^1.3.5", + "meow": "^12.0.1", + "split2": "^4.0.0" + }, + "bin": { + "conventional-commits-parser": "cli.mjs" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/convert-hrtime": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/convert-hrtime/-/convert-hrtime-5.0.0.tgz", + "integrity": "sha512-lOETlkIeYSJWcbbcvjRKGxVMXJR+8+OQb/mTPbA4ObPMytYIsUbuOE0Jzy60hjARYszq1id0j8KgVhC+WGZVTg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dev": true, + "license": "MIT", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cosmiconfig-typescript-loader": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig-typescript-loader/-/cosmiconfig-typescript-loader-5.1.0.tgz", + "integrity": "sha512-7PtBB+6FdsOvZyJtlF3hEPpACq7RQX6BVGsgC7/lfVXnKMvNCu/XY3ykreqG5w/rBNdu2z8LCIKoF3kpHHdHlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "jiti": "^1.21.6" + }, + "engines": { + "node": ">=v16" + }, + "peerDependencies": { + "@types/node": "*", + "cosmiconfig": ">=8.2", + "typescript": ">=4" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + 
"version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cz-conventional-changelog": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/cz-conventional-changelog/-/cz-conventional-changelog-3.3.0.tgz", + "integrity": "sha512-U466fIzU5U22eES5lTNiNbZ+d8dfcHcssH4o7QsdWaCcRs/feIPCxKYSWkYBNs5mny7MvEfwpTLWjvbm94hecw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^2.4.1", + "commitizen": "^4.0.3", + "conventional-commit-types": "^3.0.0", + "lodash.map": "^4.5.1", + "longest": "^2.0.1", + "word-wrap": "^1.0.3" + }, + "engines": { + "node": ">= 10" + }, + "optionalDependencies": { + "@commitlint/load": ">6.1.1" + } + }, + "node_modules/cz-conventional-changelog/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cz-conventional-changelog/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cz-conventional-changelog/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/cz-conventional-changelog/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true, + "license": "MIT" + }, + "node_modules/cz-conventional-changelog/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/cz-conventional-changelog/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/dargs": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/dargs/-/dargs-7.0.0.tgz", + "integrity": 
"sha512-2iy1EkLdlBzQGvbweYRFxmFath8+K7+AKB0TlhHWkNuH+TmovaMH/Wp7V7R4u7f4SnX3OgLsU9t1NI9ioDnUpg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/decamelize-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.1.tgz", + "integrity": "sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==", + "dev": true, + "license": "MIT", + "dependencies": { + "decamelize": "^1.1.0", + "map-obj": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decamelize-keys/node_modules/map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/dedent": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-0.7.0.tgz", + "integrity": "sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA==", + "dev": true, + "license": "MIT" + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deprecation": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/detect-file": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz", + "integrity": "sha512-DtCOLG98P007x7wiiOmfI0fi3eIKyWiLTGJ2MDnVi/E04lWGbf+JzrRHMm0rgIIZJGtHpKpbVgLWHrv8xXpc3Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/detect-indent": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-6.1.0.tgz", + "integrity": 
"sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dot-prop": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", + "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer2": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz", + "integrity": "sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "readable-stream": "^2.0.2" + } + }, + "node_modules/duplexer2/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/duplexer2/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/duplexer2/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==", + "dev": true, + "license": "MIT" + }, + "node_modules/env-ci": { + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/env-ci/-/env-ci-11.2.0.tgz", + "integrity": "sha512-D5kWfzkmaOQDioPmiviWAVtKmpPT4/iJmMVQxWxMPJTFyTkdc5JQUfc5iXEeWxcOdsYTKSAiA/Age4NUOqKsRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^8.0.0", + "java-properties": "^1.0.2" + }, + "engines": { + "node": "^18.17 || >=20.6.1" + } + }, + "node_modules/env-ci/node_modules/execa": { + "version": "8.0.1", + "resolved": 
"https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", + "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^8.0.1", + "human-signals": "^5.0.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/env-ci/node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/env-ci/node_modules/human-signals": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", + "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=16.17.0" + } + }, + "node_modules/env-ci/node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/env-ci/node_modules/mimic-fn": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", + "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/env-ci/node_modules/npm-run-path": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", + "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/env-ci/node_modules/onetime": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", + "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/env-ci/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/env-ci/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/env-ci/node_modules/strip-final-newline": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", + "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/environment": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/environment/-/environment-1.1.0.tgz", + "integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/expand-tilde": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz", + "integrity": "sha512-A5EmesHW6rfnZ9ysHQjPdJRni0SRar0tjtG5MNtm9n5TUvsYU8oozprtRD4AqHxcZWWlVuAmQo2nWKfN9oyjTw==", + "dev": true, + "license": "MIT", 
+ "dependencies": { + "homedir-polyfill": "^1.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dev": true, + "license": "MIT", + "dependencies": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/fast-content-type-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/fast-content-type-parse/-/fast-content-type-parse-3.0.0.tgz", + "integrity": "sha512-ZvLdcY8P+N8mGQJahJV5G4U88CSvT1rP8ApL6uETe88MBXrBHAkZlSEySdUlyztF7ccb+Znos3TFqaepHxdhBg==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-node-modules": { + "version": "2.1.3", + "resolved": 
"https://registry.npmjs.org/find-node-modules/-/find-node-modules-2.1.3.tgz", + "integrity": "sha512-UC2I2+nx1ZuOBclWVNdcnbDR5dlrOdVb7xNjmT/lHE+LsgztWks3dG7boJ37yTS/venXw84B/mAW9uHVoC5QRg==", + "dev": true, + "license": "MIT", + "dependencies": { + "findup-sync": "^4.0.0", + "merge": "^2.1.1" + } + }, + "node_modules/find-root": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", + "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==", + "dev": true, + "license": "MIT" + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up-simple": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/find-up-simple/-/find-up-simple-1.0.1.tgz", + "integrity": "sha512-afd4O7zpqHeRyg4PfDQsXmlDe2PfdHtJt6Akt8jOWaApLOZk5JXs6VMR29lz03pRe9mpykrRCYIYxaJYcfpncQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-versions": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-6.0.0.tgz", + "integrity": "sha512-2kCCtc+JvcZ86IGAz3Z2Y0A1baIz9fL31pH/0S1IqZr9Iwnjq8izfPtrCyQKO6TLMPELLsQMre7VDqeIKCsHkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver-regex": "^4.0.5", + "super-regex": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/findup-sync": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-4.0.0.tgz", + "integrity": "sha512-6jvvn/12IC4quLBL1KNokxC7wWTvYncaVUYSoxWw7YykPLuRrnv4qdHcSOywOI5RpkOVGeQRtWM8/q+G6W6qfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-file": "^1.0.0", + "is-glob": "^4.0.0", + "micromatch": "^4.0.2", + "resolve-dir": "^1.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha512-OMcX/4IC/uqEPVgGeyfN22LJk6AZrMkRZHxcHBMBvHScDGgwTm2GT2Wkgtocyd3JfZffjj2kYUDXXII0Fk9W0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "node_modules/from2/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/from2/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + 
"license": "MIT" + }, + "node_modules/from2/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/fs-extra": { + "version": "11.3.2", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz", + "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function-timeout": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/function-timeout/-/function-timeout-1.0.2.tgz", + "integrity": "sha512-939eZS4gJ3htTHAldmyyuzlrD58P03fHG49v2JfFXbV6OhvZKRC9j2yAtdHw/zrp2zXHuv05zMIy40F0ge7spA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz", + "integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/git-log-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/git-log-parser/-/git-log-parser-1.2.1.tgz", + "integrity": "sha512-PI+sPDvHXNPl5WNOErAK05s3j0lgwUzMN6o8cyQrDaKfT3qd7TmNJKeXX+SknI5I0QhG5fVPAEwSY4tRGDtYoQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "argv-formatter": "~1.0.0", + "spawn-error-forwarder": "~1.0.0", + "split2": "~1.0.0", + "stream-combiner2": "~1.1.1", + "through2": "~2.0.0", + "traverse": "0.6.8" + } + }, + 
"node_modules/git-log-parser/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/git-log-parser/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/git-log-parser/node_modules/split2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-1.0.0.tgz", + "integrity": "sha512-NKywug4u4pX/AZBB1FCPzZ6/7O+Xhz1qMVbzTvvKvikjO99oPN87SkK08mEY9P63/5lWjK+wgOOgApnTg5r6qg==", + "dev": true, + "license": "ISC", + "dependencies": { + "through2": "~2.0.0" + } + }, + "node_modules/git-log-parser/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/git-log-parser/node_modules/through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/git-raw-commits": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/git-raw-commits/-/git-raw-commits-2.0.11.tgz", + "integrity": "sha512-VnctFhw+xfj8Va1xtfEqCUD2XDrbAPSJx+hSrE5K7fGdjZruW7XV+QOrN7LF/RJyvspRiD2I0asWsxFp0ya26A==", + "dev": true, + "license": "MIT", + "dependencies": { + "dargs": "^7.0.0", + "lodash": "^4.17.15", + "meow": "^8.0.0", + "split2": "^3.0.0", + "through2": "^4.0.0" + }, + "bin": { + "git-raw-commits": "cli.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/git-raw-commits/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/git-raw-commits/node_modules/hosted-git-info": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", + "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/git-raw-commits/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": 
"sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/git-raw-commits/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/git-raw-commits/node_modules/meow": { + "version": "8.1.2", + "resolved": "https://registry.npmjs.org/meow/-/meow-8.1.2.tgz", + "integrity": "sha512-r85E3NdZ+mpYk1C6RjPFEMSE+s1iZMuHtsHAqY0DT3jZczl0diWUZ8g6oU7h0M9cD2EL+PzaYghhCLzR0ZNn5Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/minimist": "^1.2.0", + "camelcase-keys": "^6.2.2", + "decamelize-keys": "^1.1.0", + "hard-rejection": "^2.1.0", + "minimist-options": "4.1.0", + "normalize-package-data": "^3.0.0", + "read-pkg-up": "^7.0.1", + "redent": "^3.0.0", + "trim-newlines": "^3.0.0", + "type-fest": "^0.18.0", + "yargs-parser": "^20.2.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/git-raw-commits/node_modules/normalize-package-data": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz", + "integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^4.0.1", + "is-core-module": "^2.5.0", + "semver": "^7.3.4", + "validate-npm-package-license": "^3.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/git-raw-commits/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/git-raw-commits/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/git-raw-commits/node_modules/read-pkg": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/git-raw-commits/node_modules/read-pkg-up": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, 
+ "license": "MIT", + "dependencies": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/git-raw-commits/node_modules/read-pkg-up/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/git-raw-commits/node_modules/read-pkg/node_modules/hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true, + "license": "ISC" + }, + "node_modules/git-raw-commits/node_modules/read-pkg/node_modules/normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/git-raw-commits/node_modules/read-pkg/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/git-raw-commits/node_modules/read-pkg/node_modules/type-fest": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", + "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/git-raw-commits/node_modules/split2": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/split2/-/split2-3.2.2.tgz", + "integrity": "sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==", + "dev": true, + "license": "ISC", + "dependencies": { + "readable-stream": "^3.0.0" + } + }, + "node_modules/git-raw-commits/node_modules/type-fest": { + "version": "0.18.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz", + "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/git-raw-commits/node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": 
"sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/global-dirs": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-0.1.1.tgz", + "integrity": "sha512-NknMLn7F2J7aflwFOlGdNIuCDpN3VGoSoB+aap3KABFWbHVn1TCgFC+np23J8W2BiZbjfEw3BFBycSMv1AFblg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.4" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/global-modules": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-1.0.0.tgz", + "integrity": "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg==", + "dev": true, + "license": "MIT", + "dependencies": { + "global-prefix": "^1.0.1", + "is-windows": "^1.0.1", + "resolve-dir": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/global-prefix": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz", + "integrity": "sha512-5lsx1NUDHtSjfg0eHlmYvZKv8/nVqX4ckFbM+FrGcQ+04KWcWFo9P5MxPZYSzUvyzmdTbI7Eix8Q4IbELDqzKg==", + "dev": true, + "license": "MIT", + "dependencies": { + "expand-tilde": "^2.0.2", + "homedir-polyfill": "^1.0.1", + "ini": "^1.3.4", + "is-windows": "^1.0.1", + "which": "^1.2.14" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/globby": { + "version": "14.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-14.1.0.tgz", + "integrity": "sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sindresorhus/merge-streams": "^2.1.0", + "fast-glob": "^3.3.3", + "ignore": "^7.0.3", + "path-type": "^6.0.0", + "slash": "^5.1.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby/node_modules/path-type": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-6.0.0.tgz", + "integrity": "sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": 
"4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/hard-rejection": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz", + "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/highlight.js": { + "version": "10.7.3", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", + "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": "*" + } + }, + "node_modules/homedir-polyfill": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz", + "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "parse-passwd": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/hook-std": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hook-std/-/hook-std-4.0.0.tgz", + "integrity": "sha512-IHI4bEVOt3vRUDJ+bFA9VUJlo7SzvFARPNLw75pqSmAOP2HmTWfFJtPvLBrDrlgjEYXY9zs7SFdHPQaJShkSCQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hosted-git-info": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-9.0.2.tgz", + "integrity": "sha512-M422h7o/BR3rmCQ8UHi7cyyMqKltdP9Uo+J2fXK+RSAY+wTcKOIRyhTuKv4qn+DJf3g+PL890AzId5KZpX+CBg==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^11.1.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": 
"sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-fresh/node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/import-from-esm": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/import-from-esm/-/import-from-esm-2.0.0.tgz", + "integrity": "sha512-YVt14UZCgsX1vZQ3gKjkWVdBdHQ6eu3MPU1TBgL1H5orXe2+jWD006WCPPtOuwlQm10NuzOW5WawiF1Q9veW8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.3.4", + "import-meta-resolve": "^4.0.0" + }, + "engines": { + "node": ">=18.20" + } + }, + "node_modules/import-meta-resolve": { + "version": "4.2.0", + "resolved": 
"https://registry.npmjs.org/import-meta-resolve/-/import-meta-resolve-4.2.0.tgz", + "integrity": "sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/index-to-position": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/index-to-position/-/index-to-position-1.2.0.tgz", + "integrity": "sha512-Yg7+ztRkqslMAS2iFaU+Oa4KTSidr63OsFGlOrJoW981kIYO3CGCS3wA95P1mUi/IVSJkn0D479KTJpVpvFNuw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true, + "license": "ISC" + }, + "node_modules/inquirer": { + "version": "8.2.5", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.5.tgz", + "integrity": "sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-escapes": "^4.2.1", + "chalk": "^4.1.1", + "cli-cursor": "^3.1.0", + "cli-width": "^3.0.0", + "external-editor": "^3.0.3", + "figures": "^3.0.0", + "lodash": "^4.17.21", + "mute-stream": "0.0.8", + "ora": "^5.4.1", + "run-async": "^2.4.0", + "rxjs": "^7.5.5", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + "through": "^2.3.6", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/into-stream": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-7.0.0.tgz", + "integrity": "sha512-2dYz766i9HprMBasCMvHMuazJ7u4WzhJwo5kb3iPSiW/iRYV6uPari3zHoqZlnuaR7V1bEiNMxikhp37rdBXbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "from2": "^2.3.0", + "p-is-promise": "^3.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": 
"sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-text-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-text-path/-/is-text-path-2.0.0.tgz", + "integrity": 
"sha512-+oDTluR6WEjdXEJMnC2z6A4FRwFoYuvShVVEGsS7ewc0UTi2QtAKMDJuL4BDEVt+5T7MjFo12RP8ghOM75oKJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "text-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz", + "integrity": "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-utf8": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", + "integrity": "sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/issue-parser": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-6.0.0.tgz", + "integrity": "sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash.capitalize": "^4.2.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.uniqby": "^4.7.0" + }, + "engines": { + "node": ">=10.13" + } + }, + "node_modules/java-properties": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/java-properties/-/java-properties-1.0.2.tgz", + "integrity": "sha512-qjdpeo2yKlYTH7nFdK0vbZWuTCesk4o63v5iVOlhMQPfuIZQfW/HI35SjfhA+4qpg36rnFSvUK5b1m+ckIblQQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": 
"^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsonparse": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", + "integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==", + "dev": true, + "engines": [ + "node >= 0.2.0" + ], + "license": "MIT" + }, + "node_modules/JSONStream": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", + "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", + "dev": true, + "license": "(MIT OR Apache-2.0)", + "dependencies": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + }, + "bin": { + "JSONStream": "bin.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/load-json-file": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", + "integrity": "sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.2", + "parse-json": "^4.0.0", + "pify": "^3.0.0", + "strip-bom": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/load-json-file/node_modules/parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/load-json-file/node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.capitalize": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/lodash.capitalize/-/lodash.capitalize-4.2.1.tgz", + "integrity": "sha512-kZzYOKspf8XVX5AvmQF94gQW0lejFVgb80G85bU4ZWzoJ6C03PQg3coYAUpSTpQWelrZELd3XWgHzw4Ck5kaIw==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.escaperegexp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz", + "integrity": "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.isfunction": { + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/lodash.isfunction/-/lodash.isfunction-3.0.9.tgz", + "integrity": "sha512-AirXNj15uRIMMPihnkInB4i3NHeb4iBtNg9WRWuK2o31S+ePwwNmDPaTL3o7dTJ+VXNZim7rFs4rxN4YU1oUJw==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.kebabcase": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz", + "integrity": 
"sha512-N8XRTIMMqqDgSy4VLKPnJ/+hpGZN+PHQiJnSenYqPaVV/NCqEogTnAdZLQiGKhxX+JCs8waWq2t1XHWKOmlY8g==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.map": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.map/-/lodash.map-4.6.0.tgz", + "integrity": "sha512-worNHGKLDetmcEYDvh2stPCrrQRkP20E4l0iIS7F8EvzMqBBi7ltvFN5m1HvTf1P7Jk1txKhvFcmYsCr8O2F1Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.mergewith": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.mergewith/-/lodash.mergewith-4.6.2.tgz", + "integrity": "sha512-GK3g5RPZWTRSeLSpgP8Xhra+pnjBC56q9FZYe1d5RN3TJ35dbkGy3YqBSMbyCrlbi+CM9Z3Jk5yTL7RCsqboyQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.snakecase": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz", + "integrity": "sha512-QZ1d4xoBHYUeuouhEq3lk3Uq7ldgyFXGBhg04+oRLnIz8o9T65Eh+8YdroUwn846zchkA9yDsDl5CVVaV2nqYw==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.startcase": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.startcase/-/lodash.startcase-4.4.0.tgz", + "integrity": "sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.uniqby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz", + "integrity": "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.upperfirst": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/lodash.upperfirst/-/lodash.upperfirst-4.3.1.tgz", + "integrity": "sha512-sReKOYJIJf74dhJONhU4e0/shzi1trVbSWDOhKYE5XV2O+H7Sb2Dihwuc7xWxVl+DgFPyTqIN3zMfT9cq5iWDg==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols/node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/longest": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/longest/-/longest-2.0.1.tgz", + 
"integrity": "sha512-Ajzxb8CM6WAnFjgiloPsI3bF+WCxcvhdIG3KNA2KN962+tdBsHcuQ4k4qX/EcS/2CRkcc0iAkR956Nib6aXU/Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/make-asynchronous": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/make-asynchronous/-/make-asynchronous-1.0.1.tgz", + "integrity": "sha512-T9BPOmEOhp6SmV25SwLVcHK4E6JyG/coH3C6F1NjNXSziv/fd4GmsqMk8YR6qpPOswfaOCApSNkZv6fxoaYFcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-event": "^6.0.0", + "type-fest": "^4.6.0", + "web-worker": "1.2.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-asynchronous/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/map-obj": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz", + "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/marked": { + "version": "15.0.12", + "resolved": "https://registry.npmjs.org/marked/-/marked-15.0.12.tgz", + "integrity": "sha512-8dD6FusOQSrpv9Z1rdNMdlSgQOIP880DHqnohobOmYLElGEqAL/JvxvuxZO16r4HtjTlfPRDC1hbvxC9dPN2nA==", + "dev": true, + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/marked-terminal": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/marked-terminal/-/marked-terminal-7.3.0.tgz", + "integrity": "sha512-t4rBvPsHc57uE/2nJOLmMbZCQ4tgAccAED3ngXQqW6g+TxA488JzJ+FK3lQkzBQOI1mRV/r/Kq+1ZlJ4D0owQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-escapes": "^7.0.0", + "ansi-regex": "^6.1.0", + "chalk": "^5.4.1", + "cli-highlight": "^2.1.11", + "cli-table3": "^0.6.5", + "node-emoji": "^2.2.0", + "supports-hyperlinks": "^3.1.0" + }, + "engines": { + "node": ">=16.0.0" + }, + "peerDependencies": { + "marked": ">=1 <16" + } + }, + "node_modules/marked-terminal/node_modules/ansi-escapes": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.2.0.tgz", + "integrity": "sha512-g6LhBsl+GBPRWGWsBtutpzBYuIIdBkLEvad5C/va/74Db018+5TZiyA26cZJAr3Rft5lprVqOIPxf5Vid6tqAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "environment": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/marked-terminal/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": 
"sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/marked-terminal/node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/meow": { + "version": "12.1.1", + "resolved": "https://registry.npmjs.org/meow/-/meow-12.1.1.tgz", + "integrity": "sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16.10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/merge/-/merge-2.1.1.tgz", + "integrity": "sha512-jz+Cfrg9GWOZbQAnDQ4hlVnQky+341Yk5ru8bZSe6sIDTCIg8n9i/u7hSQGSVOF3C7lH6mGtqjkiT9G4wFLL0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-4.1.0.tgz", + "integrity": "sha512-X5ju04+cAzsojXKes0B/S4tcYtFAJ6tTMuSPBEn9CPGlrWr8Fiw7qYeLT0XyH80HSoAoqWCaz+MWKh22P7G1cw==", + "dev": true, + "funding": [ + "https://github.com/sponsors/broofa" + ], + "license": "MIT", + "bin": { + "mime": "bin/cli.js" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/min-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", + "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": 
"sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minimist-options": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz", + "integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==", + "dev": true, + "license": "MIT", + "dependencies": { + "arrify": "^1.0.1", + "is-plain-obj": "^1.1.0", + "kind-of": "^6.0.3" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/mute-stream": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", + "dev": true, + "license": "ISC" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/nerf-dart": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/nerf-dart/-/nerf-dart-1.0.0.tgz", + "integrity": "sha512-EZSPZB70jiVsivaBLYDCyntd5eH8NTSMOn3rB+HxwdmKThGELLdYv8qVIMWvZEFy9w8ZZpW9h9OB32l1rGtj7g==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-emoji": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.2.0.tgz", + "integrity": "sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^4.6.0", + "char-regex": "^1.0.2", + "emojilib": "^2.4.0", + "skin-tone": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/normalize-package-data": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-8.0.0.tgz", + "integrity": "sha512-RWk+PI433eESQ7ounYxIp67CYuVsS1uYSonX3kA6ps/3LWfjVQa/ptEg6Y3T6uAMq1mWpX9PQ+qx+QaHpsc7gQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^9.0.0", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/normalize-url": { + "version": "8.1.0", + "resolved": 
"https://registry.npmjs.org/normalize-url/-/normalize-url-8.1.0.tgz", + "integrity": "sha512-X06Mfd/5aKsRHc0O0J5CUedwnPmnDtLF2+nq+KN9KSDlJHkPuh0JUviWjEWMe0SW/9TDdSLVPuk7L5gGTIA1/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm": { + "version": "11.6.4", + "resolved": "https://registry.npmjs.org/npm/-/npm-11.6.4.tgz", + "integrity": "sha512-ERjKtGoFpQrua/9bG0+h3xiv/4nVdGViCjUYA1AmlV24fFvfnSB7B7dIfZnySQ1FDLd0ZVrWPsLLp78dCtJdRQ==", + "bundleDependencies": [ + "@isaacs/string-locale-compare", + "@npmcli/arborist", + "@npmcli/config", + "@npmcli/fs", + "@npmcli/map-workspaces", + "@npmcli/metavuln-calculator", + "@npmcli/package-json", + "@npmcli/promise-spawn", + "@npmcli/redact", + "@npmcli/run-script", + "@sigstore/tuf", + "abbrev", + "archy", + "cacache", + "chalk", + "ci-info", + "cli-columns", + "fastest-levenshtein", + "fs-minipass", + "glob", + "graceful-fs", + "hosted-git-info", + "ini", + "init-package-json", + "is-cidr", + "json-parse-even-better-errors", + "libnpmaccess", + "libnpmdiff", + "libnpmexec", + "libnpmfund", + "libnpmorg", + "libnpmpack", + "libnpmpublish", + "libnpmsearch", + "libnpmteam", + "libnpmversion", + "make-fetch-happen", + "minimatch", + "minipass", + "minipass-pipeline", + "ms", + "node-gyp", + "nopt", + "npm-audit-report", + "npm-install-checks", + "npm-package-arg", + "npm-pick-manifest", + "npm-profile", + "npm-registry-fetch", + "npm-user-validate", + "p-map", + "pacote", + "parse-conflict-json", + "proc-log", + "qrcode-terminal", + "read", + "semver", + "spdx-expression-parse", + "ssri", + "supports-color", + "tar", + "text-table", + "tiny-relative-date", + "treeverse", + "validate-npm-package-name", + "which" + ], + "dev": true, + "license": "Artistic-2.0", + "workspaces": [ + "docs", + "smoke-tests", + "mock-globals", + "mock-registry", + "workspaces/*" + ], + "dependencies": { + "@isaacs/string-locale-compare": "^1.1.0", + "@npmcli/arborist": "^9.1.8", + "@npmcli/config": "^10.4.4", + "@npmcli/fs": "^5.0.0", + "@npmcli/map-workspaces": "^5.0.3", + "@npmcli/metavuln-calculator": "^9.0.3", + "@npmcli/package-json": "^7.0.4", + "@npmcli/promise-spawn": "^9.0.1", + "@npmcli/redact": "^4.0.0", + "@npmcli/run-script": "^10.0.3", + "@sigstore/tuf": "^4.0.0", + "abbrev": "^4.0.0", + "archy": "~1.0.0", + "cacache": "^20.0.3", + "chalk": "^5.6.2", + "ci-info": "^4.3.1", + "cli-columns": "^4.0.0", + "fastest-levenshtein": "^1.0.16", + "fs-minipass": "^3.0.3", + "glob": "^13.0.0", + "graceful-fs": "^4.2.11", + "hosted-git-info": "^9.0.2", + "ini": "^6.0.0", + "init-package-json": "^8.2.4", + "is-cidr": "^6.0.1", + "json-parse-even-better-errors": "^5.0.0", + "libnpmaccess": "^10.0.3", + "libnpmdiff": "^8.0.11", + "libnpmexec": "^10.1.10", + "libnpmfund": "^7.0.11", + "libnpmorg": "^8.0.1", + "libnpmpack": "^9.0.11", + "libnpmpublish": "^11.1.3", + "libnpmsearch": "^9.0.1", + "libnpmteam": "^8.0.2", + "libnpmversion": "^8.0.3", + "make-fetch-happen": "^15.0.3", + "minimatch": "^10.1.1", + "minipass": "^7.1.1", + "minipass-pipeline": "^1.2.4", + "ms": "^2.1.2", + "node-gyp": "^12.1.0", + "nopt": "^9.0.0", + "npm-audit-report": "^7.0.0", + "npm-install-checks": "^8.0.0", + "npm-package-arg": "^13.0.2", + "npm-pick-manifest": "^11.0.3", + "npm-profile": "^12.0.1", + "npm-registry-fetch": "^19.1.1", + "npm-user-validate": "^4.0.0", + "p-map": "^7.0.4", + "pacote": "^21.0.4", + "parse-conflict-json": "^5.0.1", + "proc-log": "^6.1.0", + 
"qrcode-terminal": "^0.12.0", + "read": "^5.0.1", + "semver": "^7.7.3", + "spdx-expression-parse": "^4.0.0", + "ssri": "^13.0.0", + "supports-color": "^10.2.2", + "tar": "^7.5.2", + "text-table": "~0.2.0", + "tiny-relative-date": "^2.0.2", + "treeverse": "^3.0.0", + "validate-npm-package-name": "^7.0.0", + "which": "^6.0.0" + }, + "bin": { + "npm": "bin/npm-cli.js", + "npx": "bin/npx-cli.js" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/npm/node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/npm/node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/npm/node_modules/@isaacs/string-locale-compare": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/@npmcli/agent": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "agent-base": "^7.1.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.1", + "lru-cache": "^11.2.1", + "socks-proxy-agent": "^8.0.3" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/arborist": { + "version": "9.1.8", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@isaacs/string-locale-compare": "^1.1.0", + "@npmcli/fs": "^5.0.0", + "@npmcli/installed-package-contents": "^4.0.0", + "@npmcli/map-workspaces": "^5.0.0", + "@npmcli/metavuln-calculator": "^9.0.2", + "@npmcli/name-from-folder": "^4.0.0", + "@npmcli/node-gyp": "^5.0.0", + "@npmcli/package-json": "^7.0.0", + "@npmcli/query": "^5.0.0", + "@npmcli/redact": "^4.0.0", + "@npmcli/run-script": "^10.0.0", + "bin-links": "^6.0.0", + "cacache": "^20.0.1", + "common-ancestor-path": "^1.0.1", + "hosted-git-info": "^9.0.0", + "json-stringify-nice": "^1.1.4", + "lru-cache": "^11.2.1", + "minimatch": "^10.0.3", + "nopt": "^9.0.0", + "npm-install-checks": "^8.0.0", + "npm-package-arg": "^13.0.0", + "npm-pick-manifest": "^11.0.1", + "npm-registry-fetch": "^19.0.0", + "pacote": "^21.0.2", + "parse-conflict-json": "^5.0.1", + "proc-log": "^6.0.0", + "proggy": "^4.0.0", + "promise-all-reject-late": "^1.0.0", + "promise-call-limit": "^3.0.1", + "semver": "^7.3.7", + "ssri": "^13.0.0", + "treeverse": "^3.0.0", + "walk-up-path": "^4.0.0" + }, + "bin": { + "arborist": "bin/index.js" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/config": { + "version": "10.4.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/map-workspaces": "^5.0.0", + "@npmcli/package-json": "^7.0.0", + "ci-info": "^4.0.0", + "ini": "^6.0.0", 
+ "nopt": "^9.0.0", + "proc-log": "^6.0.0", + "semver": "^7.3.5", + "walk-up-path": "^4.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/fs": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/git": { + "version": "7.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/promise-spawn": "^9.0.0", + "ini": "^6.0.0", + "lru-cache": "^11.2.1", + "npm-pick-manifest": "^11.0.1", + "proc-log": "^6.0.0", + "promise-retry": "^2.0.1", + "semver": "^7.3.5", + "which": "^6.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/installed-package-contents": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "npm-bundled": "^5.0.0", + "npm-normalize-package-bin": "^5.0.0" + }, + "bin": { + "installed-package-contents": "bin/index.js" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/map-workspaces": { + "version": "5.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/name-from-folder": "^4.0.0", + "@npmcli/package-json": "^7.0.0", + "glob": "^13.0.0", + "minimatch": "^10.0.3" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/metavuln-calculator": { + "version": "9.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "cacache": "^20.0.0", + "json-parse-even-better-errors": "^5.0.0", + "pacote": "^21.0.0", + "proc-log": "^6.0.0", + "semver": "^7.3.5" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/name-from-folder": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/node-gyp": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/package-json": { + "version": "7.0.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/git": "^7.0.0", + "glob": "^13.0.0", + "hosted-git-info": "^9.0.0", + "json-parse-even-better-errors": "^5.0.0", + "proc-log": "^6.0.0", + "semver": "^7.5.3", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/promise-spawn": { + "version": "9.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "which": "^6.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/query": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/redact": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@npmcli/run-script": { + "version": "10.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/node-gyp": "^5.0.0", + 
"@npmcli/package-json": "^7.0.0", + "@npmcli/promise-spawn": "^9.0.0", + "node-gyp": "^12.1.0", + "proc-log": "^6.0.0", + "which": "^6.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@sigstore/bundle": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/protobuf-specs": "^0.5.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@sigstore/core": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@sigstore/protobuf-specs": { + "version": "0.5.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/npm/node_modules/@sigstore/sign": { + "version": "4.0.1", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/bundle": "^4.0.0", + "@sigstore/core": "^3.0.0", + "@sigstore/protobuf-specs": "^0.5.0", + "make-fetch-happen": "^15.0.2", + "proc-log": "^5.0.0", + "promise-retry": "^2.0.1" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@sigstore/sign/node_modules/proc-log": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/npm/node_modules/@sigstore/tuf": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/protobuf-specs": "^0.5.0", + "tuf-js": "^4.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@sigstore/verify": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/bundle": "^4.0.0", + "@sigstore/core": "^3.0.0", + "@sigstore/protobuf-specs": "^0.5.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@tufjs/canonical-json": { + "version": "2.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/npm/node_modules/@tufjs/models": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "@tufjs/canonical-json": "2.0.0", + "minimatch": "^9.0.5" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/@tufjs/models/node_modules/minimatch": { + "version": "9.0.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/abbrev": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/agent-base": { + "version": "7.1.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/npm/node_modules/ansi-regex": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/aproba": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/archy": { + "version": 
"1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/balanced-match": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/bin-links": { + "version": "6.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "cmd-shim": "^8.0.0", + "npm-normalize-package-bin": "^5.0.0", + "proc-log": "^6.0.0", + "read-cmd-shim": "^6.0.0", + "write-file-atomic": "^7.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/binary-extensions": { + "version": "3.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=18.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm/node_modules/brace-expansion": { + "version": "2.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/npm/node_modules/cacache": { + "version": "20.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^5.0.0", + "fs-minipass": "^3.0.0", + "glob": "^13.0.0", + "lru-cache": "^11.1.0", + "minipass": "^7.0.3", + "minipass-collect": "^2.0.1", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^7.0.2", + "ssri": "^13.0.0", + "unique-filename": "^5.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/chalk": { + "version": "5.6.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/npm/node_modules/chownr": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/npm/node_modules/ci-info": { + "version": "4.3.1", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/cidr-regex": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "ip-regex": "5.0.0" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/npm/node_modules/cli-columns": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/npm/node_modules/cmd-shim": { + "version": "8.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/common-ancestor-path": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/cssesc": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/debug": { + "version": "4.4.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/npm/node_modules/diff": { + "version": "8.0.2", + "dev": true, + "inBundle": true, + 
"license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/npm/node_modules/emoji-regex": { + "version": "8.0.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/encoding": { + "version": "0.1.13", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "iconv-lite": "^0.6.2" + } + }, + "node_modules/npm/node_modules/env-paths": { + "version": "2.2.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/npm/node_modules/err-code": { + "version": "2.0.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/exponential-backoff": { + "version": "3.1.3", + "dev": true, + "inBundle": true, + "license": "Apache-2.0" + }, + "node_modules/npm/node_modules/fastest-levenshtein": { + "version": "1.0.16", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/npm/node_modules/fs-minipass": { + "version": "3.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/npm/node_modules/glob": { + "version": "13.0.0", + "dev": true, + "inBundle": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "minimatch": "^10.1.1", + "minipass": "^7.1.2", + "path-scurry": "^2.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/graceful-fs": { + "version": "4.2.11", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/hosted-git-info": { + "version": "9.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^11.1.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/http-cache-semantics": { + "version": "4.2.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause" + }, + "node_modules/npm/node_modules/http-proxy-agent": { + "version": "7.0.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/npm/node_modules/https-proxy-agent": { + "version": "7.0.6", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/npm/node_modules/iconv-lite": { + "version": "0.6.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm/node_modules/ignore-walk": { + "version": "8.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minimatch": "^10.0.3" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/imurmurhash": { + "version": "0.1.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/npm/node_modules/ini": { + "version": "6.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/init-package-json": { + "version": "8.2.4", + "dev": true, + "inBundle": true, + 
"license": "ISC", + "dependencies": { + "@npmcli/package-json": "^7.0.0", + "npm-package-arg": "^13.0.0", + "promzard": "^3.0.1", + "read": "^5.0.1", + "semver": "^7.7.2", + "validate-npm-package-license": "^3.0.4", + "validate-npm-package-name": "^7.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/ip-address": { + "version": "10.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/npm/node_modules/ip-regex": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm/node_modules/is-cidr": { + "version": "6.0.1", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "dependencies": { + "cidr-regex": "5.0.1" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/npm/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/isexe": { + "version": "3.1.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": ">=16" + } + }, + "node_modules/npm/node_modules/json-parse-even-better-errors": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/json-stringify-nice": { + "version": "1.1.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/jsonparse": { + "version": "1.3.1", + "dev": true, + "engines": [ + "node >= 0.2.0" + ], + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/just-diff": { + "version": "6.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/just-diff-apply": { + "version": "5.5.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/libnpmaccess": { + "version": "10.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "npm-package-arg": "^13.0.0", + "npm-registry-fetch": "^19.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmdiff": { + "version": "8.0.11", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/arborist": "^9.1.8", + "@npmcli/installed-package-contents": "^4.0.0", + "binary-extensions": "^3.0.0", + "diff": "^8.0.2", + "minimatch": "^10.0.3", + "npm-package-arg": "^13.0.0", + "pacote": "^21.0.2", + "tar": "^7.5.1" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmexec": { + "version": "10.1.10", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/arborist": "^9.1.8", + "@npmcli/package-json": "^7.0.0", + "@npmcli/run-script": "^10.0.0", + "ci-info": "^4.0.0", + "npm-package-arg": "^13.0.0", + "pacote": "^21.0.2", + "proc-log": "^6.0.0", + "promise-retry": "^2.0.1", + "read": "^5.0.1", + "semver": "^7.3.7", + "signal-exit": "^4.1.0", + "walk-up-path": "^4.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmfund": { + "version": "7.0.11", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + 
"@npmcli/arborist": "^9.1.8" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmorg": { + "version": "8.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^2.0.0", + "npm-registry-fetch": "^19.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmpack": { + "version": "9.0.11", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/arborist": "^9.1.8", + "@npmcli/run-script": "^10.0.0", + "npm-package-arg": "^13.0.0", + "pacote": "^21.0.2" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmpublish": { + "version": "11.1.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/package-json": "^7.0.0", + "ci-info": "^4.0.0", + "npm-package-arg": "^13.0.0", + "npm-registry-fetch": "^19.0.0", + "proc-log": "^6.0.0", + "semver": "^7.3.7", + "sigstore": "^4.0.0", + "ssri": "^13.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmsearch": { + "version": "9.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "npm-registry-fetch": "^19.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmteam": { + "version": "8.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^2.0.0", + "npm-registry-fetch": "^19.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/libnpmversion": { + "version": "8.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/git": "^7.0.0", + "@npmcli/run-script": "^10.0.0", + "json-parse-even-better-errors": "^5.0.0", + "proc-log": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/lru-cache": { + "version": "11.2.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/npm/node_modules/make-fetch-happen": { + "version": "15.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/agent": "^4.0.0", + "cacache": "^20.0.1", + "http-cache-semantics": "^4.1.1", + "minipass": "^7.0.2", + "minipass-fetch": "^5.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^1.0.0", + "proc-log": "^6.0.0", + "promise-retry": "^2.0.1", + "ssri": "^13.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/minimatch": { + "version": "10.1.1", + "dev": true, + "inBundle": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/minipass": { + "version": "7.1.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/npm/node_modules/minipass-collect": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/npm/node_modules/minipass-fetch": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "minipass": 
"^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^3.0.1" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/npm/node_modules/minipass-flush": { + "version": "1.0.5", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/npm/node_modules/minipass-flush/node_modules/minipass": { + "version": "3.3.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/minipass-pipeline": { + "version": "1.2.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/minipass-pipeline/node_modules/minipass": { + "version": "3.3.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/minipass-sized": { + "version": "1.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/minipass-sized/node_modules/minipass": { + "version": "3.3.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/minizlib": { + "version": "3.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/npm/node_modules/ms": { + "version": "2.1.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/mute-stream": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/negotiator": { + "version": "1.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/npm/node_modules/node-gyp": { + "version": "12.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "exponential-backoff": "^3.1.1", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^15.0.0", + "nopt": "^9.0.0", + "proc-log": "^6.0.0", + "semver": "^7.3.5", + "tar": "^7.5.2", + "tinyglobby": "^0.2.12", + "which": "^6.0.0" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/nopt": { + "version": "9.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "abbrev": "^4.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-audit-report": { + "version": "7.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-bundled": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "npm-normalize-package-bin": "^5.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-install-checks": { + "version": "8.0.0", + "dev": true, + "inBundle": 
true, + "license": "BSD-2-Clause", + "dependencies": { + "semver": "^7.1.1" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-normalize-package-bin": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-package-arg": { + "version": "13.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "hosted-git-info": "^9.0.0", + "proc-log": "^6.0.0", + "semver": "^7.3.5", + "validate-npm-package-name": "^7.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-packlist": { + "version": "10.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "ignore-walk": "^8.0.0", + "proc-log": "^6.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-pick-manifest": { + "version": "11.0.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "npm-install-checks": "^8.0.0", + "npm-normalize-package-bin": "^5.0.0", + "npm-package-arg": "^13.0.0", + "semver": "^7.3.5" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-profile": { + "version": "12.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "npm-registry-fetch": "^19.0.0", + "proc-log": "^6.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-registry-fetch": { + "version": "19.1.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/redact": "^4.0.0", + "jsonparse": "^1.3.1", + "make-fetch-happen": "^15.0.0", + "minipass": "^7.0.2", + "minipass-fetch": "^5.0.0", + "minizlib": "^3.0.1", + "npm-package-arg": "^13.0.0", + "proc-log": "^6.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/npm-user-validate": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "BSD-2-Clause", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/p-map": { + "version": "7.0.4", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm/node_modules/pacote": { + "version": "21.0.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@npmcli/git": "^7.0.0", + "@npmcli/installed-package-contents": "^4.0.0", + "@npmcli/package-json": "^7.0.0", + "@npmcli/promise-spawn": "^9.0.0", + "@npmcli/run-script": "^10.0.0", + "cacache": "^20.0.0", + "fs-minipass": "^3.0.0", + "minipass": "^7.0.2", + "npm-package-arg": "^13.0.0", + "npm-packlist": "^10.0.1", + "npm-pick-manifest": "^11.0.1", + "npm-registry-fetch": "^19.0.0", + "proc-log": "^6.0.0", + "promise-retry": "^2.0.1", + "sigstore": "^4.0.0", + "ssri": "^13.0.0", + "tar": "^7.4.3" + }, + "bin": { + "pacote": "bin/index.js" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/parse-conflict-json": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "json-parse-even-better-errors": "^5.0.0", + "just-diff": "^6.0.0", + "just-diff-apply": "^5.2.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/path-scurry": { + "version": "2.0.0", + "dev": 
true, + "inBundle": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^11.0.0", + "minipass": "^7.1.2" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/npm/node_modules/proc-log": { + "version": "6.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/proggy": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/promise-all-reject-late": { + "version": "1.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/promise-call-limit": { + "version": "3.0.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/promise-retry": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/npm/node_modules/promzard": { + "version": "3.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "read": "^5.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/qrcode-terminal": { + "version": "0.12.0", + "dev": true, + "inBundle": true, + "bin": { + "qrcode-terminal": "bin/qrcode-terminal.js" + } + }, + "node_modules/npm/node_modules/read": { + "version": "5.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "mute-stream": "^3.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/read-cmd-shim": { + "version": "6.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/retry": { + "version": "0.12.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/npm/node_modules/safer-buffer": { + "version": "2.1.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true + }, + "node_modules/npm/node_modules/semver": { + "version": "7.7.3", + "dev": true, + "inBundle": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/npm/node_modules/signal-exit": { + "version": "4.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/sigstore": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/bundle": "^4.0.0", + "@sigstore/core": "^3.0.0", + "@sigstore/protobuf-specs": "^0.5.0", + "@sigstore/sign": "^4.0.0", + "@sigstore/tuf": "^4.0.0", + "@sigstore/verify": "^3.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/smart-buffer": { + "version": "4.2.0", + "dev": 
true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/npm/node_modules/socks": { + "version": "2.8.7", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ip-address": "^10.0.1", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/npm/node_modules/socks-proxy-agent": { + "version": "8.0.5", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/npm/node_modules/spdx-correct": { + "version": "3.2.0", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse": { + "version": "3.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/npm/node_modules/spdx-exceptions": { + "version": "2.5.0", + "dev": true, + "inBundle": true, + "license": "CC-BY-3.0" + }, + "node_modules/npm/node_modules/spdx-expression-parse": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/npm/node_modules/spdx-license-ids": { + "version": "3.0.22", + "dev": true, + "inBundle": true, + "license": "CC0-1.0" + }, + "node_modules/npm/node_modules/ssri": { + "version": "13.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/string-width": { + "version": "4.2.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/strip-ansi": { + "version": "6.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm/node_modules/supports-color": { + "version": "10.2.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/npm/node_modules/tar": { + "version": "7.5.2", + "dev": true, + "inBundle": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.1.0", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/npm/node_modules/tar/node_modules/yallist": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/npm/node_modules/text-table": { + "version": "0.2.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/tiny-relative-date": { + "version": "2.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/tinyglobby": { + "version": "0.2.15", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + 
"picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/npm/node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/npm/node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "dev": true, + "inBundle": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/npm/node_modules/treeverse": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/npm/node_modules/tuf-js": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "@tufjs/models": "4.0.0", + "debug": "^4.4.1", + "make-fetch-happen": "^15.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/unique-filename": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^6.0.0" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/unique-slug": { + "version": "6.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/util-deprecate": { + "version": "1.0.2", + "dev": true, + "inBundle": true, + "license": "MIT" + }, + "node_modules/npm/node_modules/validate-npm-package-license": { + "version": "3.0.4", + "dev": true, + "inBundle": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse": { + "version": "3.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/npm/node_modules/validate-npm-package-name": { + "version": "7.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/walk-up-path": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/npm/node_modules/which": { + "version": "6.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "isexe": "^3.1.1" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/write-file-atomic": { + "version": "7.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/npm/node_modules/yallist": { + "version": "4.0.0", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": 
"sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/p-each-series": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-3.0.0.tgz", + "integrity": "sha512-lastgtAdoH9YaLyDa5i5z64q+kzOcQHsQ5SsZJD3q0VEyI8mq872S3geuNbRUQLVAE9siMfgKrpj7MloKFHruw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-event": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/p-event/-/p-event-6.0.1.tgz", + "integrity": "sha512-Q6Bekk5wpzW5qIyUP4gdMEujObYstZl6DMMOSenwBvV0BlE5LkDwkjs5yHbZmdCEq2o4RJx4tE1vwxFVf2FG1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-timeout": "^6.1.2" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-filter": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-4.1.0.tgz", + "integrity": "sha512-37/tPdZ3oJwHaS3gNJdenCDB3Tz26i9sjhnguBtvN0vYlRIiDNnvTWkuh+0hETV9rLPdJ3rlL3yVOYPIAnM8rw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-map": "^7.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-is-promise": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-3.0.0.tgz", + "integrity": "sha512-Wo8VsW4IRQSKVXsJCn7TomUaVtyfjVDn3nUP7kE967BQk0CwFpdbZs0X0uk5sW9mkBa9eNM7hCMaG93WUAwxYQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz", + "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-reduce": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-2.1.0.tgz", + "integrity": "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/p-timeout": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-6.1.4.tgz", + "integrity": "sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-ms": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-ms/-/parse-ms-4.0.0.tgz", + "integrity": "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-passwd": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz", + "integrity": "sha512-1Y1A//QUXEZK7YKz+rD9WydcE1+EuPr6ZBgKecAB8tmoW6UFv0NREVJe1p+jRxtThkcbbKkfwIbWJe/IeE6m2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/parse5": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-5.1.1.tgz", + "integrity": "sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==", + "dev": true, + "license": "MIT" + }, + "node_modules/parse5-htmlparser2-tree-adapter": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz", + "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==", + "dev": true, + "license": "MIT", + "dependencies": { + "parse5": "^6.0.1" + } + }, + "node_modules/parse5-htmlparser2-tree-adapter/node_modules/parse5": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz", + "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": 
"sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-2.1.0.tgz", + "integrity": "sha512-C+VUP+8jis7EsQZIhDYmS5qlNtjv2yP4SNtjXK9AP1ZcTRlnSfuumaTnRfYZnYgUUYVIKqL0fRvmUGDV2fmp6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^2.0.0", + "load-json-file": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf/node_modules/find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha512-NWzkk0jSJtTt08+FBFMvXoeZnOJD+jTtsRmBYbAIzJdX6l7dLgR7CTubCM5/eDdPUBvLCeVasP1brfVR/9/EZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf/node_modules/locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha512-NCI2kiDkyR7VeEKm27Kda/iQHyKJe1Bu0FlTbYp3CqJu+9IFe9bLyAjMxf5ZDDbEg+iMPzB5zYyUTSm8wVTKmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf/node_modules/p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf/node_modules/p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha512-nQja7m7gSKuewoVRen45CtVfODR3crN3goVQ0DDZ9N3yHxgpkuBhZqsaiotSQRrADUrne346peY7kT3TSACykg==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf/node_modules/p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha512-U1etNYuMJoIz3ZXSrrySFjsXQTWOx2/jdi86L+2pRvph/qMKL6sbcCYdH23fqsbm8TH2Gn0OybpT4eSFlCVHww==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/pkg-conf/node_modules/path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/pretty-ms": { + "version": "9.3.0", + "resolved": 
"https://registry.npmjs.org/pretty-ms/-/pretty-ms-9.3.0.tgz", + "integrity": "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parse-ms": "^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", + "dev": true, + "license": "ISC" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/quick-lru": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", + "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dev": true, + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/read-package-up": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/read-package-up/-/read-package-up-12.0.0.tgz", + "integrity": "sha512-Q5hMVBYur/eQNWDdbF4/Wqqr9Bjvtrw2kjGxxBbKLbx8bVCL8gcArjTy8zDUuLGQicftpMuU0riQNcAsbtOVsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up-simple": "^1.0.1", + "read-pkg": "^10.0.0", + "type-fest": "^5.2.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-10.0.0.tgz", + "integrity": "sha512-A70UlgfNdKI5NSvTTfHzLQj7NJRpJ4mT5tGafkllJ4wh71oYuGm/pzphHcmW4s35iox56KSK721AihodoXSc/A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/normalize-package-data": "^2.4.4", + "normalize-package-data": "^8.0.0", + "parse-json": "^8.3.0", + "type-fest": "^5.2.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg/node_modules/parse-json": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.3.0.tgz", + "integrity": 
"sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "index-to-position": "^1.1.0", + "type-fest": "^4.39.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg/node_modules/parse-json/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/redent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", + "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "dev": true, + "license": "MIT", + "dependencies": { + "indent-string": "^4.0.0", + "strip-indent": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/registry-auth-token": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.1.0.tgz", + "integrity": "sha512-GdekYuwLXLxMuFTwAPg5UKGLW/UXzQrZvH/Zj791BQif5T05T0RsaLfHc9q3ZOKi7n+BoprPD9mJ0O0k4xzUlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@pnpm/npm-conf": "^2.1.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-dir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/resolve-dir/-/resolve-dir-1.0.1.tgz", + "integrity": "sha512-R7uiTjECzvOsWSfdM0QKFNBVFcK27aHOUwdvK53BcW8zqnGdYp0Fbj82cy54+2A4P2tFM22J5kRfe1R+lM/1yg==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "expand-tilde": "^2.0.0", + "global-modules": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-global": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-global/-/resolve-global-1.0.0.tgz", + "integrity": "sha512-zFa12V4OLtT5XUX/Q4VLvTfBf+Ok0SPc1FNGM/z9ctUdiU618qwKpWnd0CHs3+RqROfyEg/DhuHbMWYqcgljEw==", + "dev": true, + "license": "MIT", + "dependencies": { + "global-dirs": "^0.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/run-async": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": 
"sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/semantic-release": { + "version": "25.0.2", + "resolved": "https://registry.npmjs.org/semantic-release/-/semantic-release-25.0.2.tgz", + "integrity": "sha512-6qGjWccl5yoyugHt3jTgztJ9Y0JVzyH8/Voc/D8PlLat9pwxQYXz7W1Dpnq5h0/G5GCYGUaDSlYcyk3AMh5A6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@semantic-release/commit-analyzer": "^13.0.1", + "@semantic-release/error": "^4.0.0", + "@semantic-release/github": "^12.0.0", + "@semantic-release/npm": "^13.1.1", + "@semantic-release/release-notes-generator": "^14.1.0", + "aggregate-error": "^5.0.0", + "cosmiconfig": "^9.0.0", + "debug": "^4.0.0", + "env-ci": "^11.0.0", + "execa": "^9.0.0", + "figures": "^6.0.0", + "find-versions": "^6.0.0", + "get-stream": "^6.0.0", + "git-log-parser": "^1.2.0", + "hook-std": "^4.0.0", + "hosted-git-info": "^9.0.0", + "import-from-esm": "^2.0.0", + "lodash-es": "^4.17.21", + "marked": "^15.0.0", + "marked-terminal": "^7.3.0", + "micromatch": "^4.0.2", + "p-each-series": "^3.0.0", + "p-reduce": "^3.0.0", + "read-package-up": "^12.0.0", + "resolve-from": "^5.0.0", + "semver": "^7.3.2", + "semver-diff": "^5.0.0", + "signale": "^1.2.1", + "yargs": "^18.0.0" + }, + "bin": { + "semantic-release": "bin/semantic-release.js" + }, + "engines": { + "node": "^22.14.0 || >= 24.10.0" + } + }, + "node_modules/semantic-release/node_modules/@octokit/auth-token": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-6.0.0.tgz", + "integrity": "sha512-P4YJBPdPSpWTQ1NU4XYdvHvXJJDxM6YwpS0FZHRgP7YFkdVxsWcpWGy/NVqlAA7PcPCnMacXlRm1y2PFZRWL/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 20" + } + }, + "node_modules/semantic-release/node_modules/@octokit/core": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/@octokit/core/-/core-7.0.6.tgz", + "integrity": "sha512-DhGl4xMVFGVIyMwswXeyzdL4uXD5OGILGX5N8Y+f6W7LhC1Ze2poSNrkF/fedpVDHEEZ+PHFW0vL14I+mm8K3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/auth-token": "^6.0.0", + "@octokit/graphql": "^9.0.3", + "@octokit/request": "^10.0.6", + "@octokit/request-error": "^7.0.2", + "@octokit/types": "^16.0.0", + "before-after-hook": "^4.0.0", + "universal-user-agent": "^7.0.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/semantic-release/node_modules/@octokit/endpoint": { + "version": "11.0.2", + "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-11.0.2.tgz", + "integrity": "sha512-4zCpzP1fWc7QlqunZ5bSEjxc6yLAlRTnDwKtgXfcI/FxxGoqedDG8V2+xJ60bV2kODqcGB+nATdtap/XYq2NZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^16.0.0", + "universal-user-agent": "^7.0.2" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/semantic-release/node_modules/@octokit/graphql": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-9.0.3.tgz", + "integrity": "sha512-grAEuupr/C1rALFnXTv6ZQhFuL1D8G5y8CN04RgrO4FIPMrtm+mcZzFG7dcBm+nq+1ppNixu+Jd78aeJOYxlGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/request": "^10.0.6", + "@octokit/types": "^16.0.0", + "universal-user-agent": "^7.0.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/semantic-release/node_modules/@octokit/openapi-types": { + "version": "27.0.0", + "resolved": 
"https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-27.0.0.tgz", + "integrity": "sha512-whrdktVs1h6gtR+09+QsNk2+FO+49j6ga1c55YZudfEG+oKJVvJLQi3zkOm5JjiUXAagWK2tI2kTGKJ2Ys7MGA==", + "dev": true, + "license": "MIT" + }, + "node_modules/semantic-release/node_modules/@octokit/plugin-paginate-rest": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-14.0.0.tgz", + "integrity": "sha512-fNVRE7ufJiAA3XUrha2omTA39M6IXIc6GIZLvlbsm8QOQCYvpq/LkMNGyFlB1d8hTDzsAXa3OKtybdMAYsV/fw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^16.0.0" + }, + "engines": { + "node": ">= 20" + }, + "peerDependencies": { + "@octokit/core": ">=6" + } + }, + "node_modules/semantic-release/node_modules/@octokit/plugin-retry": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-retry/-/plugin-retry-8.0.3.tgz", + "integrity": "sha512-vKGx1i3MC0za53IzYBSBXcrhmd+daQDzuZfYDd52X5S0M2otf3kVZTVP8bLA3EkU0lTvd1WEC2OlNNa4G+dohA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/request-error": "^7.0.2", + "@octokit/types": "^16.0.0", + "bottleneck": "^2.15.3" + }, + "engines": { + "node": ">= 20" + }, + "peerDependencies": { + "@octokit/core": ">=7" + } + }, + "node_modules/semantic-release/node_modules/@octokit/plugin-throttling": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-throttling/-/plugin-throttling-11.0.3.tgz", + "integrity": "sha512-34eE0RkFCKycLl2D2kq7W+LovheM/ex3AwZCYN8udpi6bxsyjZidb2McXs69hZhLmJlDqTSP8cH+jSRpiaijBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^16.0.0", + "bottleneck": "^2.15.3" + }, + "engines": { + "node": ">= 20" + }, + "peerDependencies": { + "@octokit/core": "^7.0.0" + } + }, + "node_modules/semantic-release/node_modules/@octokit/request": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@octokit/request/-/request-10.0.7.tgz", + "integrity": "sha512-v93h0i1yu4idj8qFPZwjehoJx4j3Ntn+JhXsdJrG9pYaX6j/XRz2RmasMUHtNgQD39nrv/VwTWSqK0RNXR8upA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/endpoint": "^11.0.2", + "@octokit/request-error": "^7.0.2", + "@octokit/types": "^16.0.0", + "fast-content-type-parse": "^3.0.0", + "universal-user-agent": "^7.0.2" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/semantic-release/node_modules/@octokit/request-error": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-7.1.0.tgz", + "integrity": "sha512-KMQIfq5sOPpkQYajXHwnhjCC0slzCNScLHs9JafXc4RAJI+9f+jNDlBNaIMTvazOPLgb4BnlhGJOTbnN0wIjPw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^16.0.0" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/semantic-release/node_modules/@octokit/types": { + "version": "16.0.0", + "resolved": "https://registry.npmjs.org/@octokit/types/-/types-16.0.0.tgz", + "integrity": "sha512-sKq+9r1Mm4efXW1FCk7hFSeJo4QKreL/tTbR0rz/qx/r1Oa2VV83LTA/H/MuCOX7uCIJmQVRKBcbmWoySjAnSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/openapi-types": "^27.0.0" + } + }, + "node_modules/semantic-release/node_modules/@semantic-release/error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-4.0.0.tgz", + "integrity": "sha512-mgdxrHTLOjOddRVYIYDo0fR3/v61GNN1YGkfbrjuIKg/uMgCd+Qzo3UAXJ+woLQQpos4pl5Esuw5A7AoNlzjUQ==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=18" + } + }, + "node_modules/semantic-release/node_modules/@semantic-release/github": { + "version": "12.0.2", + "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-12.0.2.tgz", + "integrity": "sha512-qyqLS+aSGH1SfXIooBKjs7mvrv0deg8v+jemegfJg1kq6ji+GJV8CO08VJDEsvjp3O8XJmTTIAjjZbMzagzsdw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/core": "^7.0.0", + "@octokit/plugin-paginate-rest": "^14.0.0", + "@octokit/plugin-retry": "^8.0.0", + "@octokit/plugin-throttling": "^11.0.0", + "@semantic-release/error": "^4.0.0", + "aggregate-error": "^5.0.0", + "debug": "^4.3.4", + "dir-glob": "^3.0.1", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "issue-parser": "^7.0.0", + "lodash-es": "^4.17.21", + "mime": "^4.0.0", + "p-filter": "^4.0.0", + "tinyglobby": "^0.2.14", + "undici": "^7.0.0", + "url-join": "^5.0.0" + }, + "engines": { + "node": "^22.14.0 || >= 24.10.0" + }, + "peerDependencies": { + "semantic-release": ">=24.1.0" + } + }, + "node_modules/semantic-release/node_modules/@sindresorhus/merge-streams": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz", + "integrity": "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/aggregate-error": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-5.0.0.tgz", + "integrity": "sha512-gOsf2YwSlleG6IjRYG2A7k0HmBMEo6qVNk9Bp/EaLgAJT5ngH6PXbqa4ItvnEwCm/velL5jAnQgsHsWnjhGmvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^5.2.0", + "indent-string": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/semantic-release/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/semantic-release/node_modules/before-after-hook": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-4.0.0.tgz", + "integrity": "sha512-q6tR3RPqIB1pMiTRMFcZwuG5T8vwp+vUvEG0vuI6B+Rikh5BfPp2fQ82c925FOs+b0lcFQ8CFrL+KbilfZFhOQ==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/semantic-release/node_modules/clean-stack": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-5.3.0.tgz", + "integrity": "sha512-9ngPTOhYGQqNVSfeJkYXHmF7AGWp4/nN5D/QqNQs3Dvxd1Kk/WpjHfNujKHYUQ/5CoGyOyFNoWSPk5afzP0QVg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": 
"5.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/cliui": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-9.0.1.tgz", + "integrity": "sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^7.2.0", + "strip-ansi": "^7.1.0", + "wrap-ansi": "^9.0.0" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/semantic-release/node_modules/cosmiconfig": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.0.tgz", + "integrity": "sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/semantic-release/node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "dev": true, + "license": "MIT" + }, + "node_modules/semantic-release/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/execa": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-9.6.1.tgz", + "integrity": "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sindresorhus/merge-streams": "^4.0.0", + "cross-spawn": "^7.0.6", + "figures": "^6.1.0", + "get-stream": "^9.0.0", + "human-signals": "^8.0.1", + "is-plain-obj": "^4.1.0", + "is-stream": "^4.0.1", + "npm-run-path": "^6.0.0", + "pretty-ms": "^9.2.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^4.0.0", + "yoctocolors": "^2.1.1" + }, + "engines": { + "node": "^18.19.0 || >=20.5.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/semantic-release/node_modules/execa/node_modules/get-stream": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-9.0.1.tgz", + "integrity": "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sec-ant/readable-stream": "^0.4.1", + "is-stream": "^4.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/figures": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", + "integrity": 
"sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-unicode-supported": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/human-signals": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-8.0.1.tgz", + "integrity": "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/semantic-release/node_modules/indent-string": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz", + "integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/is-stream": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz", + "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/issue-parser": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-7.0.1.tgz", + "integrity": "sha512-3YZcUUR2Wt1WsapF+S/WiA2WmlW0cWAoPccMqne7AxEBhCdFeTPjfv/Axb8V2gyCgY3nRw+ksZ3xSUX+R47iAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash.capitalize": "^4.2.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.uniqby": "^4.7.0" + }, + "engines": { + "node": "^18.17 || >=20.6.1" + } + }, + "node_modules/semantic-release/node_modules/npm-run-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", + "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/p-reduce": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-3.0.0.tgz", + "integrity": "sha512-xsrIUgI0Kn6iyDYm9StOpOeK29XM1aboGji26+QEortiFST1hGZaUQOLhtEbqHErPpGW/aSz6allwK2qcptp0Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/path-key": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/semantic-release/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/semantic-release/node_modules/strip-final-newline": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-4.0.0.tgz", + "integrity": "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semantic-release/node_modules/undici": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.16.0.tgz", + "integrity": "sha512-QEg3HPMll0o3t2ourKwOeUAZ159Kn9mx5pnzHRQO8+Wixmh88YdZRiIwat0iNzNNXn0yoEtXJqFpyW7eM8BV7g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, + "node_modules/semantic-release/node_modules/universal-user-agent": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-7.0.3.tgz", + "integrity": "sha512-TmnEAEAsBJVZM/AADELsK76llnwcf9vMKuPz8JflO1frO8Lchitr0fNaN9d+Ap0BjKtqWqd/J17qeDnXh8CL2A==", + "dev": true, + "license": "ISC" + }, + "node_modules/semantic-release/node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/semantic-release/node_modules/yargs": { + "version": "18.0.0", + "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz", + "integrity": "sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^9.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "string-width": "^7.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^22.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=23" + } + }, + "node_modules/semantic-release/node_modules/yargs-parser": { + "version": "22.0.0", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-22.0.0.tgz", + "integrity": "sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=23" + } + }, + "node_modules/semver": { + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-5.0.0.tgz", + "integrity": "sha512-0HbGtOm+S7T6NGQ/pxJSJipJvc4DK3FcRVMRkhsIwJDJ4Jcz5DQC1cPPzB5GhzyHjwttW878HaWQq46CkL3cqg==", + "deprecated": "Deprecated as the semver package now supports this built-in.", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semver-regex": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-4.0.5.tgz", + "integrity": "sha512-hunMQrEy1T6Jr2uEVjrAIqjwWcQTgOAcIM52C8MY1EZSD3DDNft04XzvYKPqjED65bNVVko0YI38nYeEHCX3yw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + 
"node_modules/signale": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/signale/-/signale-1.4.0.tgz", + "integrity": "sha512-iuh+gPf28RkltuJC7W5MRi6XAjTDCAPC/prJUpQoG4vIP3MJZ+GTydVnodXA7pwvTKb2cA0m9OFZW/cdWy/I/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^2.3.2", + "figures": "^2.0.0", + "pkg-conf": "^2.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/signale/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/signale/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/signale/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/signale/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true, + "license": "MIT" + }, + "node_modules/signale/node_modules/figures": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", + "integrity": "sha512-Oa2M9atig69ZkfwiApY8F2Yy+tzMbazyvqv21R0NsSC8floSOC09BbT1ITWAdoMGQvJ/aZnR1KMwdx9tvHnTNA==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/signale/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/signale/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/skin-tone": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", + "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", + "dev": true, + "license": "MIT", + "dependencies": { + "unicode-emoji-modifier-base": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", + "integrity": 
"sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/spawn-error-forwarder": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/spawn-error-forwarder/-/spawn-error-forwarder-1.0.0.tgz", + "integrity": "sha512-gRjMgK5uFjbCvdibeGJuy3I5OYz6VLoVdsOJdA6wV0WlfQVLFueoqMxwwYD9RODdgb6oUIvlRlsyFSiQkMKu0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/spdx-correct": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", + "dev": true, + "license": "CC-BY-3.0" + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.22", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.22.tgz", + "integrity": "sha512-4PRT4nh1EImPbt2jASOKHX7PB7I+e4IWNLvkKFDxNhJlfjbYlleYQh285Z/3mPTHSAK/AvdMmw5BNNuYH8ShgQ==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 10.x" + } + }, + "node_modules/stream-combiner2": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/stream-combiner2/-/stream-combiner2-1.1.1.tgz", + "integrity": "sha512-3PnJbYgS56AeWgtKF5jtJRT6uFJe56Z0Hc5Ngg/6sI6rIt8iiMBTa9cvdyFfpMQjaVHr8dusbNeFGIIonxOvKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "duplexer2": "~0.1.0", + "readable-stream": "^2.0.2" + } + }, + "node_modules/stream-combiner2/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + 
"node_modules/stream-combiner2/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/stream-combiner2/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-indent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", + "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "min-indent": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/super-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/super-regex/-/super-regex-1.1.0.tgz", + "integrity": "sha512-WHkws2ZflZe41zj6AolvvmaTrWds/VuyeYr9iPVv/oQeaIoVxMKaushfFWpOGDT+GuBrM/sVqF8KUCYQlSSTdQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"function-timeout": "^1.0.1", + "make-asynchronous": "^1.0.1", + "time-span": "^5.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-hyperlinks": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-3.2.0.tgz", + "integrity": "sha512-zFObLMyZeEwzAoKCyu1B91U79K2t7ApXuQfo8OuxwXLDgcKxuwM+YvcbIhm6QWqz7mHUH1TVytR1PwVVjEuMig==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0", + "supports-color": "^7.0.0" + }, + "engines": { + "node": ">=14.18" + }, + "funding": { + "url": "https://github.com/chalk/supports-hyperlinks?sponsor=1" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tagged-tag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/tagged-tag/-/tagged-tag-1.0.0.tgz", + "integrity": "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/temp-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-3.0.0.tgz", + "integrity": "sha512-nHc6S/bwIilKHNRgK/3jlhDoIHcp45YgyiwcAk46Tr0LfEqGBVpmiAyuiuxeVE44m3mXnEeVhaipLOEWmH+Njw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/tempy": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/tempy/-/tempy-3.1.0.tgz", + "integrity": "sha512-7jDLIdD2Zp0bDe5r3D2qtkd1QOCacylBuL7oa4udvN6v2pqr4+LcCr67C8DR1zkpaZ8XosF5m1yQSabKAW6f2g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-stream": "^3.0.0", + "temp-dir": "^3.0.0", + "type-fest": "^2.12.2", + "unique-string": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tempy/node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tempy/node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=12.20" 
+ }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/text-extensions": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/text-extensions/-/text-extensions-2.4.0.tgz", + "integrity": "sha512-te/NtwBwfiNRLf9Ijqx3T0nlqZiQ2XrrtBvu+cLL8ZRrGkO0NHTug8MYFKyoSrv/sHTaSKfilUkizV6XhxMJ3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/through2": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/through2/-/through2-4.0.2.tgz", + "integrity": "sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "readable-stream": "3" + } + }, + "node_modules/time-span": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/time-span/-/time-span-5.1.0.tgz", + "integrity": "sha512-75voc/9G4rDIJleOo4jPvN4/YC4GRZrY8yy1uU4lwrB3XEQbWve8zXoO5No4eFrGcTAMYyoY67p8jRQdtA1HbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "convert-hrtime": "^5.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/traverse": { + "version": "0.6.8", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.8.tgz", + "integrity": "sha512-aXJDbk6SnumuaZSANd21XAo15ucCDE38H4fkqiGsc3MhCK+wOlZvLP9cB/TvpHT0mOyWgC4Z8EwRlzqYSUzdsA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/trim-newlines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz", + "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD" + }, + "node_modules/tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.6.11 <=0.7.0 || >=0.7.3" + } + }, + "node_modules/type-fest": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-5.2.0.tgz", + "integrity": "sha512-xxCJm+Bckc6kQBknN7i9fnP/xobQRsRQxR01CztFkp/h++yfVxUUcmMgfR2HttJx/dpWjS9ubVuyspJv24Q9DA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "dependencies": { + "tagged-tag": "^1.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici": { + "version": "5.29.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.29.0.tgz", + "integrity": 
"sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@fastify/busboy": "^2.0.0" + }, + "engines": { + "node": ">=14.0" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/unicode-emoji-modifier-base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", + "integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicorn-magic": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", + "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/universal-user-agent": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.1.tgz", + "integrity": "sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/url-join": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/url-join/-/url-join-5.0.0.tgz", + "integrity": "sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + 
"resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "license": "MIT", + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/web-worker": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.2.0.tgz", + "integrity": "sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + 
"get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yoctocolors/-/yoctocolors-2.1.2.tgz", + "integrity": "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/package.json b/package.json index 333013f..988cb9e 100644 --- a/package.json +++ b/package.json @@ -1,20 +1,20 @@ { - "name": "{{PROJECT_NAME}}", + "name": "mcp-langfuse", "private": true, "scripts": { "commit": "cz", "semantic-release": "semantic-release" }, "devDependencies": { + "@artessan-devs/sr-uv-plugin": "github:LittleCoinCoin/sr-uv-plugin#fix/semantic-release-plugin-loading", "@commitlint/cli": "^18.6.1", "@commitlint/config-conventional": "^18.6.2", - "@covage/semantic-release-poetry-plugin": "^0.2.0-development", "@semantic-release/changelog": "^6.0.3", "@semantic-release/git": "^10.0.1", "@semantic-release/github": "^9.2.6", - "commitizen": "^4.3.0", - "cz-conventional-changelog": "^3.3.0", - "semantic-release": "^22.0.12" + "commitizen": "^4.3.1", + "cz-conventional-changelog": "^3.0.1", + "semantic-release": "^25.0.2" }, "config": { "commitizen": { diff --git a/pyproject.toml b/pyproject.toml index 7a09e91..5c441a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,12 +3,12 @@ requires = ["setuptools>=61.0"] build-backend = "setuptools.build_meta" [project] -name = "{{PROJECT_NAME}}" +name = "mcp-langfuse" version = "0.1.0" authors = [ { name = "Cracking Shells Team" }, ] -description = "{{PROJECT_DESCRIPTION}}" +description = "MCP server for Langfuse REST API with enhanced trace analysis tools" readme = "README.md" requires-python = ">=3.12" classifiers = [ @@ -32,11 +32,11 @@ dev = [ [project.scripts] # Uncomment and modify if this package provides a CLI tool -# {{PROJECT_NAME}} = "{{PACKAGE_NAME}}.cli:main" +# mcp-langfuse = "mcp_langfuse.cli:main" [project.urls] -"Homepage" = "https://github.com/CrackingShells/{{PROJECT_NAME}}" -"Bug Tracker" = "https://github.com/CrackingShells/{{PROJECT_NAME}}/issues" +"Homepage" = "https://github.com/CrackingShells/mcp-langfuse" +"Bug Tracker" = "https://github.com/CrackingShells/mcp-langfuse/issues" [tool.setuptools] package-dir = {"" = "."} diff --git a/tests/__init__.py b/tests/__init__.py index c2fed6f..ea00fee 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,2 +1,2 @@ -# Tests package for {{PROJECT_NAME}} +# Tests package for mcp-langfuse # This package contains unittest-based tests compatible with the future wobble 
testing framework diff --git a/tests/test_basic.py b/tests/test_basic.py index 97a9fe6..c239ec9 100644 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -1,4 +1,4 @@ -"""Basic tests for {{PROJECT_NAME}}. +"""Basic tests for mcp-langfuse. This module contains fundamental tests to ensure the package works correctly. Tests use unittest framework for compatibility with the future wobble testing system. @@ -18,32 +18,32 @@ class TestBasicFunctionality(unittest.TestCase): def test_package_import(self): """Test that the main package can be imported successfully.""" try: - import {{PACKAGE_NAME}} - self.assertIsNotNone({{PACKAGE_NAME}}) + import mcp_langfuse + self.assertIsNotNone(mcp_langfuse) except ImportError as e: - self.fail(f"Failed to import {{PACKAGE_NAME}}: {e}") + self.fail(f"Failed to import mcp_langfuse: {e}") def test_package_has_version(self): """Test that the package has a version attribute.""" - import {{PACKAGE_NAME}} + import mcp_langfuse # Check if package has __version__ attribute - if hasattr({{PACKAGE_NAME}}, '__version__'): - self.assertIsInstance({{PACKAGE_NAME}}.__version__, str) - self.assertGreater(len({{PACKAGE_NAME}}.__version__), 0) + if hasattr(mcp_langfuse, '__version__'): + self.assertIsInstance(mcp_langfuse.__version__, str) + self.assertGreater(len(mcp_langfuse.__version__), 0) else: # If no __version__, that's okay for minimal packages self.skipTest("Package does not define __version__ (acceptable for minimal packages)") def test_package_structure(self): """Test that the package has expected structure.""" - import {{PACKAGE_NAME}} + import mcp_langfuse # Package should be importable and have a file path - self.assertTrue(hasattr({{PACKAGE_NAME}}, '__file__')) + self.assertTrue(hasattr(mcp_langfuse, '__file__')) # Package file should exist - package_file = Path({{PACKAGE_NAME}}.__file__) + package_file = Path(mcp_langfuse.__file__) self.assertTrue(package_file.exists())