diff --git a/.gitattributes b/.gitattributes index 06ab1ee..bb973e5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,12 @@ -*.sh text eol=lf -*.py text eol=lf -*.env text eol=lf -*.yml text eol=lf +# Unix-style line endings (LF) +*.sh text eol=lf +*.py text eol=lf +*.yml text eol=lf +*.yaml text eol=lf +*.toml text eol=lf +*.cfg text eol=lf +*.ini text eol=lf +Makefile text eol=lf + +# PowerShell +*.ps1 text eol=crlf diff --git a/.github/workflows/ci-develop.yml b/.github/workflows/ci-develop.yml index edcd143..9727868 100644 --- a/.github/workflows/ci-develop.yml +++ b/.github/workflows/ci-develop.yml @@ -55,38 +55,19 @@ jobs: # Unit-tests only (exclude integration markers) - name: Run unit tests + env: + ENVIRONMENT: test run: poetry run pytest -m "not integration" --disable-warnings # Job 2 ─ Full validation (executed only on push events) -# --------------------------------------------------------------------------- # -# Includes everything from the quick job plus: -# • PostgreSQL service container -# • Alembic migrations -# • Integration tests -# • Multi-stage Docker build and health-check - full: if: | github.event_name == 'push' && github.ref == 'refs/heads/develop' runs-on: ubuntu-latest - services: - postgres: - image: postgres:17 - env: - POSTGRES_USER: ${{ secrets.DB_USER }} - POSTGRES_PASSWORD: ${{ secrets.DB_PASSWORD }} - POSTGRES_DB: ${{ secrets.DB_NAME }} - ports: ["5432:5432"] - options: >- - --health-cmd "pg_isready -U $POSTGRES_USER -d $POSTGRES_DB" - --health-interval 10s - --health-timeout 5s - --health-retries 5 - steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -108,41 +89,12 @@ jobs: - name: Run mypy run: poetry run mypy src - - name: Apply Alembic migrations - env: - ENVIRONMENT: test - DB_URL: postgresql+psycopg://${{ secrets.DB_USER }}:${{ secrets.DB_PASSWORD }}@localhost:5432/${{ secrets.DB_NAME }} - run: poetry run alembic upgrade head - - name: Run all tests - env: - ENVIRONMENT: test - DB_URL: 
postgresql+asyncpg://${{ secrets.DB_USER }}:${{ secrets.DB_PASSWORD }}@localhost:5432/${{ secrets.DB_NAME }} run: | poetry run pytest \ --cov=src --cov-report=term \ --disable-warnings - - name: Build Docker image - run: docker build --progress=plain -t backend:ci . + - - name: Smoke test container - run: | - # partiamo con --network host così il container condivide la rete del runner - docker run -d \ - --name backend_ci \ - --network host \ - -e ENVIRONMENT=test \ - -e DB_URL=postgresql+asyncpg://${{ secrets.DB_USER }}:${{ secrets.DB_PASSWORD }}@localhost:5432/${{ secrets.DB_NAME }} \ - backend:ci \ - uvicorn app.main:app --host 0.0.0.0 --port 8000 - - for i in {1..10}; do - if curl --silent --fail http://localhost:8000/health; then - echo "✔ Health OK"; break - else - echo "Waiting…"; sleep 3 - fi - done - - docker stop backend_ci \ No newline at end of file + \ No newline at end of file diff --git a/.github/workflows/ci-main.yml b/.github/workflows/ci-main.yml new file mode 100644 index 0000000..dcac0fb --- /dev/null +++ b/.github/workflows/ci-main.yml @@ -0,0 +1,51 @@ +name: CI – Main Branch + +on: + pull_request: + branches: [main] + workflow_dispatch: + +concurrency: + group: ci-main-${{ github.ref }} + cancel-in-progress: true + +env: + PYTHON_VERSION: "3.12" + MPLBACKEND: Agg + ASYNCFLOW_RUN_SYSTEM_TESTS: "1" + +jobs: + all-checks: + runs-on: ubuntu-latest + timeout-minutes: 25 + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: 'pip' + + - uses: actions/cache@v3 + with: + path: ~/.cache/pypoetry + key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }} + + - name: Install Poetry & deps + run: | + curl -sSL https://install.python-poetry.org | python3 - + export PATH="$HOME/.local/bin:$PATH" + poetry config virtualenvs.create false + poetry install --with dev --no-interaction + + - name: Ruff (lint) + run: poetry run ruff check src tests + + - name: MyPy 
(type-check) + run: poetry run mypy src tests + + - name: All tests (unit + integration + system) + run: | + poetry run pytest \ + --disable-warnings diff --git a/.gitignore b/.gitignore index 56d248a..58026c9 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,8 @@ pip-wheel-metadata/ venv/ ENV/ env/ +docker_fs/.env +docker_fs/.env.* # Poetry-specific .cache/pypoetry/ diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 6bf546c..0000000 --- a/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -# ─────────────── Build stage ─────────────── -FROM python:3.12-slim AS builder - -# Install system dependencies for psycopg and build tools -RUN apt-get update \ - && apt-get install -y --no-install-recommends gcc libpq-dev curl \ - && rm -rf /var/lib/apt/lists/* - -WORKDIR /opt/app - -# Copy only pyproject.toml, poetry.lock, README so we leverage cache -COPY pyproject.toml poetry.lock* README.md ./ - -# Install Poetry (into /root/.local/bin) -RUN curl -sSL https://install.python-poetry.org | python3 - - -# Symlink Poetry into /usr/local/bin so "poetry" is on PATH -RUN ln -s /root/.local/bin/poetry /usr/local/bin/poetry - -# Tell Poetry not to create its own venv -RUN poetry config virtualenvs.create false - -# Install only the prod deps (uvicorn, fastapi, sqlalchemy, psycopg...) -RUN poetry install --no-root --without dev - -# Now copy in your application code -COPY src/ ./src - -# ─────────── Runtime stage ─────────── -FROM python:3.12-slim AS runtime - -WORKDIR /opt/app - -# 1) Copy installed libraries -COPY --from=builder /usr/local/lib/python3.12 /usr/local/lib/python3.12 - -# 2) Copy console scripts (uvicorn, alembic, etc.) 
-COPY --from=builder /usr/local/bin /usr/local/bin - -# 3) Copy application code -COPY --from=builder /opt/app/src ./src - -# 4) Non-root user -RUN adduser --disabled-password --gecos '' appuser -USER appuser - -WORKDIR /opt/app/src - -# 5) Default command -CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/README.md b/README.md index 7a5e6bf..fca1fce 100644 --- a/README.md +++ b/README.md @@ -1,86 +1,374 @@ -## 🚀 How to Start the Backend with Docker (Development) -To spin up the backend and its supporting services in development mode: +# AsyncFlow — Event-Loop Aware Simulator for Async Distributed Systems -1. **Install & run Docker** on your machine. -2. **Clone** the repository and `cd` into its root. -3. Execute: +Created and maintained by @GioeleB00. - ```bash - bash ./scripts/init-docker-dev.sh - ``` +[![PyPI](https://img.shields.io/pypi/v/asyncflow-sim)](https://pypi.org/project/asyncflow-sim/) +[![Python](https://img.shields.io/pypi/pyversions/asyncflow-sim)](https://pypi.org/project/asyncflow-sim/) +[![License](https://img.shields.io/github/license/AsyncFlow-Sim/AsyncFlow)](LICENSE) +[![Status](https://img.shields.io/badge/status-v0.1.0alpha-orange)](#) +[![Ruff](https://img.shields.io/badge/lint-ruff-informational)](https://github.com/astral-sh/ruff) +[![Typing](https://img.shields.io/badge/typing-mypy-blueviolet)](https://mypy-lang.org/) +[![Tests](https://img.shields.io/badge/tests-pytest-6DA55F)](https://docs.pytest.org/) +[![SimPy](https://img.shields.io/badge/built%20with-SimPy-1f425f)](https://simpy.readthedocs.io/) - This will launch: +----- - * A **PostgreSQL** container - * A **Backend** container that mounts your local `src/` folder with live-reload +AsyncFlow is a discrete-event simulator for modeling and analyzing the performance of asynchronous, distributed backend systems built with SimPy. 
You describe your system's topology—its servers, network links, and load balancers—and AsyncFlow simulates the entire lifecycle of requests as they move through it. ---- +It provides a **digital twin** of your service, modeling not just the high-level architecture but also the low-level behavior of each server's **event loop**, including explicit **CPU work**, **RAM residency**, and **I/O waits**. This allows you to run realistic "what-if" scenarios that behave like production systems rather than toy benchmarks. + +### What Problem Does It Solve? + +Modern async stacks like FastAPI are incredibly performant, but predicting their behavior under real-world load is difficult. Capacity planning often relies on guesswork, expensive cloud-based load tests, or discovering bottlenecks only after a production failure. AsyncFlow is designed to replace that uncertainty with **data-driven forecasting**, allowing you to understand how your system will perform before you deploy a single line of code. + +### How Does It Work? An Example Topology + +AsyncFlow models your system as a directed graph of interconnected components. A typical setup might look like this: + +![Topology at a glance](readme_img/topology.png) -## 🏗️ Development Architecture & Philosophy +### What Questions Can It Answer? -We split responsibilities between Docker-managed services and local workflows: +By running simulations on your defined topology, you can get quantitative answers to critical engineering questions, such as: -### 🐳 Docker-Compose Dev + * How does **p95 latency** change if active users increase from 100 to 200? + * What is the impact on the system if the **client-to-server network latency** increases by 3ms? + * Will a specific API endpoint—with a pipeline of parsing, RAM allocation, and database I/O—hold its **SLA at a load of 40 requests per second**? +--- + +## Installation -* **Containers** host external services (PostgreSQL) and run the FastAPI app. 
-* Your **local `src/` directory** is mounted into the backend container for hot-reload. -* **No tests, migrations, linting, or type checks** run inside these containers during development. +Install from PyPI: `pip install asyncflow-sim` -**Why?** -* ⚡ **Instant feedback** on code changes -* 🛠️ **Full IDE support** (debugging, autocomplete, refactoring) -* ⏱️ **Blistering speed**—no rebuilding images on every change +## Requirements +* **Python 3.12+** (tested on 3.12, 3.13) +* **OS:** Linux, macOS, or Windows +* **Installed automatically (runtime deps):** + **SimPy** (DES engine), **NumPy**, **Matplotlib**, **Pydantic** + **pydantic-settings**, **PyYAML**. --- -### 🧪 Local Quality & Testing Workflow +## Quick Start + +### 1) Define a realistic YAML + +Save as `my_service.yml`. + +The full YAML schema is explained in `docs/guides/yaml-input-builder.md` and validated by Pydantic models (see `docs/internals/simulation-input.md`). + +```yaml +rqs_input: + id: generator-1 + avg_active_users: { mean: 100, distribution: poisson } + avg_request_per_minute_per_user: { mean: 20, distribution: poisson } + user_sampling_window: 60 + +topology_graph: + nodes: + client: { id: client-1 } + + servers: + - id: app-1 + server_resources: { cpu_cores: 1, ram_mb: 2048 } + endpoints: + - endpoint_name: /api + # Realistic pipeline on one async server: + # - 2 ms CPU parsing (blocks the event loop) + # - 120 MB RAM working set (held until the request leaves the server) + # - 12 ms DB-like I/O (non-blocking wait) + steps: + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } + - kind: ram + step_operation: { necessary_ram: 120 } + - kind: io_db + step_operation: { io_waiting_time: 0.012 } + + edges: + - { id: gen-client, source: generator-1, target: client-1, + latency: { mean: 0.003, distribution: exponential } } + - { id: client-app, source: client-1, target: app-1, + latency: { mean: 0.003, distribution: exponential } } + - { id: app-client, source: app-1, target: client-1, + 
latency: { mean: 0.003, distribution: exponential } } + +sim_settings: + total_simulation_time: 300 + sample_period_s: 0.05 + enabled_sample_metrics: + - ready_queue_len + - ram_in_use + - edge_concurrent_connection + enabled_event_metrics: + - rqs_clock +``` + +Prefer building scenarios in Python? There’s a Python builder with the same semantics (create nodes, edges, endpoints programmatically). See **`docs/guides/python-builder.md`**. + +### 2) Run and export charts + +Save as `run_my_service.py`. + +```python +from __future__ import annotations + +from pathlib import Path +import simpy +import matplotlib.pyplot as plt + +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.metrics.analyzer import ResultsAnalyzer + + +def main() -> None: + script_dir = Path(__file__).parent + yaml_path = script_dir / "my_service.yml" + out_path = script_dir / "my_service_plots.png" -All code quality tools, migrations, and tests execute on your host machine: + env = simpy.Environment() + runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) + res: ResultsAnalyzer = runner.run() -| Task | Command | Notes | -| --------------------- | ---------------------------------------- | ------------------------------------------------- | -| **Lint & format** | `poetry run ruff check src tests` | Style and best-practice validations | -| **Type checking** | `poetry run mypy src tests` | Static type enforcement | -| **Unit tests** | `poetry run pytest -m "not integration"` | Fast, isolated tests—no DB required | -| **Integration tests** | `poetry run pytest -m integration` | Real-DB tests against Docker’s PostgreSQL | -| **DB migrations** | `poetry run alembic upgrade head` | Applies migrations to your local Docker-hosted DB | + # Print a concise latency summary + print(res.format_latency_stats()) -> **Rationale:** -> Running tests or Alembic migrations inside Docker images would force you to mount the full source tree, install dev dependencies in each 
build, and copy over configs—**slowing down** your feedback loop and **limiting** IDE features. + # 2x2: Latency | Throughput | Ready (first server) | RAM (first server) + fig, axes = plt.subplots(2, 2, figsize=(12, 8), dpi=160) + + res.plot_latency_distribution(axes[0, 0]) + res.plot_throughput(axes[0, 1]) + + sids = res.list_server_ids() + if sids: + sid = sids[0] + res.plot_single_server_ready_queue(axes[1, 0], sid) + res.plot_single_server_ram(axes[1, 1], sid) + else: + for ax in (axes[1, 0], axes[1, 1]): + ax.text(0.5, 0.5, "No servers", ha="center", va="center") + ax.axis("off") + + fig.tight_layout() + fig.savefig(out_path) + print(f"Plots saved to: {out_path}") + + +if __name__ == "__main__": + main() + +``` + +Run the python script + +You’ll get latency stats in the terminal and a PNG with four charts (latency distribution, throughput, server queues, RAM usage). + +**Want more?** + +For ready-to-run scenarios—including examples using the Pythonic builder and multi-server topologies—check out the `examples/` directory in the repository. + +## Development + +If you want to contribute or run the full test suite locally, follow these steps. 
+ +### Requirements + +* **Python 3.12+** (tested on 3.12, 3.13) +* **OS:** Linux, macOS, or Windows +* **Runtime deps installed by the package:** SimPy, NumPy, Matplotlib, Pydantic, PyYAML, pydantic-settings + +**Prerequisites:** Git, Python 3.12+ in `PATH`, `curl` (Linux/macOS/WSL), PowerShell 7+ (Windows) --- -## ⚙️ CI/CD with GitHub Actions +## Project setup + +```bash +git clone https://github.com/AsyncFlow-Sim/AsyncFlow.git +cd AsyncFlow +``` + +From the repo root, run the **one-shot post-clone setup**: + +**Linux / macOS / WSL** -We maintain two jobs on the `develop` branch: +```bash +bash scripts/dev_setup.sh +``` -### 🔍 Quick (on Pull Requests) +**Windows (PowerShell)** -* Ruff & MyPy -* Unit tests only -* **No database** → < 1-minute feedback +```powershell +# If scripts are blocked by policy, run this in the same PowerShell session: +# Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass +.\scripts\dev_setup.ps1 +``` -### 🛠️ Full (on pushes to `develop`) +**What this does (concise):** -* All **Quick** checks -* Start a **PostgreSQL** service container -* Run **Alembic** migrations -* Execute **unit + integration** tests -* Build the **Docker** image -* **Smoke-test** the `/health` endpoint +* Ensures **Poetry** is available (installs if missing). +* Uses a **project-local `.venv`**. +* Removes `poetry.lock` for a **clean dependency resolve** (dev policy). +* Installs the project **with dev extras**. +* Runs **ruff**, **mypy**, and **pytest (with coverage)**. -> **Guarantee:** Every commit in `develop` is style-checked, type-safe, DB-tested, and Docker-ready. +**Quick sanity check after setup:** + +```bash +poetry --version +poetry run python -V +``` + +> **Note (lock policy):** `dev_setup` intentionally removes `poetry.lock` to avoid cross-platform conflicts during development. 
+ +**Scripts (for quick access):** + +* [`scripts/dev_setup.sh`](scripts/dev_setup.sh) / [`scripts/dev_setup.ps1`](scripts/dev_setup.ps1) +* [`scripts/quality_check.sh`](scripts/quality_check.sh) / [`scripts/quality_check.ps1`](scripts/quality_check.ps1) +* [`scripts/run_tests.sh`](scripts/run_tests.sh) / [`scripts/run_tests.ps1`](scripts/run_tests.ps1) --- -## 🧠 Summary +### Handy scripts (after setup) + +#### 1) Lint + type check + +**Linux / macOS / WSL** + +```bash +bash scripts/quality_check.sh +``` + +**Windows (PowerShell)** + +```powershell +.\scripts\quality_check.ps1 +``` + +Runs **ruff** (lint/format check) and **mypy** on `src` and `tests`. + +#### 2) Run tests with coverage (unit + integration) + +**Linux / macOS / WSL** + +```bash +bash scripts/run_tests.sh +``` + +**Windows (PowerShell)** + +```powershell +.\scripts\run_tests.ps1 +``` + +#### 3) Run system tests + +**Linux / macOS / WSL** + +```bash +bash scripts/run_sys_tests.sh +``` + +**Windows (PowerShell)** + +```powershell +.\scripts\run_sys_tests.ps1 +``` + +Executes **pytest** with a terminal coverage summary (no XML, no slowest list). + + + +## What AsyncFlow Models (v0.1) + +AsyncFlow provides a detailed simulation of your backend system. Here is a high-level overview of the core components it models. For a deeper technical dive into the implementation and design rationale, follow the links to the internal documentation. + +* **Async Event Loop:** Simulates a single-threaded, non-blocking event loop per server. **CPU steps** block the loop, while **I/O steps** are non-blocking, accurately modeling `asyncio` behavior. + * *(Deep Dive: `docs/internals/runtime-and-resources.md`)* + +* **System Resources:** Models finite server resources, including **CPU cores** and **RAM (MB)**. Requests must acquire these resources, creating natural back-pressure and contention when the system is under load. 
 * *(Deep Dive: `docs/internals/runtime-and-resources.md`)*

* **Endpoints & Request Lifecycles:** Models server endpoints as a linear sequence of **steps**. Each step is a distinct operation, such as `cpu_bound_operation`, `io_wait`, or `ram` allocation.
  * *(Schema Definition: `docs/internals/simulation-input.md`)*

* **Network Edges:** Simulates the connections between system components. Each edge has a configurable **latency** (drawn from a probability distribution) and an optional **dropout rate** to model packet loss.
  * *(Schema Definition: `docs/internals/simulation-input.md` | Runtime Behavior: `docs/internals/runtime-and-resources.md`)*

* **Stochastic Workload:** Generates user traffic based on a two-stage sampling model, combining the number of active users and their request rate per minute to produce a realistic, fluctuating load (RPS) on the system.
  * *(Modeling Details with mathematical explanation and clear assumptions: `docs/internals/requests-generator.md`)*

* **Metrics & Outputs:** Collects two types of data: **time-series metrics** (e.g., `ready_queue_len`, `ram_in_use`) and **event-based data** (`RqsClock`). This raw data is used to calculate final KPIs like **p95/p99 latency** and **throughput**.
  * *(Metric Reference: `docs/internals/metrics`)*

## Current Limitations (v0.1)

* Network realism: base latency + optional drops (no bandwidth/payload/TCP yet).
* Single event loop per server: no multi-process/multi-node servers yet.
* Linear endpoint flows: no branching/fan-out within an endpoint.
* No thread-level concurrency; modeling OS threads and scheduler/context switching is out of scope.
* Stationary workload: no diurnal patterns or feedback/backpressure.
* Sampling cadence: very short spikes can be missed if `sample_period_s` is large.
+ + +## Roadmap (Order is not indicative of priority) + +This roadmap outlines the key development areas to transform AsyncFlow into a comprehensive framework for statistical analysis and resilience modeling of distributed systems. + +### 1. Monte Carlo Simulation Engine + +**Why:** To overcome the limitations of a single simulation run and obtain statistically robust results. This transforms the simulator from an "intuition" tool into an engineering tool for data-driven decisions with confidence intervals. + +* **Independent Replications:** Run the same simulation N times with different random seeds to sample the space of possible outcomes. +* **Warm-up Period Management:** Introduce a "warm-up" period to be discarded from the analysis, ensuring that metrics are calculated only on the steady-state portion of the simulation. +* **Ensemble Aggregation:** Calculate means, standard deviations, and confidence intervals for aggregated metrics (latency, throughput) across all replications. +* **Confidence Bands:** Visualize time-series data (e.g., queue lengths) with confidence bands to show variability over time. + +### 2. Realistic Service Times (Stochastic Service Times) + +**Why:** Constant service times underestimate tail latencies (p95/p99), which are almost always driven by "slow" requests. Modeling this variability is crucial for a realistic analysis of bottlenecks. + +* **Distributions for Steps:** Allow parameters like `cpu_time` and `io_waiting_time` in an `EndpointStep` to be sampled from statistical distributions (e.g., Lognormal, Gamma, Weibull) instead of being fixed values. +* **Per-Request Sampling:** Each request will sample its own service times independently, simulating the natural variability of a real-world system. + +### 3. Component Library Expansion + +**Why:** To increase the variety and realism of the architectures that can be modeled. 
+ +* **New System Nodes:** + * `CacheRuntime`: To model caching layers (e.g., Redis) with hit/miss logic, TTL, and warm-up behavior. + * `APIGatewayRuntime`: To simulate API Gateways with features like rate-limiting and authentication caching. + * `DBRuntime`: A more advanced model for databases featuring connection pool contention and row-level locking. +* **New Load Balancer Algorithms:** Add more advanced routing strategies (e.g., Weighted Round Robin, Least Response Time). + +### 4. Fault and Event Injection + +**Why:** To test the resilience and behavior of the system under non-ideal conditions, a fundamental use case for Site Reliability Engineering (SRE). + +* **API for Scheduled Events:** Introduce a system to schedule events at specific simulation times, such as: + * **Node Down/Up:** Turn a server off and on to test the load balancer's failover logic. + * **Degraded Edge:** Drastically increase the latency or drop rate of a network link. + * **Error Bursts:** Simulate a temporary increase in the rate of application errors. + +### 5. Advanced Network Modeling + +**Why:** To more faithfully model network-related bottlenecks that are not solely dependent on latency. + +* **Bandwidth and Payload Size:** Introduce the concepts of link bandwidth and request/response size to simulate delays caused by data transfer. +* **Retries and Timeouts:** Model retry and timeout logic at the client or internal service level. + +### 6. Complex Endpoint Flows + +**Why:** To model more realistic business logic that does not follow a linear path. + +* **Conditional Branching:** Introduce the ability to have conditional steps within an endpoint (e.g., a different path for a cache hit vs. a cache miss). +* **Fan-out / Fan-in:** Model scenarios where a service calls multiple downstream services in parallel and waits for their responses. -1. **Docker-Compose** for services & hot-reload of the app code -2. 
**Local** execution of migrations, tests, and QA for speed and IDE integration -3. **CI pipeline** split into quick PR checks and full develop-branch validation +### 7. Backpressure and Autoscaling -This hybrid setup delivers **fast development** without sacrificing **production-grade safety** in CI. +**Why:** To simulate the behavior of modern, adaptive systems that react to load. +* **Dynamic Rate Limiting:** Introduce backpressure mechanisms where services slow down the acceptance of new requests if their internal queues exceed a certain threshold. +* **Autoscaling Policies:** Model simple Horizontal Pod Autoscaler (HPA) policies where the number of server replicas increases or decreases based on metrics like CPU utilization or queue length. diff --git a/alembic.ini b/alembic.ini deleted file mode 100644 index 24fc464..0000000 --- a/alembic.ini +++ /dev/null @@ -1,144 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts. -# this is typically a path given in POSIX (e.g. forward slashes) -# format, relative to the token %(here)s which refers to the location of this -# ini file -script_location = %(here)s/alembic - -# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s -# Uncomment the line below if you want the files to be prepended with date and time -# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file -# for all available tokens -# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s - -# sys.path path, will be prepended to sys.path if present. -# defaults to the current working directory. for multiple paths, the path separator -# is defined by "path_separator" below. -prepend_sys_path = . - - -# timezone to use when rendering the date within the migration file -# as well as the filename. -# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. 
-# Any required deps can installed by adding `alembic[tz]` to the pip requirements -# string value is passed to ZoneInfo() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; This defaults -# to /versions. When using multiple version -# directories, initial revisions must be specified with --version-path. -# The path separator used here should be the separator specified by "path_separator" -# below. -# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions - -# path_separator; This indicates what character is used to split lists of file -# paths, including version_locations and prepend_sys_path within configparser -# files such as alembic.ini. -# The default rendered in new alembic.ini files is "os", which uses os.pathsep -# to provide os-dependent path splitting. -# -# Note that in order to support legacy alembic.ini files, this default does NOT -# take place if path_separator is not present in alembic.ini. If this -# option is omitted entirely, fallback logic is as follows: -# -# 1. Parsing of the version_locations option falls back to using the legacy -# "version_path_separator" key, which if absent then falls back to the legacy -# behavior of splitting on spaces and/or commas. -# 2. Parsing of the prepend_sys_path option falls back to the legacy -# behavior of splitting on spaces, commas, or colons. -# -# Valid values for path_separator are: -# -# path_separator = : -# path_separator = ; -# path_separator = space -# path_separator = newline -# -# Use os.pathsep. Default configuration used for new projects. 
-path_separator = os - -# set to 'true' to search source files recursively -# in each "version_locations" directory -# new in Alembic version 1.10 -# recursive_version_locations = false - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -# database URL. This is consumed by the user-maintained env.py script only. -# other means of configuring database URLs may be customized within the env.py -# file. -#sqlalchemy.url = driver://user:pass@localhost/dbname -# sqlalchemy.url is overridden at runtime in alembic/env.py -# via config.set_main_option(..., settings.db_url) -# leave this here as a placeholder or for offline mode if needed -# sqlalchemy.url = postgresql+asyncpg://user:pass@localhost:5432/dbname - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks = black -# black.type = console_scripts -# black.entrypoint = black -# black.options = -l 79 REVISION_SCRIPT_FILENAME - -# lint with attempts to fix using "ruff" - use the exec runner, execute a binary -# hooks = ruff -# ruff.type = exec -# ruff.executable = %(here)s/.venv/bin/ruff -# ruff.options = check --fix REVISION_SCRIPT_FILENAME - -# Logging configuration. This is also consumed by the user-maintained -# env.py script only. 
-[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARNING -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARNING -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/alembic/README b/alembic/README deleted file mode 100644 index 98e4f9c..0000000 --- a/alembic/README +++ /dev/null @@ -1 +0,0 @@ -Generic single-database configuration. \ No newline at end of file diff --git a/alembic/env.py b/alembic/env.py deleted file mode 100644 index 9f0ce10..0000000 --- a/alembic/env.py +++ /dev/null @@ -1,83 +0,0 @@ -from logging.config import fileConfig - -from alembic import context -from sqlalchemy import engine_from_config, pool - -from app.config.settings import settings -from app.db.base import Base -import app.models.table_users - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# replace sqlalchemy.url with the value of the url in env.dev -raw_url = settings.db_url.replace("+asyncpg", "+psycopg") -config.set_main_option("sqlalchemy.url", raw_url) - - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -if config.config_file_name is not None: - fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. 
- - -def run_migrations_offline() -> None: - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online() -> None: - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - connectable = engine_from_config( - config.get_section(config.config_ini_section, {}), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - with connectable.connect() as connection: - context.configure(connection=connection, target_metadata=target_metadata) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/alembic/script.py.mako b/alembic/script.py.mako deleted file mode 100644 index 1101630..0000000 --- a/alembic/script.py.mako +++ /dev/null @@ -1,28 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from typing import Sequence, Union - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. 
-revision: str = ${repr(up_revision)} -down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)} -branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} -depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} - - -def upgrade() -> None: - """Upgrade schema.""" - ${upgrades if upgrades else "pass"} - - -def downgrade() -> None: - """Downgrade schema.""" - ${downgrades if downgrades else "pass"} diff --git a/docker/.env.dev b/docker/.env.dev deleted file mode 100644 index 911bc85..0000000 --- a/docker/.env.dev +++ /dev/null @@ -1,11 +0,0 @@ -DB_HOST=db -DB_PORT=5432 -DB_NAME=project_backend_dev -DB_USER=dev_user -DB_PASSWORD=dev_pass -DB_URL=postgresql+asyncpg://dev_user:dev_pass@db:5432/project_backend_dev - -PGADMIN_DEFAULT_EMAIL=admin@example.com -PGADMIN_DEFAULT_PASSWORD=secret - -ENVIRONMENT=development \ No newline at end of file diff --git a/docker/.env.example b/docker/.env.example deleted file mode 100644 index 1ee9b47..0000000 --- a/docker/.env.example +++ /dev/null @@ -1,8 +0,0 @@ -DB_HOST= -DB_USER= -DB_PASSWORD= -DB_NAME= -DB_URL= -PGADMIN_DEFAULT_EMAIL= -PGADMIN_DEFAULT_PASSWORD= -ENVIRONMENT= \ No newline at end of file diff --git a/docker/.env.test b/docker/.env.test deleted file mode 100644 index 2ae2f2e..0000000 --- a/docker/.env.test +++ /dev/null @@ -1,12 +0,0 @@ - -ENVIRONMENT=test - - -DB_HOST=db -DB_PORT=5432 -DB_USER=postgres -DB_PASSWORD=testpassword -DB_NAME=test_db - - -DB_URL=postgresql+asyncpg://postgres:testpassword@db:5432/test_db diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml deleted file mode 100644 index cea5fa4..0000000 --- a/docker/docker-compose.dev.yml +++ /dev/null @@ -1,40 +0,0 @@ -services: - db: - image: postgres:17 - ports: ["5432:5432"] - - env_file: - - .env.dev - environment: - POSTGRES_USER: ${DB_USER} - POSTGRES_PASSWORD: ${DB_PASSWORD} - POSTGRES_DB: ${DB_NAME} - - volumes: - - dev_db:/var/lib/postgresql/data - - pgadmin: - image: dpage/pgadmin4 - 
ports: ["8080:80"] - env_file: - - .env.dev - environment: - PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL} - PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD} - depends_on: - - db - - backend: - build: .. - command: uvicorn app.main:app --reload --host 0.0.0.0 --port 8000 - volumes: - - ../src:/opt/app/src - ports: - - "8000:8000" - env_file: - - .env.dev - depends_on: - - db - -volumes: - dev_db: \ No newline at end of file diff --git a/docker/docker-compose.prod.yml b/docker/docker-compose.prod.yml deleted file mode 100644 index e69de29..0000000 diff --git a/docs/api/analyzer.md b/docs/api/analyzer.md new file mode 100644 index 0000000..eb86e6b --- /dev/null +++ b/docs/api/analyzer.md @@ -0,0 +1,208 @@ +# ResultsAnalyzer — Public API Documentation + +Analyze and visualize the outcome of an AsyncFlow simulation. +`ResultsAnalyzer` consumes raw runtime objects (client, servers, edges, settings), +computes latency and throughput aggregates, exposes sampled series, and offers +compact plotting helpers built on Matplotlib. 
+ +--- + +## Quick start + +```python +import simpy +from matplotlib import pyplot as plt +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.metrics.analyzer import ResultsAnalyzer, SampledMetricName + +# 1) Run a simulation and get an analyzer +env = simpy.Environment() +runner = SimulationRunner.from_yaml(env=env, yaml_path="data/single_server.yml") +res: ResultsAnalyzer = runner.run() + +# 2) Text summary +print(res.format_latency_stats()) + +# 3) Plot the dashboard (latency histogram + throughput) +fig, (ax_lat, ax_rps) = plt.subplots(1, 2, figsize=(12, 4), dpi=160) +res.plot_base_dashboard(ax_lat, ax_rps) +fig.tight_layout() +fig.savefig("dashboard.png") + +# 4) Single-server plots +server_id = res.list_server_ids()[0] +fig_rdy, ax_rdy = plt.subplots(figsize=(8, 4), dpi=160) +res.plot_single_server_ready_queue(ax_rdy, server_id) +fig_rdy.tight_layout() +fig_rdy.savefig(f"ready_{server_id}.png") +``` + +--- + +## Data model & units + +* **Latency**: seconds (s). +* **Throughput**: requests per second (RPS). +* **Sampled metrics** (per server/edge): series captured at a fixed sampling + period `settings.sample_period_s` (e.g., queue length, RAM usage). + Units depend on the metric (RAM is typically MB). + +--- + +## Computed metrics + +* **Latency statistics** (global): + `TOTAL_REQUESTS, MEAN, MEDIAN, STD_DEV, P95, P99, MIN, MAX`. +* **Throughput time series**: per-window RPS (default cached at 1 s buckets). +* **Sampled metrics**: raw, per-entity series keyed by + `SampledMetricName` (or its string value). + +--- + +## Class reference + +### Constructor + +```python +ResultsAnalyzer( + *, + client: ClientRuntime, + servers: list[ServerRuntime], + edges: list[EdgeRuntime], + settings: SimulationSettings, +) +``` + +The analyzer is **lazy**: metrics are computed on first access. + +### Core methods + +* `process_all_metrics() -> None` + Forces computation of latency stats, throughput cache (1 s), and sampled metrics. 
+ +* `get_latency_stats() -> dict[LatencyKey, float]` + Returns the global latency stats. Computes them if needed. + +* `format_latency_stats() -> str` + Returns a ready-to-print block with latency statistics. + +* `get_throughput_series(window_s: float | None = None) -> tuple[list[float], list[float]]` + Returns `(timestamps, rps)`. If `window_s` is `None` or `1.0`, the cached + 1-second series is returned; otherwise a fresh series is computed. + +* `get_sampled_metrics() -> dict[str, dict[str, list[float]]]` + Returns sampled metrics as `{metric_key: {entity_id: [values...]}}`. + +* `get_metric_map(key: SampledMetricName | str) -> dict[str, list[float]]` + Gets the per-entity series map for a metric. Accepts either the enum value or + the raw string key. + +* `get_series(key: SampledMetricName | str, entity_id: str) -> tuple[list[float], list[float]]` + Returns time/value series for a given metric and entity. + Time coordinates are `i * settings.sample_period_s`. + +* `list_server_ids() -> list[str]` + Returns server IDs in a stable, topology order. + +--- + +## Plotting helpers + +All plotting methods draw on a **Matplotlib `Axes`** provided by the caller and +do **not** manage figure lifecycles. + +> When there is no data for the requested plot, the axis is annotated with the +> corresponding `no_data` message from `plot_constants`. + +### Dashboard + +* `plot_base_dashboard(ax_latency: Axes, ax_throughput: Axes) -> None` + Convenience: calls the two methods below. + +* `plot_latency_distribution(ax: Axes) -> None` + Latency histogram with **vertical overlays** (mean, P50, P95, P99) and a + **single legend box** (top-right) that shows each statistic with its matching + colored handle. + +* `plot_throughput(ax: Axes, *, window_s: float | None = None) -> None` + Throughput line with **horizontal overlays** (mean, P95, max) and a + **single legend box** (top-right) that shows values and colors for each line. 
+ +### Single-server plots + +Each single-server plot: + +* draws the main series, + +* overlays **mean / min / max** as horizontal lines (distinct styles/colors), + +* shows a **single legend box** with values for mean/min/max, + +* **does not** include a legend entry for the main series (title suffices). + +* `plot_single_server_ready_queue(ax: Axes, server_id: str) -> None` + Ready queue length over time (per server). + +* `plot_single_server_io_queue(ax: Axes, server_id: str) -> None` + I/O queue/sleep metric over time (per server). + +* `plot_single_server_ram(ax: Axes, server_id: str) -> None` + RAM usage over time (per server). + +## Behavior & design notes + +* **Laziness & caching** + + * Latency stats and the 1 s throughput series are cached on first use. + * Calling `get_throughput_series(window_s=...)` with a custom window computes + a fresh series (not cached). + +* **Stability** + + * `list_server_ids()` follows the topology order for readability across runs. + +* **Error handling** + + * Multi-server plotting methods validate the number of axes and raise + `ValueError` with a descriptive message. + +* **Matplotlib integration** + + * The analyzer **does not** close figures or call `plt.show()`. + * Titles, axes labels, and “no data” messages are taken from + `asyncflow.config.plot_constants`. + +* **Thread-safety** + + * The analyzer is not designed for concurrent mutation. Use from a single + thread after the simulation completes. + +--- + +## Examples + +### Custom throughput window + +```python +fig, ax = plt.subplots(figsize=(8, 3), dpi=160) +res.plot_throughput(ax, window_s=2.0) # 2-second buckets +fig.tight_layout() +fig.savefig("throughput_2s.png") +``` + +### Access a sampled metric series + +```python +from asyncflow.metrics.analyzer import SampledMetricName + +server_id = res.list_server_ids()[0] +t, qlen = res.get_series(SampledMetricName.READY_QUEUE_LEN, server_id) +# t: [0.0, 0.1, 0.2, ...] (scaled by sample_period_s) +# qlen: [.. 
values ..] +``` + +--- + +If you need additional KPIs (e.g., tail latency over time, backlog, or +utilization), the current structure makes it straightforward to add new helpers +alongside the existing plotting methods. diff --git a/docs/api/components.md b/docs/api/components.md new file mode 100644 index 0000000..15f97c2 --- /dev/null +++ b/docs/api/components.md @@ -0,0 +1,295 @@ +# AsyncFlow — Public API Reference: `components` + +This page documents the **public topology components** you can import from +`asyncflow.components` to construct a simulation scenario in Python. +These classes are Pydantic models with strict validation and are the +**only pieces you need** to define the *structure* of your system: nodes +(client/servers/LB), endpoints (steps), and network edges. + +> The builder (`AsyncFlow`) will assemble these into the internal graph for you. +> You **do not** need to import internal graph classes. + +--- + +## Imports + +```python +from asyncflow.components import ( + Client, + Server, + ServerResources, + LoadBalancer, + Endpoint, + Edge, +) +# Optional enums (strings are also accepted): +from asyncflow.enums import Distribution +``` + +--- + +## Quick example + +```python +from asyncflow.components import ( + Client, Server, ServerResources, LoadBalancer, Endpoint, Edge +) + +# Nodes +client = Client(id="client-1") + +endpoint = Endpoint( + endpoint_name="/predict", + steps=[ + {"kind": "ram", "step_operation": {"necessary_ram": 64}}, + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}}, + ], +) + +server = Server( + id="srv-1", + server_resources=ServerResources(cpu_cores=2, ram_mb=2048), + endpoints=[endpoint], +) + +lb = LoadBalancer(id="lb-1", algorithms="round_robin", server_covered={"srv-1"}) + +# Edges (directed) +edges = [ + Edge( + id="gen-to-client", + source="rqs-1", # external sources allowed (e.g., generator id) + target="client-1", # targets must 
be declared nodes + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="client-to-lb", + source="client-1", + target="lb-1", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="lb-to-srv1", + source="lb-1", + target="srv-1", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="srv1-to-client", + source="srv-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), +] +``` + +You can then feed these to the `AsyncFlow` builder (not shown here) along with +workload and settings. + +--- + +## Component reference + +### `Client` + +```python +Client(id: str) +``` + +* Represents the client node. +* `type` is fixed internally to `"client"`. +* **Validation:** any non-standard `type` is rejected (guardrail). + +--- + +### `ServerResources` + +```python +ServerResources( + cpu_cores: int = 1, # ≥ 1 NOW MUST BE FIXED TO ONE + ram_mb: int = 1024, # ≥ 256 + db_connection_pool: int | None = None, +) +``` + +* Server capacity knobs used by the runtime (CPU tokens, RAM reservoir, optional DB pool). +* You may pass a **dict** instead of `ServerResources`; Pydantic will coerce it. 
+
+**Bounds & defaults**
+
+* `cpu_cores ≥ 1`
+* `ram_mb ≥ 256`
+* `db_connection_pool` optional
+
+---
+
+### `Endpoint`
+
+```python
+Endpoint(
+    endpoint_name: str,   # normalized to lowercase
+    steps: list[dict],    # or Pydantic Step objects (dict is simpler)
+)
+```
+
+Each step is a dict with **exactly one** operation:
+
+```python
+{"kind": <step_kind>, "step_operation": {<operation_key>: <value>}}
+```
+
+**Valid step kinds and operation keys**
+
+| Kind (enum string)    | Operation dict (exactly 1 key)   | Units / constraints |
+| --------------------- | -------------------------------- | ------------------- |
+| `initial_parsing`     | `{ "cpu_time": <float> }`        | seconds, > 0        |
+| `cpu_bound_operation` | `{ "cpu_time": <float> }`        | seconds, > 0        |
+| `ram`                 | `{ "necessary_ram": <int> }`     | MB, > 0             |
+| `io_task_spawn`       | `{ "io_waiting_time": <float> }` | seconds, > 0        |
+| `io_llm`              | `{ "io_waiting_time": <float> }` | seconds, > 0        |
+| `io_wait`             | `{ "io_waiting_time": <float> }` | seconds, > 0        |
+| `io_db`               | `{ "io_waiting_time": <float> }` | seconds, > 0        |
+| `io_cache`            | `{ "io_waiting_time": <float> }` | seconds, > 0        |
+
+**Validation**
+
+* `endpoint_name` is lowercased automatically.
+* `step_operation` must have **one and only one** entry.
+* The operation **must match** the step kind (CPU ↔ `cpu_time`, RAM ↔ `necessary_ram`, IO ↔ `io_waiting_time`).
+* All numeric values must be **strictly positive**.
+
+---
+
+### `Server`
+
+```python
+Server(
+    id: str,
+    server_resources: ServerResources | dict,
+    endpoints: list[Endpoint],
+)
+```
+
+* Represents a server node hosting one or more endpoints.
+* `type` is fixed internally to `"server"`.
+* **Validation:** any non-standard `type` is rejected.
+
+---
+
+### `LoadBalancer` (optional)
+
+```python
+LoadBalancer(
+    id: str,
+    algorithms: Literal["round_robin", "least_connection"] = "round_robin",
+    server_covered: set[str] = set(),
+)
+```
+
+* Declares a logical load balancer and the set of server IDs it can route to.
+* **Graph-level rules** (checked when the payload is built): + + * `server_covered` must be a subset of declared server IDs. + * There must be an **edge from the LB to each covered server** (e.g., `lb-1 → srv-1`). + +--- + +### `Edge` + +```python +Edge( + id: str, + source: str, + target: str, + latency: dict | RVConfig, # recommend dict: {"mean": , "distribution": , "variance": } + edge_type: Literal["network_connection"] = "network_connection", + dropout_rate: float = 0.01, # in [0.0, 1.0] +) +``` + +* Directed link between two nodes. +* **Latency** is a random variable; most users pass a dict: + + * `mean: float` (required) + * `distribution: "poisson" | "normal" | "log_normal" | "exponential" | "uniform"` (default: `"poisson"`) + * `variance: float?` (for `normal`/`log_normal`, defaults to `mean` if omitted) + +**Validation** + +* `mean > 0` +* if provided, `variance ≥ 0` +* `dropout_rate ∈ [0.0, 1.0]` +* `source != target` + +**Graph-level rules** (enforced when the full payload is validated) + +* Every **target** must be a **declared node** (`client`, `server`, or `load_balancer`). +* **External IDs** (e.g., `"rqs-1"`) are allowed **only** as **sources**; they cannot appear as targets. +* **Unique edge IDs**. +* **No fan-out except LB**: only the load balancer is allowed to have multiple outgoing edges among declared nodes. + +--- + +## Type coercion & enums + +* You may pass strings for enums (`kind`, `distribution`, etc.); they will be validated against the allowed values. +* For `ServerResources` and `Edge.latency` you can pass dictionaries; Pydantic will coerce them to typed models. +* If you prefer, you can import and use the enums: + + ```python + from asyncflow.enums import Distribution + Edge(..., latency={"mean": 0.003, "distribution": Distribution.EXPONENTIAL}) + ``` + +--- + +## Best practices & pitfalls + +**Do** + +* Keep IDs unique across nodes of the same category and across edges. +* Ensure LB coverage and LB→server edges are in sync. 
+* Use small, measurable step values first; iterate once you see where queues and delays form. + +**Don’t** + +* Create multiple outgoing edges from non-LB nodes (graph validator will fail). +* Use zero/negative times or RAM (validators will raise). +* Target external IDs (only sources may be external). + +--- + +## Where these components fit + +You will typically combine these **components** with: + +* **workload** (`RqsGenerator`) from `asyncflow.workload` +* **settings** (`SimulationSettings`) from `asyncflow.settings` +* the **builder** (`AsyncFlow`) and **runner** (`SimulationRunner`) from the root package + +Example (wiring, abbreviated): + +```python +from asyncflow import AsyncFlow, SimulationRunner +from asyncflow.workload import RqsGenerator +from asyncflow.settings import SimulationSettings + +flow = ( + AsyncFlow() + .add_generator(RqsGenerator(...)) + .add_client(client) + .add_servers(server) + .add_edges(*edges) + .add_load_balancer(lb) # optional + .add_simulation_settings(SimulationSettings(...)) +) +payload = flow.build_payload() # validates graph-level rules +SimulationRunner(..., simulation_input=payload).run() +``` + +--- + +With these `components`, you can model any topology supported by AsyncFlow— +cleanly, type-checked, and with **clear, early** validation errors when something +is inconsistent. diff --git a/docs/api/enums.md b/docs/api/enums.md new file mode 100644 index 0000000..09aaeb6 --- /dev/null +++ b/docs/api/enums.md @@ -0,0 +1,197 @@ +# AsyncFlow — Public Enums API + +This page documents the **public, user-facing** enums exported from `asyncflow.enums`. These enums exist to remove “magic strings” from scenario code, offer IDE autocomplete, and make input validation more robust. Using them is optional — all Pydantic models still accept the corresponding string values — but recommended for Python users. 
+ +```python +from asyncflow.enums import ( + Distribution, + LbAlgorithmsName, + SampledMetricName, + EventMetricName, + # advanced (optional, if you define steps in Python) + EndpointStepCPU, EndpointStepIO, EndpointStepRAM, StepOperation, +) +``` + +> **Stability:** Values in these enums form part of the **public input contract**. They are semver-stable: new members may be added in minor releases, existing members won’t be renamed or removed except in a major release. + +--- + +## 1) Distribution + +Enumeration of probability distributions accepted by `RVConfig`. + +* `Distribution.POISSON` → `"poisson"` +* `Distribution.NORMAL` → `"normal"` +* `Distribution.LOG_NORMAL` → `"log_normal"` +* `Distribution.EXPONENTIAL` → `"exponential"` +* `Distribution.UNIFORM` → `"uniform"` + +**Used in:** `RVConfig` (e.g., workload users / rpm, edge latency). + +**Notes & validation:** + +* `mean` is required (coerced to float). +* For `NORMAL` and `LOG_NORMAL`, missing `variance` defaults to `mean`. +* For **edge latency** specifically, `mean > 0` and (if present) `variance ≥ 0`. + +**Example** + +```python +from asyncflow.enums import Distribution +from asyncflow.schemas.common.random_variables import RVConfig + +rv = RVConfig(mean=0.003, distribution=Distribution.EXPONENTIAL) +``` + +--- + +## 2) LbAlgorithmsName + +Load-balancing strategies available to the `LoadBalancer` node. + +* `LbAlgorithmsName.ROUND_ROBIN` → `"round_robin"` +* `LbAlgorithmsName.LEAST_CONNECTIONS` → `"least_connection"` + +**Used in:** `LoadBalancer(algorithms=...)`. + +**Example** + +```python +from asyncflow.enums import LbAlgorithmsName +from asyncflow.schemas.topology.nodes import LoadBalancer + +lb = LoadBalancer(id="lb-1", algorithms=LbAlgorithmsName.ROUND_ROBIN, server_covered={"srv-1", "srv-2"}) +``` + +--- + +## 3) SampledMetricName + +Time-series metrics collected at a fixed cadence (`sample_period_s`). 
+ +* `READY_QUEUE_LEN` → `"ready_queue_len"` +* `EVENT_LOOP_IO_SLEEP` → `"event_loop_io_sleep"` +* `RAM_IN_USE` → `"ram_in_use"` +* `EDGE_CONCURRENT_CONNECTION` → `"edge_concurrent_connection"` + +**Used in:** `SimulationSettings(enabled_sample_metrics=...)`. + +**Example** + +```python +from asyncflow.enums import SampledMetricName +from asyncflow.schemas.settings.simulation import SimulationSettings + +settings = SimulationSettings( + total_simulation_time=300, + sample_period_s=0.01, + enabled_sample_metrics={ + SampledMetricName.READY_QUEUE_LEN, + SampledMetricName.RAM_IN_USE, + }, +) +``` + +--- + +## 4) EventMetricName + +Per-event metrics (not sampled). + +* `RQS_CLOCK` → `"rqs_clock"` +* `LLM_COST` → `"llm_cost"` (reserved for future accounting) + +**Used in:** `SimulationSettings(enabled_event_metrics=...)`. + +**Example** + +```python +from asyncflow.enums import EventMetricName +SimulationSettings(enabled_event_metrics={EventMetricName.RQS_CLOCK}) +``` + +--- + +## 5) (Advanced) Endpoint step enums + +You only need these if you create `Endpoint` steps **programmatically** in Python. In YAML you’ll write strings; both modes are supported. + +### 5.1 EndpointStepCPU + +CPU-bound step kinds: + +* `INITIAL_PARSING` → `"initial_parsing"` +* `CPU_BOUND_OPERATION` → `"cpu_bound_operation"` + +### 5.2 EndpointStepRAM + +RAM step kind: + +* `RAM` → `"ram"` + +### 5.3 EndpointStepIO + +I/O-bound step kinds: + +* `TASK_SPAWN` → `"io_task_spawn"` +* `LLM` → `"io_llm"` +* `WAIT` → `"io_wait"` +* `DB` → `"io_db"` +* `CACHE` → `"io_cache"` + +### 5.4 StepOperation + +Operation keys allowed inside `Step.step_operation`: + +* `CPU_TIME` → `"cpu_time"` (seconds, positive) +* `NECESSARY_RAM` → `"necessary_ram"` (MB, positive) +* `IO_WAITING_TIME` → `"io_waiting_time"` (seconds, positive) + +**Validation rules (enforced by the schema):** + +* Every `Step` must have **exactly one** operation key. 
+* The operation must **match** the step kind: + + * CPU step → `CPU_TIME` + * RAM step → `NECESSARY_RAM` + * I/O step → `IO_WAITING_TIME` + +**Example** + +```python +from asyncflow.enums import EndpointStepCPU, EndpointStepIO, EndpointStepRAM, StepOperation +from asyncflow.schemas.topology.endpoint import Endpoint + +ep = Endpoint( + endpoint_name="/predict", + steps=[ + { "kind": EndpointStepRAM.RAM, "step_operation": { StepOperation.NECESSARY_RAM: 128 } }, + { "kind": EndpointStepCPU.INITIAL_PARSING, "step_operation": { StepOperation.CPU_TIME: 0.002 } }, + { "kind": EndpointStepIO.DB, "step_operation": { StepOperation.IO_WAITING_TIME: 0.012 } }, + ], +) +``` + +--- + +## Usage patterns & tips + +* **Strings vs Enums:** All models accept both. Enums help with IDE hints and prevent typos; strings keep YAML compact. Mix as you like. +* **Keep it public, not internal:** Only the enums above are considered public and stable. Internals like `SystemNodes`, `SystemEdges`, `ServerResourceName`, etc. are intentionally **not exported** (they may change). +* **Forward compatibility:** New enum members may appear in minor releases (e.g., a new `SampledMetricName`). Your existing configs remain valid; just opt in when you need them. 
+ +--- + +## Quick Reference + +| Enum | Where it’s used | Members (strings) | +| ------------------- | ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `Distribution` | `RVConfig` | `poisson`, `normal`, `log_normal`, `exponential`, `uniform` | +| `LbAlgorithmsName` | `LoadBalancer.algorithms` | `round_robin`, `least_connection` | +| `SampledMetricName` | `SimulationSettings.enabled_sample_metrics` | `ready_queue_len`, `event_loop_io_sleep`, `ram_in_use`, `edge_concurrent_connection` | +| `EventMetricName` | `SimulationSettings.enabled_event_metrics` | `rqs_clock`, `llm_cost` | +| `EndpointStep*` | `Endpoint.steps[*].kind` (Python) | CPU: `initial_parsing`, `cpu_bound_operation`; RAM: `ram`; IO: `io_task_spawn`, `io_llm`, `io_wait`, `io_db`, `io_cache` | +| `StepOperation` | `Endpoint.steps[*].step_operation` | `cpu_time`, `necessary_ram`, `io_waiting_time` | + +--- diff --git a/docs/api/high-level/analyzer.md b/docs/api/high-level/analyzer.md new file mode 100644 index 0000000..8804449 --- /dev/null +++ b/docs/api/high-level/analyzer.md @@ -0,0 +1,211 @@ +# AsyncFlow — Public API Reference: `ResultsAnalyzer` + +`ResultsAnalyzer` is the public object you use **after** a run to compute +latency statistics, derive throughput time-series, and visualize sampled +metrics collected from servers and edges. + +* **Input:** created and returned by `SimulationRunner.run()` +* **Output:** dictionaries and time-series you can print, log, chart, or export + +> **Import (public):** +> +> ```python +> from asyncflow.analysis import ResultsAnalyzer +> ``` + + +--- + +## TL;DR (minimal usage) + +```python +results = SimulationRunner(env=env, simulation_input=payload).run() + +# Aggregates +lat = results.get_latency_stats() # dict of p50, p95, p99, ... 
+ts, rps = results.get_throughput_series()   # per-second timestamps & RPS
+series = results.get_sampled_metrics()      # nested dict of time-series
+
+# Plotting (matplotlib)
+import matplotlib.pyplot as plt
+fig, axes = plt.subplots(2, 2, figsize=(12, 8))
+results.plot_latency_distribution(axes[0, 0])
+results.plot_throughput(axes[0, 1])
+results.plot_server_queues(axes[1, 0])
+results.plot_ram_usage(axes[1, 1])
+fig.tight_layout()
+```
+
+---
+
+## What the analyzer computes
+
+### Event-level aggregates (from `RQS_CLOCK`)
+
+* **Latency stats** from per-request `(start_time, finish_time)` tuples:
+
+  * keys: `total_requests, mean, median, std_dev, p95, p99, min, max`
+* **Throughput (RPS)** as a time-series:
+
+  * 1-second windows by default (see "Advanced: throughput window")
+
+### Sampled time-series (from runtime collectors)
+
+* Per-entity (server/edge) series for the **baseline mandatory** metrics:
+
+  * `ready_queue_len` (server)
+  * `event_loop_io_sleep` (server)
+  * `ram_in_use` (server)
+  * `edge_concurrent_connection` (edge)
+
+> These are sampled every `sample_period_s` defined in `SimulationSettings`.
+
+---
+
+## Public API
+
+### Aggregates
+
+```python
+get_latency_stats() -> dict[LatencyKey, float]
+```
+
+Returns latency summary statistics. If no requests completed, returns `{}`.
+
+```python
+get_throughput_series() -> tuple[list[float], list[float]]
+```
+
+Returns `(timestamps_in_seconds, rps_values)`. If no traffic, returns `([], [])`.
+
+### Sampled metrics
+
+```python
+get_sampled_metrics() -> dict[str, dict[str, list[float]]]
+```
+
+Returns a nested dictionary:
+
+```python
+{
+  "<metric_name>": { "<entity_id>": [v0, v1, ...] }
+}
+```
+
+* Metric names are strings matching the public enums (e.g. `"ready_queue_len"`).
+* `entity_id` is a **server id** (for server metrics) or an **edge id** (for edge metrics).
+ +### Plotting helpers + +All plotting helpers draw on a provided `matplotlib.axes.Axes`: + +```python +plot_latency_distribution(ax: Axes) -> None +plot_throughput(ax: Axes) -> None +plot_server_queues(ax: Axes) -> None +plot_ram_usage(ax: Axes) -> None +``` + +Behavior: + +* If data is missing/empty, the plot shows a “no data” message. +* With a load balancer (multiple servers), per-server lines are labeled by server id automatically. + +--- + +## Return contracts (shapes & keys) + +### `get_latency_stats()` + +Example: + +```python +{ + 'total_requests': 1200.0, + 'mean': 0.0123, + 'median': 0.0108, + 'std_dev': 0.0041, + 'p95': 0.0217, + 'p99': 0.0302, + 'min': 0.0048, + 'max': 0.0625 +} +``` + +### `get_throughput_series()` + +Example: + +```python +timestamps = [1.0, 2.0, 3.0, ...] # seconds from t=0 +rps = [ 36, 41, 38, ...] # requests per second +``` + +### `get_sampled_metrics()` + +Example subset: + +```python +{ + "ready_queue_len": { + "srv-1": [0, 1, 2, 1, ...], + "srv-2": [0, 0, 1, 0, ...], + }, + "event_loop_io_sleep": { + "srv-1": [3, 5, 4, 6, ...], + }, + "ram_in_use": { + "srv-1": [128.0, 160.0, 192.0, ...], + }, + "edge_concurrent_connection": { + "lb-1->srv-1": [0, 1, 1, 2, ...], # your edge ids + } +} +``` + +Time base for these lists is implicit: index `i` corresponds to time `i * sample_period_s`. + +--- + +## Plotting recipes + +### Multi-panel overview + +```python +import matplotlib.pyplot as plt + +fig, axes = plt.subplots(2, 2, figsize=(12, 8)) +results.plot_latency_distribution(axes[0, 0]) +results.plot_throughput(axes[0, 1]) +results.plot_server_queues(axes[1, 0]) +results.plot_ram_usage(axes[1, 1]) + +fig.suptitle("AsyncFlow – Simulation Overview", y=1.02) +fig.tight_layout() +``` + +## Edge cases & guarantees + +* **No traffic:** all getters are safe: + + * `get_latency_stats()` → `{}` + * `get_throughput_series()` → `([], [])` + * Plots show “no data”. 
+* **Multiple servers / LB:** queue and RAM plots include **one line per server id**. +* **Metric availability:** the analyzer only exposes the **baseline mandatory** sampled metrics; if a metric wasn’t enabled/recorded, it won’t appear in the nested dict. +* **Units:** times are in **seconds**; RAM is in **MB**; RPS is **requests/second**. + +--- + +## Performance characteristics + +* Aggregations (percentiles, std) are **vectorized** via NumPy. +* Memory footprint of sampled series ≈ + `total_simulation_time / sample_period_s × (#metrics × #entities)`. +* Prefer a coarser `sample_period_s` for very long runs. + +--- + + + + diff --git a/docs/api/high-level/builder.md b/docs/api/high-level/builder.md new file mode 100644 index 0000000..4a8ff6c --- /dev/null +++ b/docs/api/high-level/builder.md @@ -0,0 +1,288 @@ +# AsyncFlow — Public API Reference: `AsyncFlow` Builder + +`AsyncFlow` is the **fluent builder** that assembles a complete, validated +`SimulationPayload`. It lets you compose workload, topology, edges, and global +settings with clear types and fail-fast validation. The resulting payload can be +run with `SimulationRunner`. 
+ +* **You write:** small, typed building blocks (workload + components + settings) +* **Builder does:** composition & Pydantic validation (graph integrity, rules) +* **Runner does:** execution & metrics collection + +--- + +## Imports + +```python +# Builder + Runner +from asyncflow import AsyncFlow, SimulationRunner + +# Public leaf schemas +from asyncflow.workload import RqsGenerator, RVConfig +from asyncflow.components import Client, Server, Endpoint, Edge, LoadBalancer +from asyncflow.settings import SimulationSettings +``` + +--- + +## Quick start + +```python +import simpy +from asyncflow import AsyncFlow, SimulationRunner +from asyncflow.workload import RqsGenerator, RVConfig +from asyncflow.components import Client, Server, Endpoint, Edge +from asyncflow.settings import SimulationSettings + +# 1) Workload +rqs = RqsGenerator( + id="rqs-1", + avg_active_users=RVConfig(mean=50), # Poisson by default + avg_request_per_minute_per_user=RVConfig(mean=30), # MUST be Poisson +) + +# 2) Components +client = Client(id="client-1") +server = Server( + id="srv-1", + server_resources={"cpu_cores": 1, "ram_mb": 1024}, + endpoints=[ + Endpoint( + endpoint_name="/hello", + steps=[ + {"kind": "ram", "step_operation": {"necessary_ram": 32}}, + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}}, + ], + ) + ], +) + +edges = [ + Edge(id="gen-client", source="rqs-1", target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}), + Edge(id="client-srv1", source="client-1", target="srv-1", + latency={"mean": 0.003, "distribution": "exponential"}), + Edge(id="srv1-client", source="srv-1", target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}), +] + +# 3) Settings (baseline sampled metrics are mandatory by design) +settings = SimulationSettings(total_simulation_time=300, sample_period_s=0.01) + +# 4) Build (validates everything) +payload = ( + AsyncFlow() + 
.add_generator(rqs) + .add_client(client) + .add_servers(server) + .add_edges(*edges) + .add_simulation_settings(settings) + .build_payload() +) + +# 5) Run +env = simpy.Environment() +results = SimulationRunner(env=env, simulation_input=payload).run() +``` + +--- + +## API + +```python +class AsyncFlow: + def add_generator(self, rqs_generator: RqsGenerator) -> Self: ... + def add_client(self, client: Client) -> Self: ... + def add_servers(self, *servers: Server) -> Self: ... + def add_edges(self, *edges: Edge) -> Self: ... + def add_simulation_settings(self, sim_settings: SimulationSettings) -> Self: ... + def add_load_balancer(self, load_balancer: LoadBalancer) -> Self: ... + def build_payload(self) -> SimulationPayload: ... +``` + +### Method details + +* **`add_generator(rqs_generator)`** + Adds the stochastic workload model. + Errors: `TypeError` if not a `RqsGenerator`. + +* **`add_client(client)`** + Adds the single client node. + Errors: `TypeError` if not a `Client`. + +* **`add_servers(*servers)`** + Adds one or more servers (varargs). + Errors: `TypeError` if any arg is not a `Server`. + +* **`add_edges(*edges)`** + Adds one or more directed edges (varargs). + Errors: `TypeError` if any arg is not an `Edge`. + Notes: *Targets must be declared nodes; sources may be external (e.g. `"rqs-1"`).* + +* **`add_load_balancer(load_balancer)`** *(optional)* + Adds a load balancer node. + Errors: `TypeError` if not a `LoadBalancer`. + +* **`add_simulation_settings(sim_settings)`** + Adds global settings (duration, sampling period, metric selection). + Errors: `TypeError` if not a `SimulationSettings`. + +* **`build_payload()` → `SimulationPayload`** + Finalize composition and run full validation. + Errors: `ValueError` on missing parts or invalid graph. + +--- + +## Validation performed by `build_payload()` + +(Implemented via Pydantic model validation across the payload’s schemas.) + +1. 
**Presence** + + * Requires: generator, client, **≥ 1 server**, **≥ 1 edge**, settings. + +2. **Unique IDs** + + * Duplicate server IDs or edge IDs are rejected. + +3. **Node types** + + * `client`, `server`, and `load_balancer` are fixed enums; enforced per node. + +4. **Edge integrity** + + * Every **target** must be a declared node ID. + * **External IDs** (e.g. the generator id) are allowed **only** as **sources**. + * **No self-loops** (`source != target`). + +5. **Load balancer sanity** (if present) + + * `server_covered ⊆ declared servers`. + * There is an **outgoing edge from the LB to every covered server**. + +6. **Engine rule: no fan-out except LB** + + * Among declared nodes, only the LB may have multiple outgoing edges. + +7. **Latency RV constraints (edges)** + + * `latency.mean > 0`; if `variance` provided, `variance ≥ 0`. + +If any rule fails, a **descriptive `ValueError`** points to the offending field/entity. + +--- + +## Typical errors & how to fix + +* **Missing parts** + `ValueError: The generator input must be instantiated before the simulation` + → Call the missing `add_*` method before `build_payload()`. + +* **Wrong type passed** + `TypeError: All the instances must be of the type Server` + → Ensure you pass `Server` objects to `add_servers(...)` (not dicts). + +* **Unknown edge target** + `ValueError: Edge client-1->srv-X references unknown target node 'srv-X'` + → Add a `Server(id="srv-X", ...)` or fix the edge target. + +* **LB coverage without edges** + `ValueError: Servers ['srv-2'] are covered by LB 'lb-1' but have no outgoing edge from it.` + → Add `Edge(source="lb-1", target="srv-2", ...)`. + +* **Illegal fan-out** + `ValueError: Only the load balancer can have multiple outgoing edges. Offending sources: ['client-1']` + → Route fan-out through a `LoadBalancer`. 
+ +--- + +## Extended example — with Load Balancer + +```python +from asyncflow import AsyncFlow, SimulationRunner +from asyncflow.workload import RqsGenerator, RVConfig +from asyncflow.components import Client, Server, Endpoint, Edge, LoadBalancer +from asyncflow.settings import SimulationSettings +import simpy + +client = Client(id="client-1") + +srv1 = Server( + id="srv-1", + server_resources={"cpu_cores": 1, "ram_mb": 1024}, + endpoints=[Endpoint(endpoint_name="/api", + steps=[{"kind":"ram","step_operation":{"necessary_ram":64}}])], +) +srv2 = Server( + id="srv-2", + server_resources={"cpu_cores": 2, "ram_mb": 2048}, + endpoints=[Endpoint(endpoint_name="/api", + steps=[{"kind":"io_db","step_operation":{"io_waiting_time":0.012}}])], +) + +lb = LoadBalancer(id="lb-1", algorithms="round_robin", server_covered={"srv-1","srv-2"}) + +edges = [ + Edge(id="gen-client", source="rqs-1", target="client-1", + latency={"mean":0.002,"distribution":"exponential"}), + Edge(id="client-lb", source="client-1", target="lb-1", + latency={"mean":0.002,"distribution":"exponential"}), + Edge(id="lb-srv1", source="lb-1", target="srv-1", + latency={"mean":0.002,"distribution":"exponential"}), + Edge(id="lb-srv2", source="lb-1", target="srv-2", + latency={"mean":0.002,"distribution":"exponential"}), + Edge(id="srv1-client", source="srv-1", target="client-1", + latency={"mean":0.003,"distribution":"exponential"}), + Edge(id="srv2-client", source="srv-2", target="client-1", + latency={"mean":0.003,"distribution":"exponential"}), +] + +payload = ( + AsyncFlow() + .add_generator(RqsGenerator( + id="rqs-1", + avg_active_users=RVConfig(mean=120), + avg_request_per_minute_per_user=RVConfig(mean=20), + user_sampling_window=60, + )) + .add_client(client) + .add_servers(srv1, srv2) + .add_load_balancer(lb) + .add_edges(*edges) + .add_simulation_settings(SimulationSettings(total_simulation_time=600, sample_period_s=0.02)) + .build_payload() +) + +env = simpy.Environment() +results = 
SimulationRunner(env=env, simulation_input=payload).run() +``` + +--- + +## Tips & pitfalls + +* **IDs are case-sensitive** and must be unique per category (servers, edges, LB). +* **Edge targets must be declared nodes.** External IDs (like the generator) can only appear as **sources**. +* **LB fan-out only.** If you need branching, introduce a `LoadBalancer`. +* **RqsGenerator constraints:** + `avg_request_per_minute_per_user` **must** be Poisson; + `avg_active_users` must be **Poisson** or **Normal** (variance auto-filled if missing). +* **Step coherence:** + CPU step → `cpu_time`; RAM step → `necessary_ram`; I/O step → `io_waiting_time`. Exactly **one** per step. + +--- + +## Interop: YAML ↔ Python + +You can build the same payload from YAML and then use `SimulationRunner.from_yaml(...)`. Field names mirror the Python model names and the enum values (strings) are identical. + +--- + +## Versioning & stability + +* Exceptions: `TypeError` for wrong types passed to builder; `ValueError` for invalid or incomplete payloads. +* Validation rules and enum names are part of the public contract (semantic versioning applies). +* The builder does not mutate your objects; it assembles and validates them into a `SimulationPayload`. + + diff --git a/docs/api/high-level/runner.md b/docs/api/high-level/runner.md new file mode 100644 index 0000000..27a0c87 --- /dev/null +++ b/docs/api/high-level/runner.md @@ -0,0 +1,230 @@ +# AsyncFlow — Public API Reference: `SimulationRunner` + +`SimulationRunner` is the **orchestrator** of a simulation run. It takes a fully +validated `SimulationPayload`, instantiates the runtime actors, wires their +connections, starts the processes inside a `simpy.Environment`, collects sampled +metrics, advances the virtual clock, and returns a `ResultsAnalyzer` for +post-run querying and plotting. + +Use it together with the `AsyncFlow` builder (Python) or a YAML payload. 
+ +--- + +## Imports + +```python +from asyncflow import SimulationRunner, AsyncFlow # high-level API +from asyncflow.settings import SimulationSettings +from asyncflow.components import Client, Server, Endpoint, Edge, LoadBalancer +from asyncflow.workload import RqsGenerator, RVConfig +import simpy +``` + +--- + +## Quick start + +```python +# 1) Build a validated payload (see the builder docs for details) +payload = ( + AsyncFlow() + .add_generator(RqsGenerator( + id="rqs-1", + avg_active_users=RVConfig(mean=50), + avg_request_per_minute_per_user=RVConfig(mean=30), + )) + .add_client(Client(id="client-1")) + .add_servers( + Server( + id="srv-1", + server_resources={"cpu_cores": 1, "ram_mb": 1024}, + endpoints=[Endpoint(endpoint_name="/hello", steps=[ + {"kind": "ram", "step_operation": {"necessary_ram": 32}}, + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}}, + ])], + ) + ) + .add_edges( + Edge(id="gen-client", source="rqs-1", target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}), + Edge(id="client-srv1", source="client-1", target="srv-1", + latency={"mean": 0.003, "distribution": "exponential"}), + Edge(id="srv1-client", source="srv-1", target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}), + ) + .add_simulation_settings(SimulationSettings(total_simulation_time=300, sample_period_s=0.01)) + .build_payload() +) + +# 2) Run +env = simpy.Environment() +results = SimulationRunner(env=env, simulation_input=payload).run() + +# 3) Analyze +print(results.get_latency_stats()) +ts, rps = results.get_throughput_series() +sampled = results.get_sampled_metrics() +``` + +--- + +## Class reference + +```python +class SimulationRunner: + def __init__(self, *, env: simpy.Environment, simulation_input: SimulationPayload) -> None: ... + def run(self) -> ResultsAnalyzer: ... 
+ @classmethod + def from_yaml(cls, *, env: simpy.Environment, yaml_path: str | Path) -> "SimulationRunner": ... +``` + +### Parameters + +* **`env: simpy.Environment`** + The SimPy environment that controls virtual time. You own its lifetime. + +* **`simulation_input: SimulationPayload`** + A fully validated payload (typically created with `AsyncFlow.build_payload()` or + parsed from YAML). It contains workload, topology graph, and settings. + +### Returns + +* **`run() -> ResultsAnalyzer`** + A results façade exposing: + + * `get_latency_stats() -> dict` (mean, median, p95, p99, …) + * `get_throughput_series() -> (timestamps, rps)` + * `get_sampled_metrics() -> dict[str, dict[str, list[float]]]` + * plotting helpers: `plot_latency_distribution(ax)`, `plot_throughput(ax)`, + `plot_server_queues(ax)`, `plot_ram_usage(ax)` + +### Convenience: YAML entry point + +```python +env = simpy.Environment() +runner = SimulationRunner.from_yaml(env=env, yaml_path="scenario.yml") +results = runner.run() +``` + +`from_yaml` uses `yaml.safe_load` and validates with the same Pydantic schemas, +so it enforces the exact same contract as the builder. + +--- + +## Lifecycle & internal phases + +`run()` performs the following steps: + +1. **Build runtimes** + + * `RqsGeneratorRuntime` (workload) + * `ClientRuntime` + * `ServerRuntime` for each server (CPU/RAM resources bound) + * `LoadBalancerRuntime` (optional) + +2. **Wire edges** + Creates an `EdgeRuntime` for each edge and assigns the appropriate *inbox* + (`simpy.Store`) of the target actor. Sets the `out_edge` (or `out_edges` for + the load balancer) on the source actor. + +3. **Start processes** + Registers every actor’s `.start()` coroutine in the environment and starts the + **SampledMetricCollector** that snapshots: + + * server **ready queue length**, **I/O queue length**, **RAM in use** + * edge **concurrent connections** + at the configured `sample_period_s`. These sampled metrics are **mandatory** + in this version. 
+ +4. **Advance the clock** + `env.run(until=SimulationSettings.total_simulation_time)` + +5. **Return analyzer** + Wraps the collected state into `ResultsAnalyzer` for stats & plots. + +--- + +## Input contract (what the runner expects) + +The runner assumes `simulation_input` has already passed full validation: + +* All edge targets are declared nodes; external IDs appear only as sources. +* Load balancer coverage and edges are coherent. +* No self-loops; only the LB fans out among declared nodes. +* Edge latency RVs have `mean > 0` (and `variance ≥ 0` if provided). +* Server resources meet minimums (≥ 1 core, ≥ 256 MB RAM), etc. + +> Build with `AsyncFlow` or load from YAML — both paths enforce the same rules. + +--- + +## Error handling + +* **Type errors (builder misuse)** should not reach the runner; they’re raised by the builder (`TypeError`) before `build_payload()`. +* **Validation errors** (`ValueError`) are raised during payload construction/validation, not by the runner. +* **Wiring errors** (`TypeError`) are guarded by validation and indicate an unexpected mismatch between payload and runtime types. With a validated payload, you shouldn’t see them. + +--- + +## Determinism & RNG + +The runner uses `numpy.random.default_rng()` internally. Seeding is not yet a +public parameter; exact reproducibility across runs is **not guaranteed** in +this version. If you need strict reproducibility, pin your environment and keep +payloads identical; a dedicated seeding hook may be added in a future release. + +--- + +## Performance characteristics + +* **Runtime cost** scales with the number of requests and the complexity of + endpoint steps (CPU vs I/O waits). +* **Sampling memory** roughly scales as + `(#entities × #enabled sampled metrics) × (total_simulation_time / sample_period_s)`. + For long runs, consider a larger `sample_period_s` (e.g., `0.02–0.05`) to + reduce the size of time series. 
+* The collector is a single coroutine that performs `O(entities)` appends on + each tick; the hot path inside actors remains `O(1)` per event. + +--- + +## Usage with Load Balancers + +Topologies **with a LB** are first-class: + +* Only the LB may have multiple outgoing edges (fan-out). +* The analyzer operates on **lists** of servers and edges; plots will naturally + show one line per server/edge where appropriate. +* Validation ensures every `server_covered` by the LB has a corresponding LB→server edge. + +--- + +## One-shot runner (recommended) + +A `SimulationRunner` instance is designed to **run once**. For a new scenario +(or new settings), create a **new** `simpy.Environment` and a **new** +`SimulationRunner`. Reusing a runner after `run()` is not supported. + +--- + +## Best practices + +* **Let the builder fail fast.** Always construct payloads via `AsyncFlow` (or YAML + validation) before running. +* **Keep steps coherent.** CPU step → `cpu_time`, RAM step → `necessary_ram`, I/O step → `io_waiting_time`. Exactly one key per step. +* **Model the network realistically.** Put latency RVs on **every** hop that matters (client↔LB, LB↔server, server↔client). +* **Tune sampling.** High-frequency sampling is useful for short diagnostic runs; increase `sample_period_s` for long capacity sweeps. + +--- + +## See also + +* **Builder:** `AsyncFlow` — compose and validate the payload (workload, topology, settings). +* **Analyzer:** `ResultsAnalyzer` — query KPIs and plot latency/throughput/queues/RAM. +* **Workload:** `RqsGenerator`, `RVConfig` — define traffic models (Poisson or Gaussian–Poisson). +* **Components:** `Client`, `Server`, `Endpoint`, `Edge`, `LoadBalancer`. + +This API keeps **assembly** and **execution** separate: you design and validate +your system with `AsyncFlow`, then hand it to `SimulationRunner` to execute and +measure — a clean workflow that scales from minimal examples to complex, +load-balanced topologies. 
diff --git a/docs/api/settings.md b/docs/api/settings.md new file mode 100644 index 0000000..2970596 --- /dev/null +++ b/docs/api/settings.md @@ -0,0 +1,200 @@ + +# AsyncFlow — Public API Reference: `settings` + +This page documents the **public settings schema** you import from: + +```python +from asyncflow.settings import SimulationSettings +``` + +These settings control **simulation duration**, **sampling cadence**, and **which metrics are collected**. The model is validated with Pydantic and ships with safe defaults. + +> **Contract note** +> The four **baseline sampled metrics** are **mandatory** in the current release: +> +> * `ready_queue_len` +> * `event_loop_io_sleep` +> * `ram_in_use` +> * `edge_concurrent_connection` +> Future metrics may be opt-in; these four must remain enabled. + +--- + +## Imports + +```python +from asyncflow.settings import SimulationSettings + +# Optional: use enums instead of strings (recommended for IDE/type-checking) +from asyncflow.enums import SampledMetricName, EventMetricName +``` + +--- + +## Quick start + +```python +from asyncflow.settings import SimulationSettings +from asyncflow.enums import SampledMetricName as S, EventMetricName as E + +settings = SimulationSettings( + total_simulation_time=300, # seconds (≥ 5) + sample_period_s=0.01, # seconds, 0.001 ≤ value ≤ 0.1 + # Baseline sampled metrics are mandatory (may include more in future): + enabled_sample_metrics={S.READY_QUEUE_LEN, + S.EVENT_LOOP_IO_SLEEP, + S.RAM_IN_USE, + S.EDGE_CONCURRENT_CONNECTION}, + # Event metrics (RQS_CLOCK is the default/mandatory one today): + enabled_event_metrics={E.RQS_CLOCK}, +) +``` + +Pass the object to the builder: + +```python +from asyncflow import AsyncFlow + +payload = ( + AsyncFlow() + # … add workload, topology, edges … + .add_simulation_settings(settings) + .build_payload() +) +``` + +--- + +## Schema reference + +### `SimulationSettings` + +```python +SimulationSettings( + total_simulation_time: int = 3600, # ≥ 5 + 
sample_period_s: float = 0.01, # 0.001 ≤ value ≤ 0.1 + enabled_sample_metrics: set[SampledMetricName] = { + "ready_queue_len", + "event_loop_io_sleep", + "ram_in_use", + "edge_concurrent_connection", + }, + enabled_event_metrics: set[EventMetricName] = {"rqs_clock"}, +) +``` + +**Fields** + +* **`total_simulation_time`** *(int, default `3600`)* + Simulation horizon in **seconds**. **Validation:** `≥ 5`. + +* **`sample_period_s`** *(float, default `0.01`)* + Sampling cadence for time-series metrics (seconds). + **Validation:** `0.001 ≤ sample_period_s ≤ 0.1`. + **Trade-off:** lower ⇒ higher temporal resolution but more samples/memory. + +* **`enabled_sample_metrics`** *(set of enums/strings; default = baseline 4)* + **Must include at least the baseline set** shown above. You can pass enum + values or the corresponding strings. + +* **`enabled_event_metrics`** *(set of enums/strings; default `{"rqs_clock"}`)* + Per-event KPIs (not tied to `sample_period_s`). `rqs_clock` is required today; + `llm_cost` is reserved for future use. + +--- + +## Supported metric enums + +You may pass **strings** or import the enums (recommended). 
+ +### Sampled (time-series) + +* `ready_queue_len` — event-loop ready-queue length +* `event_loop_io_sleep` — time spent waiting on I/O in the loop +* `ram_in_use` — MB of RAM in use (per server) +* `edge_concurrent_connection` — concurrent connections per edge + +```python +from asyncflow.enums import SampledMetricName as S +baseline = {S.READY_QUEUE_LEN, S.EVENT_LOOP_IO_SLEEP, S.RAM_IN_USE, S.EDGE_CONCURRENT_CONNECTION} +``` + +### Event (per-event) + +* `rqs_clock` — start/end timestamps for each request (basis for latency) +* `llm_cost` — reserved for future cost accounting + +```python +from asyncflow.enums import EventMetricName as E +SimulationSettings(enabled_event_metrics={E.RQS_CLOCK}) +``` + +--- + +## Practical presets + +* **Lean but compliant (fast inner-loop dev)** + Keep baseline metrics; increase the sampling period to reduce cost: + + ```python + SimulationSettings( + total_simulation_time=10, + sample_period_s=0.05, # fewer samples + # enabled_* use defaults with mandatory baseline & rqs_clock + ) + ``` + +* **High-resolution debugging (short runs)** + + ```python + SimulationSettings( + total_simulation_time=60, + sample_period_s=0.002, # finer resolution + ) + ``` + +* **Long scenarios (memory-friendly)** + + ```python + SimulationSettings( + total_simulation_time=1800, + sample_period_s=0.05, # fewer samples over long runs + ) + ``` + +--- + +## YAML ⇄ Python mapping + +| YAML (`sim_settings`) | Python (`SimulationSettings`) | +| -------------------------- | ------------------------------ | +| `total_simulation_time` | `total_simulation_time` | +| `sample_period_s` | `sample_period_s` | +| `enabled_sample_metrics[]` | `enabled_sample_metrics={...}` | +| `enabled_event_metrics[]` | `enabled_event_metrics={...}` | + +Strings in YAML map to the same enum names used by Python. 
+ +--- + +## Validation & guarantees + +* `total_simulation_time ≥ 5` +* `0.001 ≤ sample_period_s ≤ 0.1` +* `enabled_sample_metrics ⊇ {ready_queue_len, event_loop_io_sleep, ram_in_use, edge_concurrent_connection}` +* `enabled_event_metrics` must include `rqs_clock` (current contract) +* Enum names are part of the public contract (stable; new values may be added in minor versions) + +--- + +## Tips & pitfalls + +* **Memory/CPU budgeting**: total samples per metric ≈ + `total_simulation_time / sample_period_s`. Long runs with very small + sampling periods produce large arrays. +* **Use enums for safety**: strings work, but enums enable IDE completion and mypy checks. +* **Forward compatibility**: additional sampled/event metrics may become available; the four baseline sampled metrics remain mandatory for the engine’s collectors. + +--- + +This reflects your current implementation: baseline sampled metrics are **required**; event metrics currently require `rqs_clock`; and sampling bounds match the `SamplePeriods` constants. diff --git a/docs/api/workload.md b/docs/api/workload.md new file mode 100644 index 0000000..c560a65 --- /dev/null +++ b/docs/api/workload.md @@ -0,0 +1,197 @@ +# AsyncFlow — Public Workload API + +This page documents the **workload models** exported from: + +```python +from asyncflow.workload import RqsGenerator, RVConfig +``` + +Use these to describe **how traffic is generated** (active users, per-user RPM, and the re-sampling window). The workload is independent from your topology and settings and plugs into the builder or payload directly. + +> **Stability:** This is part of the public API. Fields and enum values are semver-stable (new options may be added in minor releases; breaking changes only in a major). 
+ +--- + +## Quick start + +```python +from asyncflow.workload import RqsGenerator, RVConfig + +rqs = RqsGenerator( + id="rqs-1", + avg_active_users=RVConfig(mean=100, distribution="poisson"), # or "normal" + avg_request_per_minute_per_user=RVConfig(mean=20, distribution="poisson"), + user_sampling_window=60, # seconds, re-sample active users every 60s +) + +# … then compose with the builder +from asyncflow.builder.asyncflow_builder import AsyncFlow +payload = (AsyncFlow() + .add_generator(rqs) + # .add_client(...).add_servers(...).add_edges(...).add_simulation_settings(...) + .build_payload()) +``` + +--- + +## `RqsGenerator` (workload root) + +```python +class RqsGenerator(BaseModel): + id: str + type: SystemNodes = SystemNodes.GENERATOR # fixed + avg_active_users: RVConfig # Poisson or Normal + avg_request_per_minute_per_user: RVConfig # Poisson (required) + user_sampling_window: int = 60 # seconds, bounds [1, 120] +``` + +### Field reference + +| Field | Type | Allowed / Bounds | Description | +| --------------------------------- | --------------- | --------------------------------------- | ------------------------------------------------------------------------------------------------------------ | +| `id` | `str` | — | Identifier used by edges (e.g., `source="rqs-1"`). | +| `type` | enum (fixed) | `generator` | Constant; not user-set. | +| `avg_active_users` | `RVConfig` | **Distribution**: `poisson` or `normal` | Random variable for active concurrent users. If `normal`, variance is auto-filled (see `RVConfig`). | +| `avg_request_per_minute_per_user` | `RVConfig` | **Distribution**: **must be** `poisson` | Per-user rate (RPM). Enforced to Poisson by validator. | +| `user_sampling_window` | `int` (seconds) | `1 ≤ value ≤ 120` | How often to re-sample `avg_active_users`. Larger windows → slower drift; smaller windows → more volatility. | + +> Units: RPM = requests per **minute**; times are in **seconds**. 
+ +--- + +## `RVConfig` (random variables) + +```python +class RVConfig(BaseModel): + mean: float + distribution: Distribution = "poisson" + variance: float | None = None +``` + +### Behavior & validation + +* **`mean`** is required and coerced to `float`. (Generic numeric check; positivity is **contextual**. For example, edge latency enforces `mean > 0`, while workloads accept `mean ≥ 0` and rely on samplers to truncate at 0 when needed.) +* **`distribution`** defaults to `"poisson"`. +* **Variance auto-fill:** if `distribution` is `"normal"` or `"log_normal"` **and** `variance` is omitted, it is set to `variance = mean`. + +### Supported distributions + +* `"poisson"`, `"normal"`, `"log_normal"`, `"exponential"`, `"uniform"` + (For **workload**: `avg_active_users` → Poisson/Normal; `avg_request_per_minute_per_user` → **Poisson only**.) + +--- + +## How the workload is sampled (engine semantics) + +AsyncFlow provides **joint samplers** for the two main cases: + +1. **Poisson–Poisson** (`avg_active_users ~ Poisson`, `rpm ~ Poisson`) + +* Every `user_sampling_window` seconds, draw users: + `U ~ Poisson(mean_users)`. +* Aggregate rate: `Λ = U * (rpm_per_user / 60)` (requests/second). +* Within the window, inter-arrival gaps are exponential: + `Δt ~ Exponential(Λ)` (via inverse CDF). +* If `U == 0`, no arrivals until the next window. + +2. **Gaussian–Poisson** (`avg_active_users ~ Normal`, `rpm ~ Poisson`) + +* Draw users with **truncation at 0** (negative draws become 0): + `U ~ max(N(mean, variance), 0)`. +* Then same steps as above: `Λ = U * (rpm_per_user / 60)`, `Δt ~ Exponential(Λ)`. + +**Implications of `user_sampling_window`:** + +* Smaller windows → more frequent changes in `U` (bursty arrivals). +* Larger windows → steadier rate within each window, fewer rate shifts. + +--- + +## Examples + +### A. 
Steady mid-load (Poisson–Poisson) + +```python +rqs = RqsGenerator( + id="steady", + avg_active_users=RVConfig(mean=80, distribution="poisson"), + avg_request_per_minute_per_user=RVConfig(mean=15, distribution="poisson"), + user_sampling_window=60, +) +``` + +### B. Bursty users (Gaussian–Poisson) + +```python +rqs = RqsGenerator( + id="bursty", + avg_active_users=RVConfig(mean=50, distribution="normal", variance=200), # bigger var → burstier + avg_request_per_minute_per_user=RVConfig(mean=18, distribution="poisson"), + user_sampling_window=15, # faster re-sampling → faster drift +) +``` + +### C. Tiny smoke test + +```python +rqs = RqsGenerator( + id="smoke", + avg_active_users=RVConfig(mean=1, distribution="poisson"), + avg_request_per_minute_per_user=RVConfig(mean=2, distribution="poisson"), + user_sampling_window=30, +) +``` + +--- + +## YAML / JSON equivalence + +If you configure via YAML/JSON, the equivalent block is: + +```yaml +rqs_input: + id: rqs-1 + avg_active_users: + mean: 100 + distribution: poisson # or normal + # variance: 100 # optional; auto=mean if normal/log_normal + avg_request_per_minute_per_user: + mean: 20 + distribution: poisson # must be poisson + user_sampling_window: 60 # [1..120] seconds +``` + +--- + +## Validation & error messages (what you can expect) + +* `avg_request_per_minute_per_user.distribution != poisson` + → `ValueError("At the moment the variable avg request must be Poisson")` +* `avg_active_users.distribution` not in `{poisson, normal}` + → `ValueError("At the moment the variable active user must be Poisson or Gaussian")` +* Non-numeric `mean` in any `RVConfig` + → `ValueError("mean must be a number (int or float)")` +* `user_sampling_window` outside `[1, 120]` + → Pydantic range validation error with clear bounds in the message. + +> Note: Positivity for means is enforced **contextually**. For workload, negative draws are handled by the samplers (e.g., truncated Normal). 
For edge latency, positivity is enforced at the edge model level. + +--- + +## Common pitfalls & tips + +* **Using Normal without variance:** If you set `distribution="normal"` and omit `variance`, it auto-fills to `variance=mean`. Set it explicitly if you want heavier or lighter variance than the default. +* **Confusing units:** RPM is **per minute**, not per second. The engine converts internally. +* **Over-reactive windows:** Very small `user_sampling_window` (e.g., `1–5s`) makes the rate jumpy; this is fine for “bursty” scenarios but can be noisy. +* **Zero arrivals:** If a window samples `U=0`, you’ll get no arrivals until the next window; this is expected. + +--- + +## Interplay with Settings & Metrics + +* The workload **does not** depend on the sampling cadence of time-series metrics (`SimulationSettings.sample_period_s`). Sampling controls **observability**, not arrivals. +* **Baseline sampled metrics are mandatory** in the current release (ready-queue length, I/O sleep, RAM, edge concurrency). Future metrics can be opt-in. + +--- + +With `RqsGenerator` + `RVConfig` you can describe steady, bursty, or sparse loads with a few lines—then reuse the same topology and settings to compare how architecture choices behave under different traffic profiles. diff --git a/docs/guides/dev-workflow.md b/docs/guides/dev-workflow.md new file mode 100644 index 0000000..c5d06ef --- /dev/null +++ b/docs/guides/dev-workflow.md @@ -0,0 +1,273 @@ +# **Development Workflow & Architecture Guide** + +This document describes the development workflow, repository architecture, branching strategy and CI/CD for **AsyncFlow** +--- + +## 1) Repository Layout + +### 1.1 Project tree (backend) + +``` +AsyncFlow-backend/ +├─ examples/ # runnable examples (YAML & Python) +│ └─ data/ +├─ scripts/ # helper bash scripts (lint, quality, etc.) 
+│ └─ quality-check.sh +├─ docs/ # product & technical docs +├─ tests/ # unit & integration tests +│ ├─ unit/ +│ └─ integration/ +├─ src/ +│ └─ asyncflow/ # Python package (library) +│ ├─ __init__.py # public "high-level" facade (re-exports) +│ ├─ builder/ +│ │ └─ asyncflow_builder.py # internal builder implementation +│ ├─ components/ # PUBLIC FACADE: leaf Pydantic components +│ │ └─ __init__.py # (barrel: re-exports Client, Server, Endpoint, Edge) +│ ├─ config/ +│ │ ├─ constants.py # enums/constants (source of truth) +│ │ └─ plot_constants.py +│ ├─ enums/ # PUBLIC FACADE: selected enums +│ │ └─ __init__.py # (barrel: re-exports Distribution, SampledMetricName, …) +│ ├─ metrics/ +│ │ ├─ analyzer.py # results post-processing +│ │ ├─ collector.py # sampling collectors +│ │ ├─ client.py +│ │ ├─ edge.py +│ │ └─ server.py +│ ├─ resources/ +│ │ ├─ registry.py +│ │ └─ server_containers.py +│ ├─ runtime/ +│ │ ├─ simulation_runner.py # engine entry-point +│ │ ├─ rqs_state.py +│ │ ├─ actors/ # INTERNAL: Client/Server/Edge/Generator actors +│ │ └─ routing/ +│ │ └─ lb_algorithms.py +│ ├─ samplers/ +│ │ ├─ poisson_poisson.py +│ │ ├─ gaussian_poisson.py +│ │ └─ common_helpers.py +│ ├─ schemas/ # INTERNAL: full Pydantic schema impls +│ │ ├─ payload.py +│ │ ├─ common/ +│ │ │ └─ random_variables.py +│ │ ├─ settings/ +│ │ │ └─ simulation.py +│ │ ├─ topology/ +│ │ │ ├─ edges.py +│ │ │ ├─ endpoint.py +│ │ │ ├─ graph.py +│ │ │ └─ nodes.py +│ │ └─ workload/ +│ │ └─ rqs_generator.py +│ ├─ settings/ # PUBLIC FACADE: SimulationSettings +│ │ └─ __init__.py +│ └─ workload/ # PUBLIC FACADE: RqsGenerator +│ └─ __init__.py +├─ poetry.lock +├─ pyproject.toml +└─ README.md +``` + +**Public API surface (what you guarantee as stable):** + +* High-level: + + ```py + from asyncflow import AsyncFlow, SimulationRunner + ``` +* Components: + + ```py + from asyncflow.components import Client, Server, Endpoint, Edge + ``` +* Workload & Settings: + + ```py + from asyncflow.workload import RqsGenerator + 
from asyncflow.settings import SimulationSettings + ``` +* Enums: + + ```py + from asyncflow.enums import Distribution, SampledMetricName, EventMetricName, LbAlgorithmsName + ``` + +> Everything under `asyncflow.schemas/`, `asyncflow.runtime/actors/`, `asyncflow.builder/` is **internal** (implementation details). The facades re-export only what users should import. + +### 1.2 What each top-level area does + +| Area | Purpose | +| --------------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| **builder/** | Internal implementation of the pybuilder used by `AsyncFlow`. Users shouldn’t import from here directly. | +| **components/** | **Public facade** for *leaf* Pydantic models used in payloads: `Client`, `Server`, `Endpoint`, `Edge`. | +| **config/** | Constants & enums source-of-truth (kept internal; only *selected* enums are re-exported via `asyncflow.enums`). | +| **enums/** | **Public facade** re-exporting the enums that appear in public payloads (`Distribution`, `SampledMetricName`, `EventMetricName`, …). | +| **metrics/** | Post-processing & visualization support (analyzer & collectors). | +| **resources/** | Runtime SimPy resource wiring (CPU/RAM containers, registries). | +| **runtime/** | The simulation engine entry-point (`SimulationRunner`), request lifecycle, and internal actors. | +| **samplers/** | Random-variable samplers for the generator and steps. | +| **schemas/** | Full Pydantic schema implementation and validation rules (internal). | +| **settings/** | **Public facade** re-exporting `SimulationSettings`. | +| **workload/** | **Public facade** re-exporting `RqsGenerator`. | + +--- + +## 2) Branching Strategy: Git Flow (+ `refactor/*`) + +We use **Git Flow** with an extra branch family for clean refactors. 
+ +### Diagram + +```mermaid +--- +title: Git Flow (with refactor/*) +--- +gitGraph + commit id: "Initial commit" + branch develop + checkout develop + commit id: "Setup Project" + + branch feature/user-authentication + checkout feature/user-authentication + commit id: "feat: Add login form" + commit id: "feat: Add form validation" + checkout develop + merge feature/user-authentication + + branch refactor/performance-cleanup + checkout refactor/performance-cleanup + commit id: "refactor: simplify hot path" + commit id: "refactor: remove dead code" + checkout develop + merge refactor/performance-cleanup + + branch release/v1.0.0 + checkout release/v1.0.0 + commit id: "fix: Pre-release bug fixes" tag: "v1.0.0" + checkout main + merge release/v1.0.0 + checkout develop + merge release/v1.0.0 + + checkout main + branch hotfix/critical-login-bug + checkout hotfix/critical-login-bug + commit id: "fix: Critical production bug" tag: "v1.0.1" + checkout main + merge hotfix/critical-login-bug + checkout develop + merge hotfix/critical-login-bug +``` + +### Branch families + +* **main** – production-ready, tagged releases only (no direct commits). +* **develop** – integration branch; base for `feature/*` and `refactor/*`. +* **feature/**\* – user-visible features (new endpoints/behavior, DB changes). +* **refactor/**\* – **no new features**; internal changes, performance, renames, file moves, debt trimming. Use `refactor:` commit prefix. +* **release/**\* – freeze, harden, docs; merge into `main` (tag) and back into `develop`. +* **hotfix/**\* – urgent production fixes; branch off `main` tag; merge into `main` (tag) and `develop`. + +**When to pick which:** + +* New behavior or external contract → `feature/*` +* Internal cleanup only → `refactor/*` +* Ship a version → `release/*` +* Patch production now → `hotfix/*` + +--- + +## 3) CI/CD Pipeline + +A layered pipeline (GitHub Actions recommended) mirrors the branching model. 
+ +### 3.1 CI on PRs to `develop` (feature/refactor) + +**Quick Suite** (fast, no external services): + +* **Ruff** (or Black/isort/Flake8) → style/lint +* **mypy** → type checking +* **pytest** unit-only: `pytest -m "not integration"` + +### 3.2 CI on push to `develop` + +**Full Suite** (slower; with services): + +* Full tests, including `@pytest.mark.integration` +* Spin up **PostgreSQL**/**Redis** if required by integration tests +* Build multi-stage Docker image & smoke test +* Optionally build docs (mkdocs) to catch docstring regressions + +### 3.3 CI on `release/*` + +* Always run **Full Suite** +* Build and publish versioned images/artifacts +* Generate release notes/changelog + +### 3.4 CI on `hotfix/*` + +* Run **Full Suite** +* Tag patch release on merge to `main` +* Merge back to `develop` + +> Refactors should be **behavior-preserving**. If a refactor touches hot paths, add micro-benchmarks or targeted integration tests and consider running the Full Suite pre-merge. + +--- + +## 4) Quality Gates & Conventions + +* **Style & Lint**: Ruff (or Black + isort + Flake8). No violations. +* **Types**: mypy clean. +* **Tests**: + + * Unit tests for new/refactored code paths + * Integration tests for end-to-end behavior +* **Commits**: Conventional commits (`feat:`, `fix:`, `refactor:`, `docs:`, `test:`, `chore:`). +* **PRs**: Review required; refactors must include rationale and scope. +* **Docs**: Update `docs/` and public API references when you touch public facades. 
+ +--- + +## 5) Public API & Stability Contract + +Only **facade modules** are considered public and stable: + +* High-level: + + ```py + from asyncflow import AsyncFlow, SimulationRunner + ``` +* Components: + + ```py + from asyncflow.components import Client, Server, Endpoint, Edge + ``` +* Workload & Settings: + + ```py + from asyncflow.workload import RqsGenerator + from asyncflow.settings import SimulationSettings + ``` +* Enums: + + ```py + from asyncflow.enums import Distribution, SampledMetricName, EventMetricName, LbAlgorithmsName + ``` + +Everything else—`schemas/`, `runtime/actors/`, `builder/`, `samplers/`, `resources/`—is **internal** and can change without notice. Use **SemVer** for releases; any change to the public facades that breaks compatibility requires a **major** bump. + +--- + +## 6) Developer Commands (Poetry) + +* Install: `poetry install` +* Lint/format: `bash scripts/quality-check.sh` (or your Ruff/Black commands) +* Test (unit only): `pytest -m "not integration"` +* Test (full): `pytest` +* Run example: `python examples/single_server_pybuilder.py` + +--- + diff --git a/docs/guides/python-builder.md b/docs/guides/python-builder.md new file mode 100644 index 0000000..2a99daf --- /dev/null +++ b/docs/guides/python-builder.md @@ -0,0 +1,390 @@ +# AsyncFlow – Programmatic Input Guide (builder) + +This guide shows how to **build the full simulation input in Python** using the +`AsyncFlow` builder, with the same precision and validation guarantees as the YAML flow. +You’ll see **all components, valid values, units, constraints, and how validation is enforced**. + +Under the hood, the builder assembles a single `SimulationPayload`: + +```python +SimulationPayload( + rqs_input=RqsGenerator(...), # traffic generator (workload) + topology_graph=TopologyGraph(...), # system as a graph + sim_settings=SimulationSettings(...), # global settings and metrics +) +``` + +Everything is **validated up front** by Pydantic. 
If something is inconsistent +(e.g., an edge points to a non-existent node), a clear error is raised +**before** running the simulation. + +--- + +## Quick Start (Minimal Example) + +```python +from __future__ import annotations + +import simpy + +# Public, user-facing API +from asyncflow import AsyncFlow, SimulationRunner +from asyncflow.components import Client, Server, Endpoint, Edge +from asyncflow.workload import RqsGenerator +from asyncflow.settings import SimulationSettings +from asyncflow.schemas.payload import SimulationPayload  # optional, for typing + +# 1) Workload +generator = RqsGenerator( +    id="rqs-1", +    avg_active_users={"mean": 50, "distribution": "poisson"}, +    avg_request_per_minute_per_user={"mean": 30, "distribution": "poisson"}, +    user_sampling_window=60,  # seconds +) + +# 2) Nodes (client + one server) +client = Client(id="client-1") +endpoint = Endpoint( +    endpoint_name="/hello", +    steps=[ +        {"kind": "ram", "step_operation": {"necessary_ram": 32}}, +        {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, +        {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}}, +    ], +) +server = Server( +    id="srv-1", +    server_resources={"cpu_cores": 1, "ram_mb": 1024}, +    endpoints=[endpoint], +) + +# 3) Edges (directed, with latency as RV) +edges = [ +    Edge( +        id="gen-to-client", +        source="rqs-1",            # external sources allowed +        target="client-1",         # targets must be declared nodes +        latency={"mean": 0.003, "distribution": "exponential"}, +    ), +    Edge( +        id="client-to-server", +        source="client-1", +        target="srv-1", +        latency={"mean": 0.003, "distribution": "exponential"}, +    ), +    Edge( +        id="server-to-client", +        source="srv-1", +        target="client-1", +        latency={"mean": 0.003, "distribution": "exponential"}, +    ), +] + +# 4) Global settings +settings = SimulationSettings( +    total_simulation_time=300,   # seconds, min 5 +    sample_period_s=0.01,        # seconds, [0.001 .. 
0.1] +    enabled_sample_metrics=[ +        "ready_queue_len", +        "event_loop_io_sleep", +        "ram_in_use", +        "edge_concurrent_connection", +    ], +    enabled_event_metrics=["rqs_clock"], +) + +# 5) Build (validates everything) +payload: SimulationPayload = ( +    AsyncFlow() +    .add_generator(generator) +    .add_client(client) +    .add_servers(server)        # varargs; supports multiple +    .add_edges(*edges)          # varargs; supports multiple +    # .add_load_balancer(lb)    # optional +    .add_simulation_settings(settings) +    .build_payload() +) + +# 6) Run +env = simpy.Environment() +results = SimulationRunner(env=env, simulation_input=payload).run() +``` + +--- + +## 1) Random Variables (`RVConfig`) + +Where a parameter is stochastic (e.g., edge latency, users, RPM), you pass a +dictionary that Pydantic converts into an `RVConfig`: + +```python +{"mean": , "distribution": , "variance": } +``` + +### Supported distributions + +* `"poisson"` +* `"normal"` +* `"log_normal"` +* `"exponential"` +* `"uniform"` + +### Rules & defaults + +* `mean` is **required** and numeric; coerced to `float`. +* If `distribution` is `"normal"` or `"log_normal"` and `variance` is absent, +  it defaults to **`variance = mean`**. +* For **edge latency**: **`mean > 0`** and, if provided, **`variance ≥ 0`**. + +**Units** + +* Time values are **seconds**. +* Rates are **requests per minute** (where noted). + +--- + +## 2) Workload: `RqsGenerator` + +```python +from asyncflow.workload import RqsGenerator + +generator = RqsGenerator( +    id="rqs-1", +    avg_active_users={ +        "mean": 100, +        "distribution": "poisson",   # or "normal" +        # "variance": ,     # optional; auto=mean if "normal" +    }, +    avg_request_per_minute_per_user={ +        "mean": 20, +        "distribution": "poisson",   # must be poisson in current samplers +    }, +    user_sampling_window=60,         # [1 .. 120] seconds +) +``` + +**Semantics** + +* `avg_active_users`: active users as a random variable (**Poisson** or **Normal**). +* `avg_request_per_minute_per_user`: per-user RPM (**Poisson** only). 
+* `user_sampling_window`: re-sample active users every N seconds. + +--- + +## 3) System Topology (`Client`, `Server`, `LoadBalancer`, `Edge`) + +Represent the system as a **directed graph**: nodes (client, servers, optional +LB) and edges (network links). + +### 3.1 Client + +```python +from asyncflow.components import Client + +client = Client(id="client-1") # type is fixed to 'client' +``` + +### 3.2 Server & Endpoints + +```python +from asyncflow.components import Endpoint, Server + +endpoint = Endpoint( + endpoint_name="/api", # normalized to lowercase internally + steps=[ + {"kind": "ram", "step_operation": {"necessary_ram": 64}}, + {"kind": "cpu_bound_operation", "step_operation": {"cpu_time": 0.004}}, + {"kind": "io_db", "step_operation": {"io_waiting_time": 0.012}}, + ], +) + +server = Server( + id="srv-1", # type fixed to 'server' + server_resources={ + "cpu_cores": 2, # int ≥ 1 + "ram_mb": 2048, # int ≥ 256 + "db_connection_pool": None, # optional + }, + endpoints=[endpoint], +) +``` + +**Step kinds** (enums) + +* **CPU**: `"initial_parsing"`, `"cpu_bound_operation"` +* **RAM**: `"ram"` +* **I/O**: `"io_task_spawn"`, `"io_llm"`, `"io_wait"`, `"io_db"`, `"io_cache"` + +**Operation keys** (enum `StepOperation`) + +* `cpu_time` (seconds, positive) +* `necessary_ram` (MB, positive int/float) +* `io_waiting_time` (seconds, positive) + +**Validation enforced** + +* Each step’s `step_operation` has **exactly one** entry. +* The operation **must match** the step kind. +* All numeric values **> 0**. + +**Runtime semantics (high level)** + +* RAM is reserved before CPU, then released at the end. +* CPU tokens are acquired for CPU-bound segments; released when switching to I/O. +* I/O waits **do not** hold a CPU core. + +### 3.3 Load Balancer (optional) + +```python +from asyncflow.schemas.topology.nodes import LoadBalancer # internal type +# (Use only if you build the graph manually. AsyncFlow builder hides the graph.) 
+ +lb = LoadBalancer( +    id="lb-1", +    algorithms="round_robin",       # or "least_connection" +    server_covered={"srv-1", "srv-2"}, +) +``` + +**LB validation** + +* `server_covered` must be a subset of declared servers. +* You must define **edges from the LB to each covered server** (see below). + +### 3.4 Edges + +```python +from asyncflow.components import Edge + +edge = Edge( +    id="client-to-srv1", +    source="client-1",   # may be external only for sources +    target="srv-1",      # MUST be a declared node +    latency={"mean": 0.003, "distribution": "exponential"}, +    # edge_type defaults to "network_connection" +    # dropout_rate defaults to 0.01 (0.0 .. 1.0) +) +``` + +**Semantics** + +* `source`: can be an **external** ID for entry points (e.g., `"rqs-1"`). +* `target`: **must** be a declared node (`client`, `server`, `load_balancer`). +* `latency`: random variable; **`mean > 0`**, `variance ≥ 0` (if provided). +* **Fan-out rule**: the model enforces **“no fan-out except LB”**—i.e., only the load balancer may have multiple outgoing edges. + +--- + +## 4) Global Settings: `SimulationSettings` + +```python +from asyncflow.settings import SimulationSettings + +settings = SimulationSettings( +    total_simulation_time=600,   # seconds, default 3600, min 5 +    sample_period_s=0.02,        # seconds, [0.001 .. 0.1], default 0.01 +    enabled_sample_metrics=[ +        "ready_queue_len", +        "event_loop_io_sleep", +        "ram_in_use", +        "edge_concurrent_connection", +    ], +    enabled_event_metrics=[ +        "rqs_clock", +        # "llm_cost",   # optional future accounting +    ], +) +``` + +**Notes** + +* Sampled metrics are time-series collected at `sample_period_s`. +* Event metrics are per-event (e.g., per request), not sampled. 
+ +--- + +## 5) Building the Payload with `AsyncFlow` + +```python +from asyncflow import AsyncFlow +from asyncflow.schemas.payload import SimulationPayload # optional typing + +flow = ( + AsyncFlow() + .add_generator(generator) + .add_client(client) + .add_servers(server) # varargs + .add_edges(*edges) # varargs + # .add_load_balancer(lb) + .add_simulation_settings(settings) +) + +payload: SimulationPayload = flow.build_payload() +``` + +**What `build_payload()` validates** + +1. **Presence**: generator, client, ≥1 server, ≥1 edge, settings. +2. **Unique IDs**: servers and edges have unique IDs. +3. **Node types**: fixed enums: `client`, `server`, `load_balancer`. +4. **Edge integrity**: every target is a declared node; external IDs allowed only as sources; no self-loops. +5. **Load balancer sanity**: `server_covered ⊆ declared_servers` **and** there is an edge from the LB to **each** covered server. +6. **No fan-out except LB**: only the LB may have multiple outgoing edges. + +If any rule is violated, a **descriptive `ValueError`** pinpoints the problem. 
+ +--- + +## 6) Running the Simulation + +```python +import simpy +from asyncflow import SimulationRunner + +env = simpy.Environment() +runner = SimulationRunner(env=env, simulation_input=payload) +results = runner.run() # blocks until total_simulation_time + +# Access results via the ResultsAnalyzer API: +stats = results.get_latency_stats() +ts, rps = results.get_throughput_series() +sampled = results.get_sampled_metrics() +``` + +You can also plot with the analyzer methods: + +```python +from matplotlib import pyplot as plt # optional +fig, axes = plt.subplots(2, 2, figsize=(12, 8)) +results.plot_latency_distribution(axes[0, 0]) +results.plot_throughput(axes[0, 1]) +results.plot_server_queues(axes[1, 0]) +results.plot_ram_usage(axes[1, 1]) +fig.tight_layout() +fig.savefig("single_server_builder.png") +``` + +--- + +## 7) Enums, Units & Conventions (Cheat Sheet) + +* **Distributions**: `"poisson"`, `"normal"`, `"log_normal"`, `"exponential"`, `"uniform"` +* **Node types**: fixed internally to `generator`, `server`, `client`, `load_balancer` +* **Edge type**: `network_connection` +* **LB algorithms**: `"round_robin"`, `"least_connection"` +* **Step kinds** + CPU: `"initial_parsing"`, `"cpu_bound_operation"` + RAM: `"ram"` + I/O: `"io_task_spawn"`, `"io_llm"`, `"io_wait"`, `"io_db"`, `"io_cache"` +* **Step operation keys**: `cpu_time`, `io_waiting_time`, `necessary_ram` +* **Sampled metrics**: `ready_queue_len`, `event_loop_io_sleep`, `ram_in_use`, `edge_concurrent_connection` +* **Event metrics**: `rqs_clock` (and `llm_cost` reserved for future use) + +**Units & ranges** + +* **Time**: seconds (`cpu_time`, `io_waiting_time`, latencies, `total_simulation_time`, `sample_period_s`, `user_sampling_window`) +* **RAM**: megabytes (`ram_mb`, `necessary_ram`) +* **Rates**: requests/minute (`avg_request_per_minute_per_user.mean`) +* **Probabilities**: `[0.0, 1.0]` (`dropout_rate`) +* **Bounds**: `total_simulation_time ≥ 5`, `sample_period_s ∈ [0.001, 0.1]`, `cpu_cores ≥ 1`, 
`ram_mb ≥ 256`, numeric step values > 0 + diff --git a/docs/guides/yaml-builder.md b/docs/guides/yaml-builder.md new file mode 100644 index 0000000..113b879 --- /dev/null +++ b/docs/guides/yaml-builder.md @@ -0,0 +1,431 @@ +# AsyncFlow – YAML Input Guide + +This guide explains **how to author the simulation YAML** for AsyncFlow, covering every field, valid values, units, constraints, and the validation rules enforced by the Pydantic schemas. + +The YAML you write is parsed into a single model: + +```yaml +rqs_input: # traffic generator (workload) +topology_graph: # system architecture as a directed graph +sim_settings: # global settings and metric collection config +``` + +Everything is **validated up front**. If something is inconsistent (e.g., an edge points to a non-existent node), the simulator raises a clear error before running. + +--- + +## 1) Random Variables (`RVConfig`) + +Many knobs use a **random variable** specification: + +```yaml +mean: # required +distribution: # optional, default: poisson +variance: # optional; required by some distributions +``` + +### Supported distributions + +* `poisson` +* `normal` +* `log_normal` +* `exponential` +* `uniform` + +### Rules & defaults + +* **`mean`** must be numeric (int or float). It is coerced to float. +* If `distribution` is `normal` or `log_normal` **and** `variance` is missing, it is set to `variance = mean`. +* For **edge latency** (see §3.3), additional checks apply: `mean > 0`, and if provided, `variance ≥ 0`. + +**Units** + +* Time values are **seconds**. +* Rates are **requests per minute** (where noted). 
+ +--- + +## 2) Workload: `rqs_input` (Request Generator) + +```yaml +rqs_input: + id: + # type is implicit and fixed to "generator" + avg_active_users: + mean: + distribution: poisson | normal # ONLY these two are allowed + variance: # required if normal and not provided (auto=mean) + avg_request_per_minute_per_user: + mean: + distribution: poisson # MUST be poisson + user_sampling_window: # default 60, bounds [1, 120] +``` + +### Semantics + +* **`avg_active_users`**: expected concurrent users (a random variable). + Allowed distributions: **Poisson** or **Normal**. +* **`avg_request_per_minute_per_user`**: per-user request rate (RPM). + Must be **Poisson**.\* +* **`user_sampling_window`**: every N seconds the generator re-samples the active user count. + +\* Current joint-sampler support covers Poisson–Poisson and Gaussian–Poisson. + +--- + +## 3) System Topology: `topology_graph` + +The system is a **directed graph** of nodes and edges. + +```yaml +topology_graph: + nodes: + client: { id: } + load_balancer: # optional + id: + algorithms: round_robin | least_connection + server_covered: [ , ... 
] + servers: + - id: + server_resources: + cpu_cores: + ram_mb: + db_connection_pool: # optional + endpoints: + - endpoint_name: # normalized to lowercase + steps: + - kind: # see §3.2 + step_operation: { : } # exactly ONE key (see §3.2) + edges: + - id: + source: + target: # must be a declared node + latency: { mean: , distribution: , variance: } + edge_type: network_connection # (enum; current default/only) + dropout_rate: <0..1> # default 0.01 +``` + +### 3.1 Nodes + +#### Client + +```yaml +client: + id: client-1 + # type is fixed to "client" +``` + +#### Server + +```yaml +- id: srv-1 + # type is fixed to "server" + server_resources: + cpu_cores: 1 # default 1, min 1 + ram_mb: 1024 # default 1024, min 256 + db_connection_pool: null # optional; set an integer to enable pool modeling + endpoints: + - endpoint_name: /predict + steps: + # defined in §3.2 +``` + +**Resource semantics** + +* `cpu_cores`: number of worker “core tokens” available for CPU-bound step execution. +* `ram_mb`: total available RAM (MB) tracked as a reservoir; steps reserve then release. +* `db_connection_pool`: optional capacity bound for DB-like steps (future-use). + +#### Load Balancer (optional) + +```yaml +load_balancer: + id: lb-1 + algorithms: round_robin | least_connection + server_covered: [ srv-1, srv-2 ] # must be a subset of declared server IDs +``` + +LB **validation**: + +* `server_covered` must be a subset of declared servers. +* You must also define **edges from the LB to each covered server** (see §3.3); otherwise validation fails. + +### 3.2 Endpoints & Steps + +An endpoint is a **linear sequence** of steps. +Each step must declare **exactly one** operation (`step_operation`) whose key matches the step’s kind. 
+ +#### Step kinds (enums) + +**CPU-bound** + +* `initial_parsing` +* `cpu_bound_operation` + +**RAM** + +* `ram` + +**I/O-bound** (all use `io_waiting_time` as the operation key) + +* `io_task_spawn` (spawns a background task, returns immediately) +* `io_llm` (LLM inference call) +* `io_wait` (generic wait, non-blocking) +* `io_db` (DB roundtrip) +* `io_cache` (cache access) + +#### Operation keys (enum `StepOperation`) + +* `cpu_time`: service time (seconds) that **occupies a CPU core/GIL**. +* `necessary_ram`: peak RAM (MB) reserved for the step. +* `io_waiting_time`: passive wait (seconds) **without a CPU core**. + +#### Valid pairings + +* CPU step → `{ cpu_time: }` +* RAM step → `{ necessary_ram: }` +* I/O step → `{ io_waiting_time: }` + +**Example** + +```yaml +endpoints: + - endpoint_name: /hello + steps: + - kind: ram + step_operation: { necessary_ram: 64 } + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } + - kind: io_cache + step_operation: { io_waiting_time: 0.003 } + - kind: io_db + step_operation: { io_waiting_time: 0.012 } + - kind: cpu_bound_operation + step_operation: { cpu_time: 0.001 } +``` + +**Validation enforced** + +* `step_operation` must contain **exactly one** entry. +* The operation key must match the step kind (e.g., RAM cannot use `cpu_time`). +* All numeric values are **strictly positive**. + +### 3.3 Edges + +```yaml +- id: c2s + source: client-1 # may be an external ID only for sources + target: srv-1 # MUST be a declared node + latency: + mean: 0.003 + distribution: exponential + # variance optional; if normal/log_normal and missing → set to mean + edge_type: network_connection + dropout_rate: 0.01 # optional [0..1] +``` + +**Semantics** + +* **`source`** can be an external entry point (e.g., `rqs-1`) for inbound edges. +* **`target`** must always reference a declared node: client, server, or LB. +* **`latency`** is a random variable; **`mean > 0`**, **`variance ≥ 0`** (if provided). 
+* **Fan-out rule**: only the **load balancer** may have multiple outgoing edges. + +--- + +## 4) Global Settings: `sim_settings` + +```yaml +sim_settings: + total_simulation_time: # default 3600, min 5 + sample_period_s: # default 0.01, bounds [0.001, 0.1] + enabled_sample_metrics: + - ready_queue_len + - event_loop_io_sleep + - ram_in_use + - edge_concurrent_connection + enabled_event_metrics: + - rqs_clock + # - llm_cost # optional, for future accounting +``` + +**Notes** + +* `enabled_sample_metrics` are **time-series** collected every `sample_period_s`. +* `enabled_event_metrics` are **per-event** (e.g., per request) and not tied to the sampling period. +* The defaults already include the four main sampled metrics and `rqs_clock`. + +--- + +## 5) Graph-level Validation Rules (what’s checked before running) + +AsyncFlow validates the entire payload. Key checks include: + +1. **Unique IDs** + + * All server IDs are unique. + * Edge IDs are unique. + +2. **Node Types** + + * `type` fields on nodes are fixed to: `client`, `server`, `load_balancer`. + +3. **Edge referential integrity** + + * Every **target** is a declared node ID. + * **External IDs** are allowed **only** as **sources**. If an ID appears as an external source, it must **never** appear as a target anywhere. + +4. **No self-loops** + + * `source != target` for every edge. + +5. **Load balancer sanity** + + * `server_covered` is a subset of declared servers. + * There is an **edge from the LB to every covered server**. + +6. **No fan-out except LB** + + * Only the load balancer may have multiple outgoing edges in the declared node set. + +If any rule is violated, the simulator raises a descriptive error. 
+ +--- + +## 6) End-to-End Examples + +### 6.1 Minimal single-server scenario + +```yaml +rqs_input: + id: rqs-1 + avg_active_users: { mean: 50, distribution: poisson } + avg_request_per_minute_per_user: { mean: 30, distribution: poisson } + user_sampling_window: 60 + +topology_graph: + nodes: + client: { id: client-1 } + servers: + - id: srv-1 + server_resources: { cpu_cores: 1, ram_mb: 1024 } + endpoints: + - endpoint_name: /hello + steps: + - kind: ram + step_operation: { necessary_ram: 32 } + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } + - kind: io_wait + step_operation: { io_waiting_time: 0.010 } + edges: + - id: gen-to-client + source: rqs-1 + target: client-1 + latency: { mean: 0.003, distribution: exponential } + - id: client-to-server + source: client-1 + target: srv-1 + latency: { mean: 0.003, distribution: exponential } + - id: server-to-client + source: srv-1 + target: client-1 + latency: { mean: 0.003, distribution: exponential } + +sim_settings: + total_simulation_time: 300 + sample_period_s: 0.01 + enabled_sample_metrics: + - ready_queue_len + - event_loop_io_sleep + - ram_in_use + - edge_concurrent_connection + enabled_event_metrics: + - rqs_clock +``` + +### 6.2 With a load balancer and two servers + +```yaml +rqs_input: + id: rqs-1 + avg_active_users: { mean: 120, distribution: poisson } + avg_request_per_minute_per_user: { mean: 20, distribution: poisson } + +topology_graph: + nodes: + client: { id: client-1 } + load_balancer: + id: lb-1 + algorithms: round_robin + server_covered: [ srv-1, srv-2 ] + servers: + - id: srv-1 + server_resources: { cpu_cores: 1, ram_mb: 1024 } + endpoints: + - endpoint_name: /api + steps: + - kind: ram + step_operation: { necessary_ram: 64 } + - kind: cpu_bound_operation + step_operation: { cpu_time: 0.004 } + - id: srv-2 + server_resources: { cpu_cores: 2, ram_mb: 2048 } + endpoints: + - endpoint_name: /api + steps: + - kind: ram + step_operation: { necessary_ram: 64 } + - kind: io_db + 
step_operation: { io_waiting_time: 0.012 } + + edges: + - { id: gen-client, source: rqs-1, target: client-1, + latency: { mean: 0.003, distribution: exponential } } + + - { id: client-lb, source: client-1, target: lb-1, + latency: { mean: 0.002, distribution: exponential } } + + - { id: lb-srv1, source: lb-1, target: srv-1, + latency: { mean: 0.002, distribution: exponential } } + - { id: lb-srv2, source: lb-1, target: srv-2, + latency: { mean: 0.002, distribution: exponential } } + + - { id: srv1-client, source: srv-1, target: client-1, + latency: { mean: 0.003, distribution: exponential } } + - { id: srv2-client, source: srv-2, target: client-1, + latency: { mean: 0.003, distribution: exponential } } + +sim_settings: + total_simulation_time: 600 + sample_period_s: 0.02 + enabled_sample_metrics: [ ready_queue_len, ram_in_use, edge_concurrent_connection ] + enabled_event_metrics: [ rqs_clock ] +``` + +## 7) Quick Reference (Enums) + +* **Distributions**: `poisson`, `normal`, `log_normal`, `exponential`, `uniform` +* **Node types**: `generator`, `server`, `client`, `load_balancer` (fixed by model) +* **Edge types**: `network_connection` +* **LB algorithms**: `round_robin`, `least_connection` +* **Step kinds** + CPU: `initial_parsing`, `cpu_bound_operation` + RAM: `ram` + I/O: `io_task_spawn`, `io_llm`, `io_wait`, `io_db`, `io_cache` +* **Step operation keys**: `cpu_time`, `io_waiting_time`, `necessary_ram` +* **Sampled metrics**: `ready_queue_len`, `event_loop_io_sleep`, `ram_in_use`, `edge_concurrent_connection` +* **Event metrics**: `rqs_clock` (and `llm_cost` reserved for future use) + +--- + +## 8) Units & Conventions + +* **Time**: seconds (`cpu_time`, `io_waiting_time`, latencies, `total_simulation_time`, `sample_period_s`, `user_sampling_window`) +* **RAM**: megabytes (`ram_mb`, `necessary_ram`) +* **Rates**: requests/minute (`avg_request_per_minute_per_user.mean`) +* **Probabilities**: `[0.0, 1.0]` (`dropout_rate`) +* **IDs**: strings; must be **unique** per 
category (servers, edges, LB). + +--- + diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..a4affc2 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,58 @@ +# AsyncFlow Documentation + +AsyncFlow is a discrete-event simulator for Python async backends (FastAPI/Uvicorn–style). It builds a **digital twin** of your service—traffic, topology, and resources—so you can measure latency, throughput, queueing, RAM, and more **before** you deploy. + +> ⚠️ The package README with `pip install` & a Quickstart will land after the first PyPI release. + +--- + + +## Public API (stable surface) + +* **[High-Level API](api/high-level.md)** — The two entry points you’ll use most: `AsyncFlow` (builder) and `SimulationRunner` (orchestrator). +* **[Components](api/components.md)** — Public Pydantic models for topology: `Client`, `Server`, `Endpoint`, `Edge`, `LoadBalancer`, `ServerResources`. +* **[Workload](api/workload.md)** — Traffic inputs: `RqsGenerator` and `RVConfig` (random variables). +* **[Settings](api/settings.md)** — Global controls: `SimulationSettings` (duration, sampling cadence, metrics). +* **[Enums](api/enums.md)** — Optional importable enums: distributions, step kinds/ops, metric names, node/edge types, LB algorithms. + +--- + +## How-to Guides + +* **[Builder Guide](guides/python-builder.md)** — Programmatically assemble a `SimulationPayload` in Python with validation and examples. +* **[YAML Input Guide](guides/yaml-builder.md)** — Author scenarios in YAML: exact schema, units, constraints, runnable samples. +* **[Dev workflow Guide](guides/dev-workflow.md)** — Describes the development workflow, repository architecture, branching strategy and CI/CD for **AsyncFlow**. + +--- + +## Internals (design & rationale) + +> Prefer formal underpinnings? 
The **Workload Samplers** section includes mathematical details (compound Poisson–Poisson and Normal–Poisson processes, inverse-CDF gaps, truncated Gaussians). + +* **[Simulation Input (contract)](internals/simulation-input.md)** — The complete `SimulationPayload` schema and all validation guarantees (graph integrity, step coherence, etc.). +* **[Simulation Runner](internals/simulation-runner.md)** — Orchestrator design; build → wire → start → run flow; sequence diagrams; extensibility hooks. +* **[Runtime & Resources](internals/runtime-and-resources.md)** — How CPU/RAM/DB are modeled with SimPy containers; decoupling of runtime logic and resources. +* **Metrics** + + * **[Time-Series Architecture](internals/metrics/time-series-architecture.md)** — Registry → runtime state → collector pipeline; why the `if key in …` guard keeps extensibility with zero hot-path cost. +* **[Workload Samplers (math)](internals/workload-samplers.md)** — Formalization of traffic generators: windowed user resampling, rate construction $\Lambda = U \cdot \text{RPM}/60$, exponential inter-arrival via inverse-CDF, latency RV constraints. + +--- + +## Useful mental model + +Every run boils down to this validated input: + +```python +SimulationPayload( + rqs_input=RqsGenerator(...), # workload + topology_graph=TopologyGraph(...), # nodes & edges + sim_settings=SimulationSettings(...), +) +``` + +Build it (via **Builder** or **YAML**) and hand it to `SimulationRunner` to execute and analyze. diff --git a/docs/internals/metrics/overview.md b/docs/internals/metrics/overview.md new file mode 100644 index 0000000..bcffd33 --- /dev/null +++ b/docs/internals/metrics/overview.md @@ -0,0 +1,49 @@ +### **AsyncFlow — Simulation Metrics** + +Metrics are the lifeblood of any simulation, transforming a series of abstract events into concrete, actionable insights about system performance, resource utilization, and potential bottlenecks. 
AsyncFlow provides a flexible and robust metrics collection system designed to give you a multi-faceted view of your system's behavior under load. + +To achieve this, AsyncFlow categorizes metrics into three distinct types: + +1. **Sampled Metrics (`SampledMetricName`):** These metrics provide a **time-series view** of the system's state. They are captured at fixed, regular intervals (e.g., every 5 milliseconds). This methodology is ideal for understanding trends and measuring the continuous utilization of finite resources. Think of them as periodic snapshots of your system's health. + +2. **Event-based Metrics (`EventMetricName`):** These metrics are raw data points recorded **only when a specific event occurs**, such as the completion of a request. Their collection is asynchronous and irregular. This approach is designed to capture the fundamental data needed for post-simulation analysis with maximum efficiency and flexibility. + +3. **Aggregated Metrics (`AggregatedMetricName`):** These are not collected directly but are **calculated after the simulation ends**. They provide high-level statistical summaries (like mean, median, and percentiles) or rate calculations derived from the raw data collected by event-based metrics. They distill thousands of individual data points into a handful of key performance indicators (KPIs). + +The following sections provide a detailed breakdown of each metric within these categories. + +----- + +### **1. Sampled Metrics: A Time-Series Perspective** + +Sampled metrics are configured in the `SimulationSettings` payload. Enabling them allows you to plot the evolution of system resources over time, which is crucial for identifying saturation points and transient performance issues. + +| Metric Name (`SampledMetricName`) | Description & Rationale | +| :--- | :--- | +| **`READY_QUEUE_LEN`** | **What it is:** The number of tasks in the event loop's "ready" queue waiting for their turn to run on a CPU core. 
\\ **Rationale:** This is arguably the most critical indicator of **CPU saturation**. If this queue length is consistently greater than zero, it means tasks are ready to do work but are forced to wait because the CPU is busy. A long or growing queue is a definitive sign that your application is CPU-bound. | +| **`EVENT_LOOP_IO_SLEEP`** | **What it is:** The number of tasks currently suspended and waiting for an I/O operation to complete (e.g., a database query or a network call). \\ **Rationale:** This metric helps you determine if your system is **I/O-bound**. If this queue is long, it means the CPU is potentially underutilized because it has no ready tasks to run and is instead waiting for external systems to respond. | +| **`RAM_IN_USE`** | **What it is:** The total amount of memory (in MB) currently allocated by all active requests within a server. \\ **Rationale:** Essential for **capacity planning and stability analysis**. This metric allows you to visualize your system's memory footprint under load. A steadily increasing `RAM_IN_USE` value that never returns to a baseline is the classic signature of a **memory leak**. | +| **`EDGE_CONCURRENT_CONNECTION`** | **What it is:** The number of requests currently in transit across a network edge. \\ **Rationale:** This metric helps visualize the load on your network links. A high number of concurrent connections can indicate that downstream services are slow to respond, causing requests to pile up. | + +----- + +### **2. Event-based Metrics: The Raw Data Foundation** + +The goal of event-based metrics is to collect the most fundamental data points with minimal overhead during the simulation. This raw data becomes the single source of truth for all post-simulation transactional analysis. 
+ +| Metric Name (`EventMetricName`) | Description & Rationale | +| :--- | :--- | +| **`RQS_CLOCK`** | **What it is:** A collection of `(start_time, finish_time)` tuples, with one tuple recorded for every single request that is fully processed. \\ **Rationale:** This is the **most efficient and flexible way to capture transactional data**. Instead of storing separate lists for latencies and completion times, we store a single, cohesive data structure. This design choice is deliberate: this raw data is all that is needed to calculate both latency and throughput after the simulation, providing maximum flexibility for analysis. | +| **`LLM_COST`** | **What it is:** A collection of the estimated monetary cost (e.g., in USD) incurred by each individual call to an external Large Language Model (LLM) API. \\ **Rationale:** In modern AI-powered applications, API calls can be a major operational expense. This metric moves beyond technical performance to measure **financial performance**, allowing for cost optimization. | + +----- + +### **3. Aggregated Metrics: Post-Simulation Insights** + +These metrics are calculated by an analysis module after the simulation finishes, using the raw data from the event-based metrics. This approach provides flexibility and keeps the simulation core lean. + +| Metric Name (`AggregatedMetricName`) | Description & Rationale | +| :--- | :--- | +| **`THROUGHPUT_RPS`** | **What it is:** The number of requests successfully completed per second, calculated by aggregating the `finish_time` timestamps from the `RQS_CLOCK` data over a specific time window. \\ **Rationale:** This is a fundamental measure of **system performance and capacity**. Calculating it post-simulation is superior because the same raw data can be analyzed with different window sizes (e.g., per-second, per-minute) without re-running the simulation. 
|
+| **`LATENCY_STATS`** | **What it is:** Statistical summaries (mean, median, standard deviation, p50, p95, p99) calculated from the `RQS_CLOCK` data by taking `finish_time - start_time` for each tuple. \\ **Rationale:** These statistics distill thousands of raw data points into key indicators. They tell you about the average user experience (`mean`/`p50`) and, more critically, the worst-case experience (`p95`/`p99`) needed to validate Service Level Objectives (SLOs). |
+| **`LLM_STATS`** | **What it is:** Statistical summaries (total cost, average cost per request, etc.) calculated from the raw `LLM_COST` data. \\ **Rationale:** Provides high-level insights into the financial performance of AI-driven features, enabling strategic decisions on cost optimization. |
\ No newline at end of file
diff --git a/docs/internals/metrics/time-series-architecture.md b/docs/internals/metrics/time-series-architecture.md
new file mode 100644
index 0000000..572ba71
--- /dev/null
+++ b/docs/internals/metrics/time-series-architecture.md
@@ -0,0 +1,91 @@
+## Time‑Series Metrics: Architectural Overview
+
+Collecting high‑frequency, time‑series metrics from a complex simulation requires an architecture that is **performant, maintainable, and extensible**. Our design meets those goals by keeping metric *declaration*, *state management*, and *data collection* in strictly separate layers.
+
+### 1  Guiding Principles & Architecture
+
+1. **Minimal Hot‑Path Overhead** — every state update in the simulation core is `O(1)`.
+2. **Single Source of Truth** — one “Registry” enumerates every metric that can exist.
+3. **User‑Defined Extensibility** — advanced users can register custom metrics without touching the framework.
+4. **Predictable Memory Footprint** — data structures are pre‑allocated once, never rebuilt at each sample tick. 
+ +| Layer | Responsibility | Lifetime | +| ------------- | ---------------------------------------------------------------- | ---------------------- | +| **Registry** | Declare *which* metrics exist for each component type | Module import (once) | +| **Runtime** | Maintain the **current value** of each metric per component | Per component instance | +| **Collector** | Periodically read runtime values and append to time‑series lists | One per simulation run | + +--- + +\### 2  Case Study — Edge Metric Collection + +```mermaid +graph TD + subgraph Init + A(Registry: EDGE_METRICS) -- builds --> B{Metric Dict} + end + subgraph Loop + C[EdgeRuntime] -- inc/dec --> D(_concurrent_connections) + E[SampledMetricCollector] -->|every N ms| F{iterate} + F -->|read property| D + F -->|append| B + end + C -- owns --> B +``` + +#### Layer Walk‑through + +1. **Registry (`metrics/edge.py`)** + + ```python + EDGE_METRICS = (SampledMetricName.EDGE_CONCURRENT_CONNECTION,) + + def build_edge_metrics(enabled): + return {m: [] for m in EDGE_METRICS if m in enabled} + ``` + +2. **Runtime (`EdgeRuntime`)** + + * Updates the counter `_concurrent_connections` in `O(1)`. + * Holds the dict produced by `build_edge_metrics`. + * **New:** exposes read‑only properties so external modules never touch private fields directly. + + ```python + class EdgeRuntime: + @property + def concurrent_connections(self) -> int: + return self._concurrent_connections + + @property + def enabled_metrics(self) -> dict[SampledMetricName, list[float | int]]: + return self._edge_enabled_metrics + ``` + +3. 
**Collector (`SampledMetricCollector`)** + + ```python + while True: + yield env.timeout(sample_period_s) + for edge in self.edges: + key = SampledMetricName.EDGE_CONCURRENT_CONNECTION + # properties keep encapsulation intact + if key in edge.enabled_metrics: + edge.enabled_metrics[key].append(edge.concurrent_connections) + ``` + +--- + +\### 3  Why the `if key in …` Guard Still Matters + +Even with the new properties, the guard remains essential: + +* **Robustness** — prevents `KeyError` when a metric is disabled for a given edge. +* **Extensibility** — a user can add `EDGE_PACKET_LOSS` (or any custom metric) to a subset of edges; the collector automatically respects that configuration. + +This single `O(1)` check keeps the system plug‑and‑play while preserving full encapsulation: + +* Runtime internals stay protected behind properties. +* The collector never needs to know which metrics exist ahead of time. + +--- + diff --git a/docs/internals/requests-generator.md b/docs/internals/requests-generator.md new file mode 100644 index 0000000..eb40121 --- /dev/null +++ b/docs/internals/requests-generator.md @@ -0,0 +1,389 @@ +# Requests Generator + +This document describes the design of the **requests generator**, which models a stream of user requests to a given endpoint over time. + +--- + +## Model Inputs and Output + +Following the AsyncFlow philosophy, we accept a small set of input parameters to drive a “what-if” analysis in a pre-production environment. These inputs let you explore reliability and cost implications under different traffic scenarios. + +## **Inputs** + +1. **Average Concurrent Users (`avg_active_users`)** + Expected number of simultaneous active users (or sessions) interacting with the system. + + * Modeled as a random variable (`RVConfig`). + * Allowed distributions: **Poisson** or **Normal**. + +2. **Average Requests per Minute per User (`avg_request_per_minute_per_user`)** + Average request rate per user, expressed in requests per minute. 
+ + * Modeled as a random variable (`RVConfig`). + * **Must** use the **Poisson** distribution. + +3. **User Sampling Window (`user_sampling_window`)** + Time interval (in seconds) over which active users are resampled. + + * Constrained between `MIN_USER_SAMPLING_WINDOW` and `MAX_USER_SAMPLING_WINDOW`. + * Defaults to `USER_SAMPLING_WINDOW`. + +--- + +## **Model Assumptions** + +* **Random variables**: + + * *Concurrent users* and *requests per minute per user* are independent random variables. + * Each is configured via the `RVConfig` model, which specifies: + + * **mean** (mandatory, must be numeric and positive), + * **distribution** (default: Poisson), + * **variance** (optional; defaults to `mean` for Normal and Log-Normal distributions). + +* **Supported joint sampling cases**: + + * Poisson (users) × Poisson (requests) + * Normal (users) × Poisson (requests) + + Other combinations are currently unsupported. + +* **Variance handling**: + + * If the distribution is **Normal** or **Log-Normal** and `variance` is not provided, it is automatically set to the `mean`. + +--- + +## **Validation Rules** + +* `avg_request_per_minute_per_user`: + + * **Must** be Poisson-distributed. + * Validation enforces this constraint. + +* `avg_active_users`: + + * Must be either Poisson or Normal. + * Validation enforces this constraint. + +* `mean` in `RVConfig`: + + * Must be a positive number (int or float). + * Automatically coerced to `float`. 
+ +```python +class RVConfig(BaseModel): + """class to configure random variables""" + + mean: float + distribution: Distribution = Distribution.POISSON + variance: float | None = None + + @field_validator("mean", mode="before") + def ensure_mean_is_numeric_and_positive( + cls, # noqa: N805 + v: float, + ) -> float: + """Ensure `mean` is numeric, then coerce to float.""" + err_msg = "mean must be a number (int or float)" + if not isinstance(v, (float, int)): + raise ValueError(err_msg) # noqa: TRY004 + + return float(v) + + @model_validator(mode="after") # type: ignore[arg-type] + def default_variance(cls, model: "RVConfig") -> "RVConfig": # noqa: N805 + """Set variance = mean when distribution require and variance is missing.""" + needs_variance: set[Distribution] = { + Distribution.NORMAL, + Distribution.LOG_NORMAL, + } + + if model.variance is None and model.distribution in needs_variance: + model.variance = model.mean + return model + + +class RqsGenerator(BaseModel): + """Define the expected variables for the simulation""" + + id: str + type: SystemNodes = SystemNodes.GENERATOR + avg_active_users: RVConfig + avg_request_per_minute_per_user: RVConfig + + user_sampling_window: int = Field( + default=TimeDefaults.USER_SAMPLING_WINDOW, + ge=TimeDefaults.MIN_USER_SAMPLING_WINDOW, + le=TimeDefaults.MAX_USER_SAMPLING_WINDOW, + description=( + "Sampling window in seconds " + f"({TimeDefaults.MIN_USER_SAMPLING_WINDOW}-" + f"{TimeDefaults.MAX_USER_SAMPLING_WINDOW})." 
+ ), + ) + + @field_validator("avg_request_per_minute_per_user", mode="after") + def ensure_avg_request_is_poisson( + cls, # noqa: N805 + v: RVConfig, + ) -> RVConfig: + """ + Force the distribution for the rqs generator to be poisson + at the moment we have a joint sampler just for the poisson-poisson + and gaussian-poisson case + """ + if v.distribution != Distribution.POISSON: + msg = "At the moment the variable avg request must be Poisson" + raise ValueError(msg) + return v + + @field_validator("avg_active_users", mode="after") + def ensure_avg_user_is_poisson_or_gaussian( + cls, # noqa: N805 + v: RVConfig, + ) -> RVConfig: + """ + Force the distribution for the rqs generator to be poisson + at the moment we have a joint sampler just for the poisson-poisson + and gaussian-poisson case + """ + if v.distribution not in {Distribution.POISSON, Distribution.NORMAL}: + msg = "At the moment the variable active user must be Poisson or Gaussian" + raise ValueError(msg) + return v + +``` + +--- + +## Aggregate Request Rate + +From the two random inputs we define the **per-second aggregate rate** $\Lambda$: + +$$ +\Lambda + = \text{concurrent\_users} + \;\times\; + \frac{\text{requests\_per\_minute\_per\_user}}{60} + \quad[\text{requests/s}]. +$$ + +--- + +## 1. Poisson → Exponential Refresher + +### 1.1 Homogeneous Poisson process + +A Poisson process of rate $\lambda$ has + +$$ +\Pr\{N(t)=k\} + = e^{-\lambda t}\,\frac{(\lambda t)^{k}}{k!},\quad k=0,1,2,\dots +$$ + +### 1.2 Waiting time to first event + +Define $T_1=\inf\{t>0:N(t)=1\}$. +The survival function is + +$$ +\Pr\{T_1>t\} + = \Pr\{N(t)=0\} + = e^{-\lambda t}, +$$ + +so the CDF is + +$$ +F_{T_1}(t) = 1 - e^{-\lambda t},\quad t\ge0, +$$ + +and the density $f(t)=\lambda\,e^{-\lambda t}$. Thus + +$$ +T_1 \sim \mathrm{Exp}(\lambda), +$$ + +and by memorylessness every inter-arrival gap $\Delta t_i$ is i.i.d. Exp($\lambda$). + +### 1.3 Inverse-CDF sampling + +To draw $\Delta t\sim\mathrm{Exp}(\lambda)$: + +1. 
Sample $U\sim\mathcal U(0,1)$.
+2. Solve $U=1-e^{-\lambda\,\Delta t}$ for $\Delta t$: $\Delta t=-\ln(1-U)/\lambda$.
+3. Equivalent compact form:
+   $\displaystyle \Delta t = -\,\ln(U)/\lambda$.
+
+---
+
+## 2. Poisson × Poisson Workload
+
+### 2.1 Notation
+
+| Symbol | Meaning | Law |
+| --------------------------------- | --------------------------------------- | -------- |
+| $U\sim\mathrm{Pois}(\lambda_u)$ | active users in current 1-minute window | Poisson |
+| $R_i\sim\mathrm{Pois}(\lambda_r)$ | requests per minute by user *i* | Poisson |
+| $N=\sum_{i=1}^U R_i$ | total requests in that minute | compound |
+| $\Lambda=N/60$ | aggregate rate (requests / second) | compound |
+
+The procedure here relies heavily on the independence of our random variables.
+
+### 2.2 Conditional sum ⇒ Poisson
+
+Given $U=u$:
+
+$$
+N\mid U=u
+=\sum_{i=1}^{u}R_i
+\;\sim\;\mathrm{Pois}(u\,\lambda_r).
+$$
+
+### 2.3 Unconditional law of $N$
+
+By the law of total probability:
+
+$$
+\Pr\{N=n\}
+=\sum_{u=0}^{\infty}
+\Pr\{U=u\}\;
+\Pr\{N=n\mid U=u\}
+\;=\;
+e^{-\lambda_u}\,\frac1{n!}
+\sum_{u=0}^{\infty}
+\frac{\lambda_u^u}{u!}\,
+e^{-u\lambda_r}\,(u\lambda_r)^n.
+$$
+
+This is the **Poisson–Poisson compound** (Neyman Type A) distribution.
+
+---
+
+## 3. 
Exact Hierarchical Sampler + +Rather than invert the discrete CDF above, we exploit the conditional structure: + +```python +# Hierarchical sampler code snippet +now = 0.0 # virtual clock (s) +window_end = 0.0 # end of the current user window +Lambda = 0.0 # aggregate rate Λ (req/s) + +while now < simulation_time: + # (Re)sample U at the start of each window + if now >= window_end: + window_end = now + float(sampling_window_s) + users = poisson_variable_generator(mean_concurrent_user, rng) + Lambda = users * mean_req_per_sec_per_user + + # No users → fast-forward to next window + if Lambda <= 0.0: + now = window_end + continue + + # Exponential gap from a protected uniform value + u_raw = max(uniform_variable_generator(rng), 1e-15) + delta_t = -math.log(1.0 - u_raw) / Lambda + + # End simulation if the next event exceeds the horizon + if now + delta_t > simulation_time: + break + + # If the gap crosses the window boundary, jump to it + if now + delta_t >= window_end: + now = window_end + continue + + now += delta_t + yield delta_t +``` + +Because each conditional step matches the exact Poisson→Exponential law, this two-stage algorithm reproduces the same joint distribution as analytically inverting the compound CDF, but with minimal computation. + +--- + +## 4. Validity of the hierarchical sampler + +The validity of the hierarchical sampler relies on a structural property of the model: + +$$ +N \;=\; \sum_{i=1}^{U} R_i, +$$ + +where each $R_i \sim \mathrm{Pois}(\lambda_r)$ is independent of the others and of $U$. Because the Poisson family is closed under convolution, + +$$ +N \,\big|\, U=u \;\sim\; \mathrm{Pois}\!\bigl(u\,\lambda_r\bigr). +$$ + +This result has two important consequences: + +1. **Deterministic conditional rate** – Given $U=u$, the aggregate request arrivals constitute a homogeneous Poisson process with the *deterministic* rate + + $$ + \Lambda = \frac{u\,\lambda_r}{60}. + $$ + + All inter-arrival gaps are therefore i.i.d. 
exponential with parameter $\Lambda$, allowing us to use the standard inverse–CDF formula for each gap. + +2. **Layered uncertainty handling** – The randomness associated with $U$ is handled in an outer step (sampling $U$ once per window), while the inner step leverages the well-known Poisson→Exponential correspondence. This two-level construction reproduces exactly the joint distribution obtained by first drawing $\Lambda = N/60$ from the compound Poisson law and then drawing gaps conditional on $\Lambda$. + +If the total count could **not** be written as a sum of independent Poisson variables, the conditional distribution of $N$ would no longer be Poisson and the exponential-gap shortcut would not apply. In that situation one would need to work directly with the (generally more complex) mixed distribution of $\Lambda$ or adopt another specialized sampling scheme. + + + +## 5. Equivalence to CDF Inversion + +By the law of total probability, for any event set $A$: + +$$ +\Pr\{(\Lambda,\Delta t_1,\dots)\in A\} +=\sum_{u=0}^\infty +\Pr\{U=u\}\; +\Pr\{(\Lambda,\Delta t_1,\dots)\in A\mid U=u\}. +$$ + +Step 1 samples $\Pr\{U=u\}$, step 2–3 sample the conditional exponential gaps. Because these two factors exactly match the mixture definition of the compound CDF, the hierarchical sampler **is** an exact implementation of two-stage CDF inversion, avoiding any explicit inversion of an infinite series. + +--- + +## 6. Gaussian × Poisson Variant + +If concurrent users follow a truncated Normal, + +$$ +U\sim \max\{0,\;\mathcal N(\mu_u,\sigma_u^2)\}, +$$ + +steps 2–3 remain unchanged; only step 1 draws $U$ from a continuous law. The resulting mixture is continuous, yet the hierarchical sampler remains exact. + +--- + +## 7. Time Window + +The sampling window length governs how often we re-sample $U$. It should reflect the timescale over which user count fluctuations become significant. 
Our default is **60 s**, but you can adjust this parameter in your configuration before each simulation.
+
+---
+
+## Limitations of the Requests Model
+
+1. **Independence assumption**
+   Assumes per-user streams and $U$ are independent. Real traffic often exhibits user-behavior correlations (e.g., flash crowds).
+
+2. **Exponential inter-arrival times**
+   Implies memorylessness; cannot capture self-throttling or long-range dependence found in real workloads.
+
+3. **No diurnal/trend component**
+   User count $U$ is IID per window. To model seasonality or trends, you must vary $\lambda_u(t)$ externally.
+
+4. **No burst-control or rate-limiting**
+   Does not simulate client-side throttling or server back-pressure. Any rate-limit logic must be added externally.
+
+5. **Gaussian truncation artifacts**
+   In the Gaussian–Poisson variant, truncating negatives to zero and rounding can under-estimate extreme user counts.
+
+
+**Key takeaway:** By structuring the generator as
+$\Lambda = U\,\lambda_r/60$ with a two-stage Poisson→Exponential sampler, AsyncFlow efficiently reproduces compound Poisson traffic dynamics without any complex CDF inversion.
diff --git a/docs/internals/runtime-and-resources.md b/docs/internals/runtime-and-resources.md
new file mode 100644
index 0000000..32f1611
--- /dev/null
+++ b/docs/internals/runtime-and-resources.md
@@ -0,0 +1,451 @@
+This document is a deep dive into the AsyncFlow Runtime Layer: the "why" behind its design, grounded in real-world analogies.
+
+It covers the actor model, the request state objects, and the resource layer that turn a validated topology into a living simulation.
+
+-----
+
+# **AsyncFlow — The Runtime Layer Documentation**
+
+*(Version July 2025 – Aligned with `app/runtime` and `app/resources`)*
+
+## **1. 
The Runtime Philosophy: From Blueprint to Living System** + +If the `SimulationPayload` is the static **blueprint** of a system, the `runtime` package is the **engine** that brings that blueprint to life. It translates a validated, declarative configuration into a dynamic, interacting set of processes within a SimPy simulation environment. The entire design is guided by a few core principles to ensure robustness, testability, and a faithful reflection of real-world systems. + +### **The Actor Model & Process Management** + +Distributed systems are, by nature, composed of independent components that communicate with each other concurrently. To model this, we've adopted an **Actor Model**. Each major component of the architecture (`Generator`, `Server`, `Client`) is implemented as an "Actor"—a self-contained object with its own internal state and behavior that communicates with other actors by sending and receiving messages (`RequestState` objects). + +SimPy's process management is a perfect fit for this model. It uses **cooperative multitasking** within a single-threaded event loop. An actor "runs" until it `yield`s control to the SimPy environment, typically to wait for a duration (`timeout`), a resource (`Container.get`), or an event (`Store.get`). This elegantly mimics modern, non-blocking I/O frameworks (like Python's `asyncio`, Node.js, or Go's goroutines) where a process performs work until it hits an I/O-bound operation, at which point it yields control, allowing the event loop to run other ready tasks. + +### **The "Validation-First" Contract** + +A crucial design decision is the strict separation between configuration and execution. The `runtime` layer operates under the assumption that the input `SimulationPayload` is **100% valid and logically consistent**. This "validation-first" contract means the runtime code is streamlined and free of defensive checks. 
It doesn't need to validate if a server ID exists or if a resource is defined; it can focus entirely on its core responsibility: accurately modeling the passage of time and contention for resources. + +----- + +## **2. High-Level Architecture & Data Flow** + +The simulation is a choreography of Actors passing a `RequestState` object between them. Communication and resource access are mediated exclusively by the SimPy environment, ensuring all interactions are captured on the simulation timeline. + +```text + .start() .transport(state) .start() +┌───────────┐ Starts ┌───────────┐ Forwards ┌───────────┐ Processes ┌───────────┐ +│ ├─────────►│ ├─────────────►│ ├────────────►│ │ +│ Generator │ │ Edge │ │ Server │ │ Client │ +│ ◄─────────┤ ◄─────────────┤ ◄────────────┤ │ +└───────────┘ └───────────┘ └───────────┘ └───────────┘ + ▲ Creates ▲ Delays RequestState ▲ Consumes ▲ Finishes + │ RequestState │ (Latency & Drops) │ Resources │ Request +``` + + * **Actors** (`runtime/actors/`): The active, stateful processes that perform work (`RqsGeneratorRuntime`, `ServerRuntime`, `ClientRuntime`, `EdgeRuntime`). + * **State Object** (`RequestState`): The message passed between actors. It acts as a digital passport, collecting stamps (`Hop` objects) at every stage of its journey. + * **Resource Registry** (`resources/`): A central authority that creates and allocates finite system resources (CPU cores, RAM) to the actors that need them. + +----- + +## **3. The Anatomy of a Request: State & History** + +At the heart of the simulation is the `RequestState` object, which represents a single user request flowing through the system. + +### **3.1. `Hop` – The Immutable Breadcrumb** + +A `Hop` is a `NamedTuple` that records a single, atomic event in a request's lifecycle: its arrival at a specific component at a specific time. Being an immutable `NamedTuple` makes it lightweight and safe to use in analysis. + +### **3.2. 
`RequestState` – The Digital Passport** + +```python +@dataclass +class RequestState: + id: int + initial_time: float + finish_time: float | None = None + history: list[Hop] = field(default_factory=list) +``` + +This mutable dataclass is the sole carrier of a request's identity and history. + + * `id`: A unique identifier for the request, assigned by the generator. + * `initial_time`: The simulation timestamp (`env.now`) when the request was created. + * `finish_time`: The timestamp when the request completes its lifecycle. It remains `None` until then. + * `history`: A chronologically ordered list of `Hop` objects, creating a complete, traceable path of the request's journey. + +#### **Real-World Analogy** + +Think of `RequestState` as a request context in a modern microservices architecture. The `id` is analogous to a **Trace ID** (like from OpenTelemetry or Jaeger). The `history` of `Hop` objects is the collection of **spans** associated with that trace, providing a detailed, end-to-end view of where the request spent its time, which is invaluable for performance analysis and debugging. + +----- + +## **4 The Resource Layer — Modelling Contention ⚙️** + +In real infrastructures every machine has a hard ceiling: only *N* CPU cores, only *M* MB of RAM. +AsyncFlow mirrors that physical constraint through the **Resource layer**, which exposes pre-filled SimPy containers that actors must draw from. If a token is not available the coroutine simply blocks — giving you back-pressure “for free”. 
+ +--- + +### **4.1 `ResourcesRuntime` — The Central Bank of Resources** + +| Responsibility | Implementation detail | +| --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Discover capacity** | Walks the *validated* `TopologyGraph.nodes.servers`, reading `cpu_cores` and `ram_mb` from each `ServerResources` spec. | +| **Mint containers** | Calls `build_containers(env, spec)` which returns
`{"CPU": simpy.Container(init=cpu_cores), "RAM": simpy.Container(init=ram_mb)}` — the containers start **full** so a server can immediately consume tokens. | +| **Registry map** | Stores them in a private dict `_by_server: dict[str, ServerContainers]`. | +| **Public API** | `registry[server_id] → ServerContainers` (raises `KeyError` if the ID is unknown). | + +```python +registry = ResourcesRuntime(env, topology) +cpu_bucket = registry["srv-1"]["CPU"] # simpy.Container, level == capacity at t=0 +ram_bucket = registry["srv-1"]["RAM"] +``` + +Because the schema guarantees that every `server_id` is unique and every +server referenced in an edge actually exists, `ResourcesRuntime` never needs +defensive checks beyond the simple dictionary lookup. + +--- + +### **4.2 How Contention Emerges** + +* **CPU** – Each `yield CPU.get(1)` represents “claiming one core”. + When all tokens are gone the coroutine waits, modelling a thread-pool or worker saturation. +* **RAM** – `yield RAM.get(amount)` blocks until enough memory is free. + Large requests can starve, reproducing OOM throttling or JVM heap pressure. +* **Automatic fairness** – SimPy’s event loop resumes whichever coroutine became ready first, giving a natural first-come, first-served order. + +> **No bespoke semaphore or queueing code is required** — the SimPy +> containers *are* the semaphore. + +--- + +### **Real-World Analogy** + +| Runtime Component | Real Infrastructure Counterpart | +| -------------------- | --------------------------------------------------------------------------------------------------------- | +| `ResourcesRuntime` | A **cloud provider control plane** or **Kubernetes scheduler**: single source of truth for node capacity. | +| CPU container tokens | **Worker threads / processes** in Gunicorn, uWSGI, or an OS CPU-quota. | +| RAM container tokens | **cgroup memory limit** or a pod’s allocatable memory; once exhausted new workloads must wait. 
| + +Just like a Kubernetes scheduler won’t place a pod if a node lacks free CPU/RAM, +AsyncFlow won’t let an actor proceed until it obtains the necessary tokens. + +## **5. The Actors: Bringing the System to Life** + +Actors are the core drivers of the simulation. Each represents a key component of the system architecture. They all expose a consistent `.start()` method, which registers their primary behavior as a process with the SimPy environment, allowing for clean and uniform orchestration. + +### **5.1. RqsGeneratorRuntime: The Source of Load** + +This actor's sole purpose is to create `RequestState` objects according to a specified stochastic model, initiating all traffic in the system. + +| Key Parameter (`__init__`) | Meaning | +| :--- | :--- | +| `env` | The SimPy simulation environment. | +| `out_edge` | The `EdgeRuntime` instance to which newly created requests are immediately sent. | +| `rqs_generator_data` | The validated Pydantic schema containing the statistical model for traffic (e.g., user count, request rate). | +| `rng` | A NumPy random number generator instance for deterministic, reproducible randomness. | + +**Core Logic (`.start()`):** +The generator's main process uses a statistical sampler (e.g., `poisson_poisson_sampling`) to yield a series of inter-arrival time gaps. It waits for each gap (`yield self.env.timeout(gap)`), then creates a new `RequestState`, records its first `Hop`, and immediately forwards it to the outbound edge via `out_edge.transport()`. + +**Real-World Analogy:** +The `RqsGeneratorRuntime` represents the collective behavior of your entire user base or the output of an upstream service. It's equivalent to a **load-testing tool** like **k6, Locust, or JMeter**, configured to simulate a specific traffic pattern (e.g., 500 users with an average of 30 requests per minute). + +----- + +### **5.2. EdgeRuntime: The Network Fabric 🚚** + +This actor models the connection *between* two nodes. 
It simulates the two most important factors of network transit: latency and unreliability. + +| Key Parameter (`__init__`) | Meaning | +| :--- | :--- | +| `env` | The SimPy simulation environment. | +| `edge_config` | The Pydantic `Edge` model containing this link's configuration (latency distribution, dropout rate). | +| `target_box` | A `simpy.Store` that acts as the "inbox" for the destination node. | +| `rng` | The random number generator for sampling latency and dropout. | + +**Core Logic (`.transport()`):** +Unlike other actors, `EdgeRuntime`'s primary method is `.transport(state)`. When called, it doesn't block the caller. Instead, it spawns a new, temporary SimPy process (`_deliver`) for that specific `RequestState`. This process: + +1. Checks for a **dropout** (packet loss) based on `dropout_rate`. If dropped, the request's `finish_time` is set, and its journey ends. +2. If not dropped, it samples a **latency** value from the configured probability distribution. +3. It `yield`s a `timeout` for the sampled latency, simulating network travel time. +4. After the wait, it records a successful `Hop` and places the `RequestState` into the `target_box` of the destination node. + +**Real-World Analogy:** +An `EdgeRuntime` is a direct analog for a **physical or virtual network link**. This could be the public **internet** between a user and your server, a **LAN connection** between two services in a data center, or a **VPC link** between two cloud resources. `latency` represents round-trip time (RTT), and `dropout_rate` models packet loss. + +----- + +### **5.3  `ServerRuntime` — The Workhorse 📦 (2025 edition)** + +`ServerRuntime` emulates an application server that owns **finite CPU / RAM containers** and executes an ordered chain of **Step** objects for every incoming request. 
+The 2025 refactor keeps the classic **dispatcher / handler** pattern, adds **live metric counters** (ready‑queue length, I/O‑queue length, RAM‑in‑use) and implements the **lazy‑CPU lock** algorithm described earlier. + +| `__init__` parameter | Meaning | +| ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **`env`** | Shared `simpy.Environment`. Every timeout or resource operation is scheduled here. | +| **`server_resources`** | A `ServerContainers` mapping `{"CPU": Container, "RAM": Container}` created by `ResourcesRuntime`. Containers start **full** so a server can immediately pull tokens. | +| **`server_config`** | Validated Pydantic `Server` model: ID, resource spec, list `endpoints: list[Endpoint]`. | +| **`out_edge`** | `EdgeRuntime` (or stub) that receives the `RequestState` once processing finishes. | +| **`server_box`** | `simpy.Store` acting as the server’s inbox. Up‑stream actors drop `RequestState`s here. | +| **`rng`** | `numpy.random.Generator`; defaults to `default_rng()`. Used to pick a random endpoint. 
| + +--- + +#### **Live metric fields** + +| Field | Unit | Updated when… | Used for… | +| --------------------- | -------- | ------------------------------------------------ | ------------------------------------------------------------------------ | +| `_el_ready_queue_len` | requests | a CPU step acquires / releases a core | **Ready queue length** (how many coroutines wait for the GIL / a worker) | +| `_el_io_queue_len` | requests | an I/O step enters / leaves the socket wait list | **I/O queue length** (awaits in progress) | +| `_ram_in_use` | MB | RAM `get` / `put` | Instant **RAM usage** per server | + +Accessor properties expose them read‑only: + +```python +@property +def ready_queue_len(self) -> int: return self._el_ready_queue_len + +@property +def io_queue_len(self) -> int: return self._el_io_queue_len + +@property +def ram_in_use(self) -> int: return self._ram_in_use +``` + +--- + +#### **Public API** + +```python +def start(self) -> simpy.Process +``` + +Registers the **dispatcher** coroutine in the environment and returns the created `Process`. + +--- + +#### **Internal Workflow** + +```text +┌───────────┐ server_box.get() ┌──────────────┐ +│ dispatcher │ ────────────────────► │ handle_req N │ +└───────────┘ spawn new process └──────────────┘ + │ + ▼ + RAM get → CPU/IO steps → RAM put → out_edge.transport() + ▲ │ + │ └── metric counters updated here + └── lazy CPU lock (get once, put on first I/O) +``` + +1. **Dispatcher loop** + +```python +while True: + raw_state = yield self.server_box.get() # blocks until a request arrives + state = cast(RequestState, raw_state) + self.env.process(self._handle_request(state)) # fire‑and‑forget +``` + +2. 
**Handler coroutine (`_handle_request`)** + +| Stage | Implementation detail | +| ------------------------------- | ----------------------------------------------------------------------------------------- | +| **Record arrival** | `state.record_hop(SystemNodes.SERVER, self.server_config.id, env.now)` | +| **Endpoint selection** | Uniform random index `rng.integers(0, len(endpoints))` (plug‑in point for custom routing) | +| **Reserve RAM (back‑pressure)** | compute `total_ram` → `yield RAM.get(total_ram)` → `_ram_in_use += total_ram` | +| **Execute steps** | handled in a loop with *lazy CPU lock* and metric updates (see edge‑case notes below) | +| **Release RAM** | `_ram_in_use -= total_ram` → `yield RAM.put(total_ram)` | +| **Forward** | `out_edge.transport(state)` — send to next hop without awaiting latency | + +--- + +#### **CPU / I‑O loop details** + +* **Lazy‑CPU lock** – first CPU step acquires one core; all following contiguous CPU steps reuse it. +* **Release on I/O** – on the first I/O step the core is released; it remains free until the next CPU step. +* **Metric updates** – counters are modified only on the **state transition** (CPU→I/O, I/O→CPU) so there is never double‑counting. 
+ +```python +if isinstance(step.kind, EndpointStepCPU): + if not core_locked: + yield CPU.get(1) + core_locked = True + self._el_ready_queue_len += 1 # entered ready queue + if is_in_io_queue: + self._el_io_queue_len -= 1 + is_in_io_queue = False + yield env.timeout(cpu_time) + +elif isinstance(step.kind, EndpointStepIO): + if core_locked: + yield CPU.put(1) + core_locked = False + self._el_ready_queue_len -= 1 + if not is_in_io_queue: + self._el_io_queue_len += 1 + is_in_io_queue = True + yield env.timeout(io_time) +``` + +**Handler epilogue** + +```python +# at exit, remove ourselves from whichever queue we are in +if core_locked: # we are still in ready queue + self._el_ready_queue_len -= 1 + yield CPU.put(1) +elif is_in_io_queue: # finished while awaiting I/O + self._el_io_queue_len -= 1 +``` + +> This guarantees both queues always balance back to 0 after the last request completes. + +--- + +#### **Concurrency Guarantees** + +* **CPU contention** – the `CPU` container is a token bucket; max concurrent CPU‑bound steps = `cpu_cores`. +* **RAM contention** – requests block at `RAM.get()` until memory is free (models cgroup / OOM throttling). +* **Non‑blocking I/O** – while in `env.timeout(io_wait)` no core token is held, so other handlers can run; mirrors an async server where workers return to the event‑loop on each `await`. + +--- + +#### **Edge‑case handling (metrics)** + +* **First‑step I/O** – counted only in I/O queue (`+1`), never touches ready queue. +* **Consecutive I/O steps** – second I/O sees `is_in_io_queue == True`, so no extra increment (no double count). +* **CPU → I/O → CPU** – +  – CPU step: `core_locked=True`, `+1` ready queue +  – I/O step: core released, `‑1` ready queue, `+1` I/O queue +  – next CPU: core reacquired, `‑1` I/O queue, `+1` ready queue +* **Endpoint finishes** – epilogue removes the request from whichever queue it still occupies, avoiding “ghost” entries. 
+ +--- + +#### **Real‑World Analogy** + +| Runtime concept | Real server analogue | +| --------------------------------------- | --------------------------------------------------------------------------------------- | +| `server_box` | Web server accept queue (e.g., `accept()` backlog). | +| `CPU.get(1)` / `CPU.put(1)` | Claiming / releasing a worker thread or GIL slot (Gunicorn, uWSGI, Node.js event‑loop). | +| `env.timeout(io_wait)` (without a core) | `await redis.get()` – coroutine parked while the kernel handles the socket. | +| RAM token bucket | cgroup memory limit / container hard‑RSS; requests block when heap is exhausted. | + +Thus a **CPU‑bound step** models tight Python code holding the GIL, while an **I/O‑bound step** models an `await` that yields control back to the event loop, freeing the core. + +--- + + + +### **5.4. ClientRuntime: The Destination** + +This actor typically represents the end-user or system that initiated the request, serving as the final destination. + +| Key Parameter (`__init__`) | Meaning | +| :--- | :--- | +| `env` | The SimPy simulation environment. | +| `out_edge` | The `EdgeRuntime` to use if the client needs to forward the request (acting as a relay). | +| `client_box` | This client's "inbox". | +| `completed_box` | A global `simpy.Store` where all finished requests are placed for final collection and analysis. | + +**Core Logic (`.start()`):** +The client pulls requests from its `client_box`. It then makes a critical decision: + + * **If the request is new** (coming directly from the `RqsGeneratorRuntime`), it acts as a **relay**, immediately forwarding the request to its `out_edge`. + * **If the request is returning** (coming from a `ServerRuntime`), it acts as the **terminus**. It sets the request's `finish_time`, completing its lifecycle, and puts it into the global `completed_box`. 
* **Empty `outer_edges`** → impossible by schema validation (the LB must cover at least one server).
+ +#### **Why an index and not list rotation?** + +`outer_edges` is **shared** with other components (e.g. metrics collector, +tests). Rotating it in-place would mutate a shared object and create +hard-to-trace bugs (aliasing). +Keeping `_round_robin_index` inside the LB runtime preserves immutability while +still advancing the pointer on every request. + +### **Process Workflow** + +```python +def _forwarder(self) -> Generator[simpy.Event, None, None]: + while True: + state: RequestState = yield self.lb_box.get() # ① wait for a request + + state.record_hop(SystemNodes.LOAD_BALANCER, + self.lb_config.id, + self.env.now) # ② trace + + if self.lb_config.algorithms is LbAlgorithmsName.ROUND_ROBIN: + edge, self._round_robin_index = round_robin( + self.outer_edges, + self._round_robin_index, + ) # ③a choose RR edge + else: # LEAST_CONNECTIONS + edge = least_connections(self.outer_edges) # ③b choose LC edge + + edge.transport(state) # ④ forward +``` + +| Step | What happens | Real-world analogue | +| ---- | ------------------------------------------------------------------------ | ---------------------------------------- | +| ① | Pull next `RequestState` out of `lb_box`. | Socket `accept()` on the LB front-end. | +| ② | Add a `Hop` stamped `LOAD_BALANCER`. | Trace span in NGINX/Envoy access log. | +| ③a | **Round-Robin** – pick `outer_edges[_round_robin_index]`, advance index. | Classic DNS-RR or NGINX default. | +| ③b | **Least-Connections** – `min(edges, key=concurrent_connections)`. | HAProxy `leastconn`, NGINX `least_conn`. | +| ④ | Spawn network transit via `EdgeRuntime.transport()`. | LB writes request to backend socket. | + +### **Edge-Case Safety** + +* **Empty `outer_edges`** → impossible by schema validation (LB must cover >1 server). +* **Single server** → RR degenerates to index 0; LC always returns that edge. 
+* **Concurrency metric** (`edge.concurrent_connections`) is updated inside + `EdgeRuntime` in real time, so `least_connections` adapts instantly to load spikes. + +### **Key Takeaways** + +1. **Stateful but side-effect-free** – `_round_robin_index` keeps per-LB state without mutating the shared edge list. +2. **Uniform API** – both algorithms integrate through a simple `if/else`; additional strategies can be added with negligible changes. +3. **Deterministic & reproducible** – no randomness inside the LB, ensuring repeatable simulations. + +With these mechanics the `LoadBalancerRuntime` faithfully emulates behaviour of +production LBs (NGINX, HAProxy, AWS ALB) while remaining lightweight and +fully deterministic inside the AsyncFlow event loop. diff --git a/docs/internals/simulation-input.md b/docs/internals/simulation-input.md new file mode 100644 index 0000000..f6bbbcd --- /dev/null +++ b/docs/internals/simulation-input.md @@ -0,0 +1,329 @@ +# AsyncFlow — Simulation Input Schema (v2) + +This document describes the **complete input contract** used by AsyncFlow to run a simulation, the **design rationale** behind it, and the **validation guarantees** enforced by the Pydantic layer. At the end you’ll find an **end-to-end YAML example** you can run as-is. + +The entry point is: + +```python +class SimulationPayload(BaseModel): + """Full input structure to perform a simulation""" + rqs_input: RqsGenerator + topology_graph: TopologyGraph + sim_settings: SimulationSettings +``` + +Everything the engine needs is captured by these three components: + +* **`rqs_input`** — workload model (how traffic is generated). +* **`topology_graph`** — system under test as a directed graph (nodes & edges). +* **`sim_settings`** — global simulation controls and which metrics to collect. 
+ +--- + +## Rationale + +### 1) Separation of concerns + +* **Workload** (traffic intensity & arrival process) is independent from **topology** (architecture) and **simulation control** (duration & metrics). +* You can reuse the same topology with different workloads (or vice versa) without touching unrelated parts. + +### 2) Validation-first, fail-fast + +* Inputs are **typed** and **validated** before the engine starts. +* Validation catches type errors, dangling references, illegal step definitions, and inconsistent graphs. +* Once a payload parses, the runtime code can remain lean (no defensive checks scattered everywhere). + +### 3) Small-to-large composition + +* The smallest unit is a **`Step`** (one resource-bound operation). +* Steps compose into an **`Endpoint`** (ordered workflow). +* Endpoints live on a **`Server`** node with finite resources. +* Nodes and **Edges** form a **`TopologyGraph`**. +* A closed set of **Enums** eliminates magic strings. + +--- + +## 1) Workload: `RqsGenerator` + +**Purpose:** Defines the stochastic traffic generator that produces request arrivals. + +```python +class RqsGenerator(BaseModel): + id: str + type: SystemNodes = SystemNodes.GENERATOR + avg_active_users: RVConfig + avg_request_per_minute_per_user: RVConfig + user_sampling_window: int = Field( + default=TimeDefaults.USER_SAMPLING_WINDOW, + ge=TimeDefaults.MIN_USER_SAMPLING_WINDOW, + le=TimeDefaults.MAX_USER_SAMPLING_WINDOW, + ) +``` + +### Random variables (`RVConfig`) + +```python +class RVConfig(BaseModel): + mean: float + distribution: Distribution = Distribution.POISSON + variance: float | None = None +``` + +**Validators & guarantees** + +* `mean` is **numeric** and coerced to `float`. (Non-numeric → `ValueError`.) +* If `distribution ∈ {NORMAL, LOG_NORMAL}` and `variance is None`, then `variance := mean`. +* Workload-specific constraints: + + * `avg_request_per_minute_per_user.distribution` **must be** `POISSON`. 
+ * `avg_active_users.distribution` **must be** `POISSON` or `NORMAL`. +* `user_sampling_window` is an **integer in seconds**, bounded to `[1, 120]`. + +**Why these constraints?** +They match the currently implemented samplers (Poisson–Poisson and Normal–Poisson). + +--- + +## 2) System Graph: `TopologyGraph` + +**Purpose:** Describes the architecture as a **directed graph**. Nodes are macro-components (client, server, optional load balancer); edges are network links with latency models. + +```python +class TopologyGraph(BaseModel): + nodes: TopologyNodes + edges: list[Edge] +``` + +### 2.1 Nodes + +```python +class TopologyNodes(BaseModel): + servers: list[Server] + client: Client + load_balancer: LoadBalancer | None = None + + # also: model_config = ConfigDict(extra="forbid") +``` + +#### `Client` + +```python +class Client(BaseModel): + id: str + type: SystemNodes = SystemNodes.CLIENT + # validator: type must equal SystemNodes.CLIENT +``` + +#### `ServerResources` + +```python +class ServerResources(BaseModel): + cpu_cores: PositiveInt = Field(ServerResourcesDefaults.CPU_CORES, + ge=ServerResourcesDefaults.MINIMUM_CPU_CORES) + db_connection_pool: PositiveInt | None = Field(ServerResourcesDefaults.DB_CONNECTION_POOL) + ram_mb: PositiveInt = Field(ServerResourcesDefaults.RAM_MB, + ge=ServerResourcesDefaults.MINIMUM_RAM_MB) +``` + +Each attribute maps directly to a SimPy primitive (core tokens, RAM container, optional DB pool). + +#### `Step` (atomic unit) + +```python +class Step(BaseModel): + kind: EndpointStepIO | EndpointStepCPU | EndpointStepRAM + step_operation: dict[StepOperation, PositiveFloat | PositiveInt] +``` + +**Coherence validator** + +* `step_operation` must contain **exactly one** key. +* Valid pairings: + + * CPU step → `{ cpu_time: PositiveFloat }` + * RAM step → `{ necessary_ram: PositiveInt | PositiveFloat }` + * I/O step → `{ io_waiting_time: PositiveFloat }` +* Any mismatch (e.g., RAM step with `cpu_time`) → `ValueError`. 
+ +#### `Endpoint` + +```python +class Endpoint(BaseModel): + endpoint_name: str + steps: list[Step] + + @field_validator("endpoint_name", mode="before") + def name_to_lower(cls, v): return v.lower() +``` + +Canonical lowercase names avoid accidental duplicates by case. + +#### `Server` + +```python +class Server(BaseModel): + id: str + type: SystemNodes = SystemNodes.SERVER + server_resources: ServerResources + endpoints: list[Endpoint] + # validator: type must equal SystemNodes.SERVER +``` + +#### `LoadBalancer` (optional) + +```python +class LoadBalancer(BaseModel): + id: str + type: SystemNodes = SystemNodes.LOAD_BALANCER + algorithms: LbAlgorithmsName = LbAlgorithmsName.ROUND_ROBIN + server_covered: set[str] = Field(default_factory=set) + # validator: type must equal SystemNodes.LOAD_BALANCER +``` + +### 2.2 Edges + +```python +class Edge(BaseModel): + id: str + source: str # may be an external entrypoint (e.g., generator id) + target: str # MUST be a declared node id + latency: RVConfig + edge_type: SystemEdges = SystemEdges.NETWORK_CONNECTION + dropout_rate: float = Field(NetworkParameters.DROPOUT_RATE, + ge=NetworkParameters.MIN_DROPOUT_RATE, + le=NetworkParameters.MAX_DROPOUT_RATE) + # validator: source != target + # validator on latency: mean > 0, variance >= 0 if provided +``` + +> **Note:** The former `probability` field has been **removed**. Fan-out is controlled at the **load balancer** via `algorithms` (e.g., round-robin, least-connections). Non-LB nodes are not allowed to have multiple outgoing edges (see graph-level validators below). + +### 2.3 Graph-level validators + +The `TopologyGraph` class performs several global checks: + +1. **Unique edge IDs** + + * Duplicate edge ids → `ValueError`. + +2. **Referential integrity** + + * Every **`target`** must be a declared node (`client`, any `server`, optional `load_balancer`). + * **External IDs** (e.g., generator id) are **allowed only as sources** and **must never appear as a target** anywhere. 
+ +3. **Load balancer integrity (if present)** + + * `server_covered ⊆ declared server ids`. + * There must be **an outgoing edge from the LB to every covered server**; missing links → `ValueError`. + +4. **Fan-out restriction** + + * Among **declared nodes**, **only the load balancer** may have **multiple outgoing edges**. + * Edges originating from non-declared external sources (e.g., generator) are ignored by this check. + * Violations list the offending source ids. + +--- + +## 3) Simulation Control: `SimulationSettings` + +```python +class SimulationSettings(BaseModel): + total_simulation_time: int = Field( + default=TimeDefaults.SIMULATION_TIME, + ge=TimeDefaults.MIN_SIMULATION_TIME, + ) + enabled_sample_metrics: set[SampledMetricName] = Field(default_factory=...) + enabled_event_metrics: set[EventMetricName] = Field(default_factory=...) + sample_period_s: float = Field( + default=SamplePeriods.STANDARD_TIME, + ge=SamplePeriods.MINIMUM_TIME, + le=SamplePeriods.MAXIMUM_TIME, + ) +``` + +**What it controls** + +* **Clock** — `total_simulation_time` in seconds (default 3600, min 5). +* **Sampling cadence** — `sample_period_s` in seconds (default 0.01; bounds `[0.001, 0.1]`). +* **Metric selection** — default sets include: + + * Sampled (time-series): `ready_queue_len`, `event_loop_io_sleep`, `ram_in_use`, `edge_concurrent_connection`. + * Event (per-request): `rqs_clock`. 
+ +--- + +## 4) Enums & Units (Quick Reference) + +**Distributions:** `poisson`, `normal`, `log_normal`, `exponential`, `uniform` +**Node types:** `generator`, `server`, `client`, `load_balancer` (fixed by models) +**Edge types:** `network_connection` +**LB algorithms:** `round_robin`, `least_connection` +**Step kinds:** + +* CPU: `initial_parsing`, `cpu_bound_operation` +* RAM: `ram` +* I/O: `io_task_spawn`, `io_llm`, `io_wait`, `io_db`, `io_cache` + **Step operation keys:** `cpu_time`, `io_waiting_time`, `necessary_ram` + **Sampled metrics:** `ready_queue_len`, `event_loop_io_sleep`, `ram_in_use`, `edge_concurrent_connection` + **Event metrics:** `rqs_clock` (and `llm_cost` reserved) + +**Units & conventions** + +* **Time:** seconds (`cpu_time`, `io_waiting_time`, latencies, `total_simulation_time`, `sample_period_s`, `user_sampling_window`) +* **RAM:** megabytes (`ram_mb`, `necessary_ram`) +* **Rates:** requests/minute (`avg_request_per_minute_per_user.mean`) +* **Probabilities:** `[0.0, 1.0]` (`dropout_rate`) +* **IDs:** strings; must be **unique** within their category + +--- + +## 5) Validation Checklist (What is guaranteed if the payload parses) + +### Workload (`RqsGenerator`, `RVConfig`) + +* `mean` is numeric (`int|float`) and coerced to `float`. +* If `distribution ∈ {NORMAL, LOG_NORMAL}` and `variance is None` → `variance := mean`. +* `avg_request_per_minute_per_user.distribution == POISSON`. +* `avg_active_users.distribution ∈ {POISSON, NORMAL}`. +* `user_sampling_window ∈ [1, 120]` seconds. +* `type` fields default to the correct enum (`generator`) and are strongly typed. + +### Steps & Endpoints + +* `endpoint_name` is normalized to lowercase. +* Each `Step` has **exactly one** `step_operation` key. +* `Step.kind` and `step_operation` key **must match**: + + * CPU ↔ `cpu_time` + * RAM ↔ `necessary_ram` + * I/O ↔ `io_waiting_time` +* All step operation values are strictly **positive**. 
+ +### Nodes + +* `Client.type == client`, `Server.type == server`, `LoadBalancer.type == load_balancer` (enforced). +* `ServerResources` obey lower bounds: `cpu_cores ≥ 1`, `ram_mb ≥ 256`. +* `TopologyNodes` contains **unique ids** across `client`, `servers[]`, and (optional) `load_balancer`. Duplicates → `ValueError`. +* `TopologyNodes` forbids unknown fields (`extra="forbid"`). + +### Edges + +* **No self-loops:** `source != target`. +* **Latency sanity:** `latency.mean > 0`; if `variance` is provided, `variance ≥ 0`. Error messages reference the **edge id**. +* `dropout_rate ∈ [0, 1]`. + +### Graph (`TopologyGraph`) + +* **Edge ids are unique.** +* **Targets** are always **declared node ids**. +* **External ids** (e.g., generator) are allowed only as **sources**; they must **never** appear as **targets**. +* **Load balancer integrity:** + + * `server_covered` is a subset of declared servers. + * Every covered server has a **corresponding edge from the LB** (LB → srv). Missing links → `ValueError`. +* **Fan-out restriction:** among **declared nodes**, only the **LB** can have **multiple outgoing edges**. Offenders are listed. + +If your payload passes validation, the engine can wire and run the simulation deterministically with consistent semantics. + + + diff --git a/docs/internals/simulation-runner.md b/docs/internals/simulation-runner.md new file mode 100644 index 0000000..300b226 --- /dev/null +++ b/docs/internals/simulation-runner.md @@ -0,0 +1,236 @@ + +# **Simulation Runner — Technical Documentation** + +## **Overview** + +The `SimulationRunner` is the **orchestrator** of the AsyncFlow engine. +Its main responsibility is to: + +1. **Build** simulation actors from a structured input (`SimulationPayload`). +2. **Wire** actors together via `EdgeRuntime` connections. +3. **Start** all simulation processes in a SimPy environment. +4. **Run** the simulation clock for the configured duration. +5. 
| **RqsGeneratorRuntime** | Produces incoming requests according to stochastic models (Poisson–Poisson, Normal–Poisson, etc.).
| **EdgeRuntime** | Models the connection between two nodes (latency, dropout/packet loss). |
for multiple (future CDN scenarios). +* **`_build_client()`**: Instantiates the single client node; stored in a dict for future multi-client extensions. +* **`_build_servers()`**: Creates one `ServerRuntime` per configured server. Pulls CPU/RAM resources from `ResourcesRuntime`. +* **`_build_load_balancer()`**: Optional; created only if present in the topology. + +--- + +### 2️⃣ **Wire Phase** + +* Merges all runtime actor dictionaries into a `all_nodes` map. +* For each `Edge` in the topology: + + * Looks up **target** object and assigns the correct inbox (`simpy.Store`). + * Creates an `EdgeRuntime` and assigns it as `out_edge` (or appends to `out_edges` for LBs). + +--- + +### 3️⃣ **Start Phase** + +* Uses `itertools.chain` to lazily iterate over all runtime actors in the correct deterministic order. +* Casts to `Iterable[Startable]` to make Mypy type-checking explicit. +* Starts `SampledMetricCollector` to record periodic metrics. + +--- + +### 4️⃣ **Run Phase** + +* Advances SimPy’s event loop until `total_simulation_time` from the simulation settings. +* Returns a `ResultsAnalyzer` for downstream reporting and plotting. + +--- + +## **Extensibility Hooks** + +* **Multiple Generators / Clients**: Dictionaries keyed by node ID already prepared. +* **CDN or Multi-tier Architectures**: Easily extendable via new actor types + wiring rules. +* **Different LB Policies**: Swap `LoadBalancerRuntime` strategy without touching the runner. +* **Metric Expansion**: `SampledMetricCollector` can be extended to capture additional KPIs. 
+ +--- + +## **Architecture Diagram** + +``` + ┌───────────────────────┐ + │ SimulationPayload │ + │ (input topology + cfg) │ + └─────────┬─────────────┘ + │ + ▼ + ┌───────────────────────┐ + │ SimulationRunner │ + └─────────┬─────────────┘ + │ build actors + ▼ + ┌─────────────────────────────────────────────────┐ + │ Runtime Actors (Startable) │ + │ ┌──────────────────┐ ┌──────────────────────┐ │ + │ │ RqsGenerator │→│ ClientRuntime │ │ + │ └──────────────────┘ └──────────────────────┘ │ + │ ↓ edges ↑ edges │ + │ ┌──────────────────┐ ┌──────────────────────┐ │ + │ │ ServerRuntime(s) │←→│ LoadBalancerRuntime │ │ + │ └──────────────────┘ └──────────────────────┘ │ + └─────────────────────────────────────────────────┘ + │ + ▼ + ┌────────────────────────────┐ + │ SampledMetricCollector │ + └──────────────┬─────────────┘ + ▼ + ┌────────────────┐ + │ ResultsAnalyzer │ + └────────────────┘ +``` + +--- + +## Architectural rationale + +✅ **Separation of concerns** — Topology definition, resource allocation, runtime behaviour, and metric processing are decoupled. + +✅ **Extensible** — Adding new node types or connection logic requires minimal changes. + +✅ **Testable** — Each phase can be tested in isolation (unit + integration). + +✅ **Deterministic order** — Startup sequence guarantees reproducibility. + +✅ **Scalable** — Supports larger topologies by design. + +--- + + diff --git a/docs/why-asyncflow.md b/docs/why-asyncflow.md new file mode 100644 index 0000000..d50d401 --- /dev/null +++ b/docs/why-asyncflow.md @@ -0,0 +1,93 @@ +# Why AsyncFlow + +> **TL;DR**: AsyncFlow is a *digital twin* of your FastAPI/Uvicorn service. It simulates traffic, async steps, and resource limits in seconds—so you can size CPU/pools/replicas and hit your latency SLOs **before** touching the cloud. + +## What it is + +* **Event-loop faithful**: Replays FastAPI-style async behavior in SimPy (parsing, CPU-bound work, I/O waits, LLM calls). 
+* **Resource-aware**: Models CPU cores (tokens), RAM, DB pools, and routing so you see queueing, contention, and scheduling delays. +* **Prod-style metrics**: Emits p50/p95/p99 latency, throughput, ready-queue lag, concurrency per edge/server—even estimated LLM cost. + +## What you get + +* **Numbers you can plan with**: p95, max concurrency, queue lengths, RAM usage, RPS over time. +* **Rapid “what-if” loops**: Double traffic, change cores/pools, add a replica—see the impact immediately. +* **Cheap, offline iteration**: Results in seconds, no clusters, no load-test bills. + +## 10-second example + +**Minimal scenario (YAML)** + +```yaml +# examples/data/minimal.yml +rqs_input: + id: rqs-1 + avg_active_users: { mean: 50 } # Poisson by default + avg_request_per_minute_per_user: { mean: 20 } # must be Poisson + user_sampling_window: 60 + +topology_graph: + nodes: + client: { id: client-1 } + servers: + - id: srv-1 + server_resources: { cpu_cores: 2, ram_mb: 2048 } + endpoints: + - endpoint_name: /predict + steps: + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } + - kind: io_wait + step_operation: { io_waiting_time: 0.010 } + edges: + - { id: gen-client, source: rqs-1, target: client-1, + latency: { mean: 0.003, distribution: exponential } } + - { id: client-srv, source: client-1, target: srv-1, + latency: { mean: 0.003, distribution: exponential } } + - { id: srv-client, source: srv-1, target: client-1, + latency: { mean: 0.003, distribution: exponential } } + +sim_settings: + total_simulation_time: 300 + sample_period_s: 0.01 + enabled_sample_metrics: [ ready_queue_len, ram_in_use, edge_concurrent_connection ] + enabled_event_metrics: [ rqs_clock ] +``` + +**Run it (Python)** + +```python +from pathlib import Path +import simpy +from asyncflow.runtime.simulation_runner import SimulationRunner + +env = simpy.Environment() +runner = SimulationRunner.from_yaml(env=env, yaml_path=Path("examples/data/minimal.yml")) +results = runner.run() + 
+print(results.get_latency_stats()) # p50/p95/p99, etc. +print(results.get_throughput_series()) # (timestamps, rps) +``` + +## The mental model + +```mermaid +flowchart LR + RQS[generator] --> C[client] + C --> S[srv-1] + S --> C +``` + +* Each arrow is a **network edge** with its own latency RV. +* Server endpoints are **linear step chains**: CPU → RAM → I/O, etc. +* CPU/DB/RAM are **capacity-limited resources** → queues form under load. + +## Non-goals (by design) + +* Not a replacement for **production** load tests or packet-level network simulators. +* Not a micro-profiler; it models service times and queues, not byte-level protocol details. +* Not an auto-tuner—**you** iterate quickly with data to choose the best configuration. + +--- + +**Bottom line:** AsyncFlow turns your architecture diagram into hard numbers—p95, concurrency, queue lengths—so you can plan capacity, de-risk launches, and explain trade-offs with evidence, not guesswork. diff --git a/examples/builder_input/load_balancer/lb_dashboard.png b/examples/builder_input/load_balancer/lb_dashboard.png new file mode 100644 index 0000000..4d94cfe Binary files /dev/null and b/examples/builder_input/load_balancer/lb_dashboard.png differ diff --git a/examples/builder_input/load_balancer/lb_server_srv-1_metrics.png b/examples/builder_input/load_balancer/lb_server_srv-1_metrics.png new file mode 100644 index 0000000..1665766 Binary files /dev/null and b/examples/builder_input/load_balancer/lb_server_srv-1_metrics.png differ diff --git a/examples/builder_input/load_balancer/lb_server_srv-2_metrics.png b/examples/builder_input/load_balancer/lb_server_srv-2_metrics.png new file mode 100644 index 0000000..cdda50f Binary files /dev/null and b/examples/builder_input/load_balancer/lb_server_srv-2_metrics.png differ diff --git a/examples/builder_input/load_balancer/two_servers.py b/examples/builder_input/load_balancer/two_servers.py new file mode 100644 index 0000000..fb2eb35 --- /dev/null +++ 
b/examples/builder_input/load_balancer/two_servers.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 +""" +Didactic example: AsyncFlow with a Load Balancer and two **identical** servers. + +Goal +---- +Show a realistic, symmetric backend behind a load balancer, and export plots +that match the public `ResultsAnalyzer` API (no YAML needed). + +Topology +-------- + generator ──edge──> client ──edge──> LB ──edge──> srv-1 + └──edge──> srv-2 + srv-1 ──edge──> client + srv-2 ──edge──> client + +Load model +---------- +~120 active users, 20 requests/min each (Poisson-like aggregate by default). + +Server model (both srv-1 and srv-2) +----------------------------------- +• 1 CPU cores, 2 GB RAM +• Endpoint pipeline: CPU(2 ms) → RAM(128 MB) → I/O wait (15 ms) + - CPU step blocks the event loop + - RAM step holds a working set until the request completes + - I/O step is non-blocking (event-loop friendly) + +Network model +------------- +Every edge uses an exponential latency with mean 3 ms. + +Outputs +------- +• Prints latency statistics to stdout +• Saves, in the same folder as this script: + - `lb_dashboard.png` (Latency histogram + Throughput) + - `lb_server__metrics.png` for each server (Ready / I/O / RAM) +""" + +from __future__ import annotations + +from pathlib import Path + +import simpy +import matplotlib.pyplot as plt + +# Public AsyncFlow API (builder-style) +from asyncflow import AsyncFlow +from asyncflow.components import Client, Server, Edge, Endpoint, LoadBalancer +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator + +# Runner + Analyzer +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.metrics.analyzer import ResultsAnalyzer + + +def main() -> None: + # ── 1) Build the scenario programmatically (no YAML) ──────────────────── + # Workload (traffic generator) + generator = RqsGenerator( + id="rqs-1", + avg_active_users={"mean": 120}, + avg_request_per_minute_per_user={"mean": 20}, + 
user_sampling_window=60, + ) + + # Client + client = Client(id="client-1") + + # Two identical servers: CPU(2ms) → RAM(128MB) → IO(15ms) + endpoint = Endpoint( + endpoint_name="/api", + probability=1.0, + steps=[ + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, + {"kind": "ram", "step_operation": {"necessary_ram": 128}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.015}}, + ], + ) + + srv1 = Server( + id="srv-1", + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[endpoint], + ) + srv2 = Server( + id="srv-2", + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[endpoint], + ) + + # Load balancer (round-robin) + lb = LoadBalancer( + id="lb-1", + algorithms="round_robin", + server_covered={"srv-1", "srv-2"}, + ) + + # Network edges (3 ms mean, exponential) + edges = [ + Edge( + id="gen-client", + source="rqs-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="client-lb", + source="client-1", + target="lb-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="lb-srv1", + source="lb-1", + target="srv-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="lb-srv2", + source="lb-1", + target="srv-2", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="srv1-client", + source="srv-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="srv2-client", + source="srv-2", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + ] + + # Simulation settings + settings = SimulationSettings( + total_simulation_time=600, + sample_period_s=0.05, + enabled_sample_metrics=[ + "ready_queue_len", + "event_loop_io_sleep", + "ram_in_use", + "edge_concurrent_connection", + ], + enabled_event_metrics=["rqs_clock"], + ) + + # Assemble the payload with the builder + payload = ( + AsyncFlow() + .add_generator(generator) + 
.add_client(client) + .add_servers(srv1, srv2) + .add_load_balancer(lb) + .add_edges(*edges) + .add_simulation_settings(settings) + ).build_payload() + + # ── 2) Run the simulation ─────────────────────────────────────────────── + env = simpy.Environment() + runner = SimulationRunner(env=env, simulation_input=payload) + results: ResultsAnalyzer = runner.run() + + # ── 3) Print a concise latency summary ────────────────────────────────── + print(results.format_latency_stats()) + + # ── 4) Save plots (same directory as this script) ─────────────────────── + out_dir = Path(__file__).parent + + # 4a) Dashboard: latency + throughput (single figure) + fig_dash, axes = plt.subplots( + 1, 2, figsize=(14, 5), dpi=160, constrained_layout=True + ) + results.plot_latency_distribution(axes[0]) + results.plot_throughput(axes[1]) + dash_path = out_dir / "lb_dashboard.png" + fig_dash.savefig(dash_path, bbox_inches="tight") + print(f"🖼️ Dashboard saved to: {dash_path}") + + # 4b) Per-server figures: Ready | I/O | RAM (one row per server) + for sid in results.list_server_ids(): + fig_srv, axs = plt.subplots( + 1, 3, figsize=(18, 4.2), dpi=160, constrained_layout=True + ) + results.plot_single_server_ready_queue(axs[0], sid) + results.plot_single_server_io_queue(axs[1], sid) + results.plot_single_server_ram(axs[2], sid) + fig_srv.suptitle(f"Server metrics — {sid}", fontsize=16) + srv_path = out_dir / f"lb_server_{sid}_metrics.png" + fig_srv.savefig(srv_path, bbox_inches="tight") + print(f"🖼️ Per-server plots saved to: {srv_path}") + + +if __name__ == "__main__": + main() diff --git a/examples/builder_input/single_server/builder_service_plots.png b/examples/builder_input/single_server/builder_service_plots.png new file mode 100644 index 0000000..22fc27d Binary files /dev/null and b/examples/builder_input/single_server/builder_service_plots.png differ diff --git a/examples/builder_input/single_server/single_server.py b/examples/builder_input/single_server/single_server.py new file mode 
100644 index 0000000..bb54344 --- /dev/null +++ b/examples/builder_input/single_server/single_server.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +""" +AsyncFlow builder example — build, run, and visualize a single-server async system. + +Topology (single server) + generator ──edge──> client ──edge──> server ──edge──> client + +Load model + ~100 active users, 20 requests/min each (Poisson-like aggregate). + +Server model + 1 CPU core, 2 GB RAM + Endpoint pipeline: CPU(1 ms) → RAM(100 MB) → I/O wait (100 ms) + Semantics: + - CPU step blocks the event loop + - RAM step holds a working set until request completion + - I/O step is non-blocking (event-loop friendly) + +Network model + Each edge has exponential latency with mean 3 ms. + +Outputs + - Prints latency statistics to stdout + - Saves a 2×2 PNG in the same directory as this script: + [0,0] Latency histogram (with mean/P50/P95/P99) + [0,1] Throughput (with mean/P95/max overlays) + [1,0] Ready queue for the first server + [1,1] RAM usage for the first server +""" + +from __future__ import annotations + +from pathlib import Path +import simpy +import matplotlib.pyplot as plt + +# Public AsyncFlow API (builder) +from asyncflow import AsyncFlow +from asyncflow.components import Client, Server, Edge, Endpoint +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator + +# Runner + Analyzer +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.metrics.analyzer import ResultsAnalyzer + + +def build_and_run() -> ResultsAnalyzer: + """Build the scenario via the Pythonic builder and run the simulation.""" + # Workload (generator) + generator = RqsGenerator( + id="rqs-1", + avg_active_users={"mean": 100}, + avg_request_per_minute_per_user={"mean": 20}, + user_sampling_window=60, + ) + + # Client + client = Client(id="client-1") + + # Server + endpoint (CPU → RAM → I/O) + endpoint = Endpoint( + endpoint_name="/api", + probability=1.0, + steps=[ + {"kind": 
"initial_parsing", "step_operation": {"cpu_time": 0.001}}, # 1 ms + {"kind": "ram", "step_operation": {"necessary_ram": 100}}, # 100 MB + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.100}}, # 100 ms + ], + ) + server = Server( + id="app-1", + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[endpoint], + ) + + # Network edges (3 ms mean, exponential) + e_gen_client = Edge( + id="gen-client", + source="rqs-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ) + e_client_app = Edge( + id="client-app", + source="client-1", + target="app-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ) + e_app_client = Edge( + id="app-client", + source="app-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ) + + # Simulation settings + settings = SimulationSettings( + total_simulation_time=300, + sample_period_s=0.05, + enabled_sample_metrics=[ + "ready_queue_len", + "event_loop_io_sleep", + "ram_in_use", + "edge_concurrent_connection", + ], + enabled_event_metrics=["rqs_clock"], + ) + + # Assemble payload with the builder + payload = ( + AsyncFlow() + .add_generator(generator) + .add_client(client) + .add_servers(server) + .add_edges(e_gen_client, e_client_app, e_app_client) + .add_simulation_settings(settings) + ).build_payload() + + # Run + env = simpy.Environment() + runner = SimulationRunner(env=env, simulation_input=payload) + results: ResultsAnalyzer = runner.run() + return results + + +def main() -> None: + # Build & run + res = build_and_run() + + # Print concise latency summary + print(res.format_latency_stats()) + + # Prepare figure in the same folder as this script + script_dir = Path(__file__).parent + out_path = script_dir / "builder_service_plots.png" + + # 2×2: Latency | Throughput | Ready (first server) | RAM (first server) + fig, axes = plt.subplots(2, 2, figsize=(12, 8), dpi=160) + + # Top row + res.plot_latency_distribution(axes[0, 0]) + 
res.plot_throughput(axes[0, 1]) + + # Bottom row — first server, if present + sids = res.list_server_ids() + if sids: + sid = sids[0] + res.plot_single_server_ready_queue(axes[1, 0], sid) + res.plot_single_server_ram(axes[1, 1], sid) + else: + for ax in (axes[1, 0], axes[1, 1]): + ax.text(0.5, 0.5, "No servers", ha="center", va="center") + ax.axis("off") + + fig.tight_layout() + fig.savefig(out_path) + print(f"Plots saved to: {out_path}") + + +if __name__ == "__main__": + main() diff --git a/examples/yaml_input/data/single_server.yml b/examples/yaml_input/data/single_server.yml new file mode 100644 index 0000000..844b1ad --- /dev/null +++ b/examples/yaml_input/data/single_server.yml @@ -0,0 +1,56 @@ +# ─────────────────────────────────────────────────────────────── +# AsyncFlow scenario: generator ➜ client ➜ server ➜ client +# ─────────────────────────────────────────────────────────────── + +# 1. Traffic generator (light load) +rqs_input: + id: rqs-1 + avg_active_users: { mean: 100 } + avg_request_per_minute_per_user: { mean: 20 } + user_sampling_window: 60 + +# 2. Topology +topology_graph: + nodes: + client: { id: client-1 } + servers: + - id: srv-1 + server_resources: { cpu_cores: 1, ram_mb: 2048 } + endpoints: + - endpoint_name: ep-1 + probability: 1.0 + steps: + - kind: initial_parsing + step_operation: { cpu_time: 0.001 } + - kind: ram + step_operation: { necessary_ram: 100} + - kind: io_wait + step_operation: { io_waiting_time: 0.1 } + + edges: + - id: gen-to-client + source: rqs-1 + target: client-1 + latency: { mean: 0.003, distribution: exponential } + + - id: client-to-server + source: client-1 + target: srv-1 + latency: { mean: 0.003, distribution: exponential } + + - id: server-to-client + source: srv-1 + target: client-1 + latency: { mean: 0.003, distribution: exponential } + +# 3. 
Simulation settings +sim_settings: + total_simulation_time: 500 + sample_period_s: 0.05 + enabled_sample_metrics: + - ready_queue_len + - event_loop_io_sleep + - ram_in_use + - edge_concurrent_connection + enabled_event_metrics: + - rqs_clock diff --git a/examples/yaml_input/data/two_servers_lb.yml b/examples/yaml_input/data/two_servers_lb.yml new file mode 100644 index 0000000..100a46b --- /dev/null +++ b/examples/yaml_input/data/two_servers_lb.yml @@ -0,0 +1,71 @@ +# AsyncFlow SimulationPayload — Load Balancer + 2 identical app servers +# +# Topology: +# generator → client → LB → srv-1 +# └→ srv-2 +# srv-1 → client +# srv-2 → client +# +# Each server runs: CPU(2 ms) → RAM(128 MB) → IO wait(12 ms) +# All network links use exponential latency with small means (2–3 ms). +# +# Workload targets ~40 rps (120 users × 20 req/min ÷ 60). + +rqs_input: + id: rqs-1 + avg_active_users: { mean: 120 } + avg_request_per_minute_per_user: { mean: 20 } + user_sampling_window: 60 + +topology_graph: + nodes: + client: { id: client-1 } + + load_balancer: + id: lb-1 + algorithms: round_robin + server_covered: [srv-1, srv-2] + + servers: + - id: srv-1 + server_resources: { cpu_cores: 1, ram_mb: 2048 } + endpoints: + - endpoint_name: /api + steps: + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } # 2 ms CPU (blocks event loop) + - kind: ram + step_operation: { necessary_ram: 128 } # 128 MB working set + - kind: io_wait + step_operation: { io_waiting_time: 0.012 } # 12 ms non-blocking I/O + + - id: srv-2 + server_resources: { cpu_cores: 1, ram_mb: 2048 } + endpoints: + - endpoint_name: /api + steps: + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } + - kind: ram + step_operation: { necessary_ram: 128 } + - kind: io_wait + step_operation: { io_waiting_time: 0.012 } + + edges: + - { id: gen-client, source: rqs-1, target: client-1, latency: { mean: 0.003, distribution: exponential } } + - { id: client-lb, source: client-1, target: lb-1, latency: { mean: 0.002, 
distribution: exponential } } + - { id: lb-srv1, source: lb-1, target: srv-1, latency: { mean: 0.002, distribution: exponential } } + - { id: lb-srv2, source: lb-1, target: srv-2, latency: { mean: 0.002, distribution: exponential } } + - { id: srv1-client, source: srv-1, target: client-1, latency: { mean: 0.003, distribution: exponential } } + - { id: srv2-client, source: srv-2, target: client-1, latency: { mean: 0.003, distribution: exponential } } + +sim_settings: + total_simulation_time: 600 + sample_period_s: 0.05 + enabled_sample_metrics: + - ready_queue_len + - event_loop_io_sleep + - ram_in_use + - edge_concurrent_connection + enabled_event_metrics: + - rqs_clock diff --git a/examples/yaml_input/lb_dashboard.png b/examples/yaml_input/lb_dashboard.png new file mode 100644 index 0000000..dbe7d42 Binary files /dev/null and b/examples/yaml_input/lb_dashboard.png differ diff --git a/examples/yaml_input/lb_server_srv-1_metrics.png b/examples/yaml_input/lb_server_srv-1_metrics.png new file mode 100644 index 0000000..6d3ac35 Binary files /dev/null and b/examples/yaml_input/lb_server_srv-1_metrics.png differ diff --git a/examples/yaml_input/lb_server_srv-2_metrics.png b/examples/yaml_input/lb_server_srv-2_metrics.png new file mode 100644 index 0000000..916c60f Binary files /dev/null and b/examples/yaml_input/lb_server_srv-2_metrics.png differ diff --git a/examples/yaml_input/load_balancer/two_servers.py b/examples/yaml_input/load_balancer/two_servers.py new file mode 100644 index 0000000..1a01277 --- /dev/null +++ b/examples/yaml_input/load_balancer/two_servers.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +""" +Walkthrough: run a Load-Balanced (2 servers) AsyncFlow scenario from YAML. + +What this script does +--------------------- +1) Loads the SimulationPayload from a YAML file (round-robin LB, 2 identical servers). +2) Runs the simulation via `SimulationRunner`. +3) Prints a concise latency summary to stdout. 
+4) Saves plots **in the same folder as this script**: + • `lb_dashboard.png` (Latency histogram + Throughput) + • One figure per server with 3 panels: Ready Queue, I/O Queue, RAM usage. + +How to use +---------- +- Put this script and `two_servers_lb.yml` in the same directory. +- Run: `python run_lb_from_yaml.py` +""" + +from __future__ import annotations + +from pathlib import Path +import simpy +import matplotlib.pyplot as plt + +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.metrics.analyzer import ResultsAnalyzer + + +def main() -> None: + # Paths (same directory as this script) + script_dir = Path(__file__).parent.parent + yaml_path = script_dir / "data" / "two_servers_lb.yml" + if not yaml_path.exists(): + raise FileNotFoundError(f"YAML configuration not found: {yaml_path}") + + # Run the simulation + print(f"🚀 Loading and running simulation from: {yaml_path}") + env = simpy.Environment() + runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) + results: ResultsAnalyzer = runner.run() + print("✅ Simulation finished!") + + # Print concise latency summary + print(results.format_latency_stats()) + + # ---- Plots: dashboard (latency + throughput) ---- + fig_dash, axes_dash = plt.subplots(1, 2, figsize=(14, 5), dpi=160) + results.plot_latency_distribution(axes_dash[0]) + results.plot_throughput(axes_dash[1]) + fig_dash.tight_layout() + out_dashboard = script_dir / "lb_dashboard.png" + fig_dash.savefig(out_dashboard) + print(f"🖼️ Dashboard saved to: {out_dashboard}") + + # ---- Per-server metrics: one figure per server (Ready | I/O | RAM) ---- + for sid in results.list_server_ids(): + fig_row, axes = plt.subplots(1, 3, figsize=(16, 3.8), dpi=160) + results.plot_single_server_ready_queue(axes[0], sid) + results.plot_single_server_io_queue(axes[1], sid) + results.plot_single_server_ram(axes[2], sid) + fig_row.suptitle(f"Server metrics — {sid}", y=1.04, fontsize=14) + fig_row.tight_layout() + out_path = script_dir / 
f"lb_server_{sid}_metrics.png" + fig_row.savefig(out_path, bbox_inches="tight") + print(f"🖼️ Server metrics for '{sid}' saved to: {out_path}") + + +if __name__ == "__main__": + main() diff --git a/examples/yaml_input/single_server/single_server.py b/examples/yaml_input/single_server/single_server.py new file mode 100644 index 0000000..ec14998 --- /dev/null +++ b/examples/yaml_input/single_server/single_server.py @@ -0,0 +1,109 @@ +""" +AsyncFlow — YAML single-server example: run and export charts. + +System (single server) + generator → client → server → client + +Load + ~100 active users, ~20 requests/min each (stochastic aggregate). + +Server + 1 CPU core, 2 GB RAM, endpoint "ep-1": + CPU(1 ms) → RAM(100 MB) → I/O wait (100 ms) + Semantics: + - CPU step blocks the event loop + - RAM step holds a working set until the request leaves the server + - I/O step is non-blocking (event-loop friendly) + +Network + Each edge has exponential latency with mean 3 ms. + +Simulation settings + Duration: 500 s + Sampling period: 50 ms + +What this script does + 1) Loads the YAML scenario and runs the simulation. + 2) Prints latency statistics to stdout. + 3) Saves charts next to this script: + - Dashboard PNG: latency histogram (mean/P50/P95/P99) + and throughput (mean/P95/max) side-by-side. + - Per-server PNGs: Ready queue, I/O queue, and RAM usage for each server. 
+""" + + +from __future__ import annotations + +import logging +from pathlib import Path + +# SimPy environment is required by SimulationRunner.from_yaml +import simpy + +# matplotlib is needed to create figures for plotting +import matplotlib.pyplot as plt + +# The only imports a user needs to run a simulation +from asyncflow.metrics.analyzer import ResultsAnalyzer +from asyncflow.runtime.simulation_runner import SimulationRunner + +# --- Basic Logging Setup --- +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") + + +def main() -> None: + """Defines paths, runs the simulation, and generates all outputs.""" + # --- 1. Define File Paths --- + script_dir = Path(__file__).parent # <-- same folder as this file + out_dir = script_dir # <-- save outputs here + yaml_path = script_dir.parent / "data" / "single_server.yml" + output_base_name = "single_server_results" # prefix for all output files + + if not yaml_path.exists(): + raise FileNotFoundError(f"YAML configuration file not found: {yaml_path}") + + # --- 2. 
Run the Simulation --- + print(f"🚀 Loading and running simulation from: {yaml_path}") + env = simpy.Environment() # Create the SimPy environment + runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) # pass env + results: ResultsAnalyzer = runner.run() + print("✅ Simulation finished!") + + # Plot 1: The main dashboard (Latency Distribution + Throughput) + fig_base, axes_base = plt.subplots(1, 2, figsize=(14, 5)) + results.plot_base_dashboard(axes_base[0], axes_base[1]) + fig_base.tight_layout() + base_plot_path = out_dir / f"{output_base_name}_dashboard.png" + fig_base.savefig(base_plot_path) + print(f"🖼️ Base dashboard saved to: {base_plot_path}") + + # Plot 2: Individual plots for each server's metrics + server_ids = results.list_server_ids() + for sid in server_ids: + # Ready queue (separate) + fig_rdy, ax_rdy = plt.subplots(figsize=(10, 5)) + results.plot_single_server_ready_queue(ax_rdy, sid) + fig_rdy.tight_layout() + rdy_path = out_dir / f"{output_base_name}_ready_queue_{sid}.png" + fig_rdy.savefig(rdy_path) + print(f"🖼️ Ready queue for '{sid}' saved to: {rdy_path}") + + # I/O queue (separate) + fig_io, ax_io = plt.subplots(figsize=(10, 5)) + results.plot_single_server_io_queue(ax_io, sid) + fig_io.tight_layout() + io_path = out_dir / f"{output_base_name}_io_queue_{sid}.png" + fig_io.savefig(io_path) + print(f"🖼️ I/O queue for '{sid}' saved to: {io_path}") + + # RAM (separate) + fig_r, ax_r = plt.subplots(figsize=(10, 5)) + results.plot_single_server_ram(ax_r, sid) + fig_r.tight_layout() + r_path = out_dir / f"{output_base_name}_ram_{sid}.png" + fig_r.savefig(r_path) + print(f"🖼️ RAM plot for '{sid}' saved to: {r_path}") + + +if __name__ == "__main__": + main() diff --git a/examples/yaml_input/single_server/single_server_results_dashboard.png b/examples/yaml_input/single_server/single_server_results_dashboard.png new file mode 100644 index 0000000..0a6f994 Binary files /dev/null and 
b/examples/yaml_input/single_server/single_server_results_dashboard.png differ diff --git a/examples/yaml_input/single_server/single_server_results_io_queue_srv-1.png b/examples/yaml_input/single_server/single_server_results_io_queue_srv-1.png new file mode 100644 index 0000000..f2bb1f0 Binary files /dev/null and b/examples/yaml_input/single_server/single_server_results_io_queue_srv-1.png differ diff --git a/examples/yaml_input/single_server/single_server_results_ram_srv-1.png b/examples/yaml_input/single_server/single_server_results_ram_srv-1.png new file mode 100644 index 0000000..c7a33af Binary files /dev/null and b/examples/yaml_input/single_server/single_server_results_ram_srv-1.png differ diff --git a/examples/yaml_input/single_server/single_server_results_ready_queue_srv-1.png b/examples/yaml_input/single_server/single_server_results_ready_queue_srv-1.png new file mode 100644 index 0000000..7539852 Binary files /dev/null and b/examples/yaml_input/single_server/single_server_results_ready_queue_srv-1.png differ diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..493ce80 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1294 @@ +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "contourpy" +version = "1.3.3" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.11" +files = [ + {file = "contourpy-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:709a48ef9a690e1343202916450bc48b9e51c049b089c7f79a267b46cffcdaa1"}, + {file = "contourpy-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:23416f38bfd74d5d28ab8429cc4d63fa67d5068bd711a85edb1c3fb0c3e2f381"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:929ddf8c4c7f348e4c0a5a3a714b5c8542ffaa8c22954862a46ca1813b667ee7"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9e999574eddae35f1312c2b4b717b7885d4edd6cb46700e04f7f02db454e67c1"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf67e0e3f482cb69779dd3061b534eb35ac9b17f163d851e2a547d56dba0a3a"}, + {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51e79c1f7470158e838808d4a996fa9bac72c498e93d8ebe5119bc1e6becb0db"}, + {file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:598c3aaece21c503615fd59c92a3598b428b2f01bfb4b8ca9c4edeecc2438620"}, + {file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:322ab1c99b008dad206d406bb61d014cf0174df491ae9d9d0fac6a6fda4f977f"}, + {file = "contourpy-1.3.3-cp311-cp311-win32.whl", hash = "sha256:fd907ae12cd483cd83e414b12941c632a969171bf90fc937d0c9f268a31cafff"}, + {file = 
"contourpy-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:3519428f6be58431c56581f1694ba8e50626f2dd550af225f82fb5f5814d2a42"}, + {file = "contourpy-1.3.3-cp311-cp311-win_arm64.whl", hash = "sha256:15ff10bfada4bf92ec8b31c62bf7c1834c244019b4a33095a68000d7075df470"}, + {file = "contourpy-1.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb"}, + {file = "contourpy-1.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6"}, + {file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7"}, + {file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8"}, + {file = "contourpy-1.3.3-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea"}, + {file = "contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1"}, + {file = "contourpy-1.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7"}, + {file = "contourpy-1.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411"}, + {file = "contourpy-1.3.3-cp312-cp312-win32.whl", hash = "sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69"}, + {file = "contourpy-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b"}, + {file = "contourpy-1.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc"}, + {file = 
"contourpy-1.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:177fb367556747a686509d6fef71d221a4b198a3905fe824430e5ea0fda54eb5"}, + {file = "contourpy-1.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d002b6f00d73d69333dac9d0b8d5e84d9724ff9ef044fd63c5986e62b7c9e1b1"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:348ac1f5d4f1d66d3322420f01d42e43122f43616e0f194fc1c9f5d830c5b286"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:655456777ff65c2c548b7c454af9c6f33f16c8884f11083244b5819cc214f1b5"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:644a6853d15b2512d67881586bd03f462c7ab755db95f16f14d7e238f2852c67"}, + {file = "contourpy-1.3.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4debd64f124ca62069f313a9cb86656ff087786016d76927ae2cf37846b006c9"}, + {file = "contourpy-1.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a15459b0f4615b00bbd1e91f1b9e19b7e63aea7483d03d804186f278c0af2659"}, + {file = "contourpy-1.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca0fdcd73925568ca027e0b17ab07aad764be4706d0a925b89227e447d9737b7"}, + {file = "contourpy-1.3.3-cp313-cp313-win32.whl", hash = "sha256:b20c7c9a3bf701366556e1b1984ed2d0cedf999903c51311417cf5f591d8c78d"}, + {file = "contourpy-1.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:1cadd8b8969f060ba45ed7c1b714fe69185812ab43bd6b86a9123fe8f99c3263"}, + {file = "contourpy-1.3.3-cp313-cp313-win_arm64.whl", hash = "sha256:fd914713266421b7536de2bfa8181aa8c699432b6763a0ea64195ebe28bff6a9"}, + {file = "contourpy-1.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:88df9880d507169449d434c293467418b9f6cbe82edd19284aa0409e7fdb933d"}, + {file = "contourpy-1.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d06bb1f751ba5d417047db62bca3c8fde202b8c11fb50742ab3ab962c81e8216"}, + {file = 
"contourpy-1.3.3-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e4e6b05a45525357e382909a4c1600444e2a45b4795163d3b22669285591c1ae"}, + {file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ab3074b48c4e2cf1a960e6bbeb7f04566bf36b1861d5c9d4d8ac04b82e38ba20"}, + {file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c3d53c796f8647d6deb1abe867daeb66dcc8a97e8455efa729516b997b8ed99"}, + {file = "contourpy-1.3.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50ed930df7289ff2a8d7afeb9603f8289e5704755c7e5c3bbd929c90c817164b"}, + {file = "contourpy-1.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4feffb6537d64b84877da813a5c30f1422ea5739566abf0bd18065ac040e120a"}, + {file = "contourpy-1.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2b7e9480ffe2b0cd2e787e4df64270e3a0440d9db8dc823312e2c940c167df7e"}, + {file = "contourpy-1.3.3-cp313-cp313t-win32.whl", hash = "sha256:283edd842a01e3dcd435b1c5116798d661378d83d36d337b8dde1d16a5fc9ba3"}, + {file = "contourpy-1.3.3-cp313-cp313t-win_amd64.whl", hash = "sha256:87acf5963fc2b34825e5b6b048f40e3635dd547f590b04d2ab317c2619ef7ae8"}, + {file = "contourpy-1.3.3-cp313-cp313t-win_arm64.whl", hash = "sha256:3c30273eb2a55024ff31ba7d052dde990d7d8e5450f4bbb6e913558b3d6c2301"}, + {file = "contourpy-1.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fde6c716d51c04b1c25d0b90364d0be954624a0ee9d60e23e850e8d48353d07a"}, + {file = "contourpy-1.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cbedb772ed74ff5be440fa8eee9bd49f64f6e3fc09436d9c7d8f1c287b121d77"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:22e9b1bd7a9b1d652cd77388465dc358dafcd2e217d35552424aa4f996f524f5"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:a22738912262aa3e254e4f3cb079a95a67132fc5a063890e224393596902f5a4"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:afe5a512f31ee6bd7d0dda52ec9864c984ca3d66664444f2d72e0dc4eb832e36"}, + {file = "contourpy-1.3.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f64836de09927cba6f79dcd00fdd7d5329f3fccc633468507079c829ca4db4e3"}, + {file = "contourpy-1.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1fd43c3be4c8e5fd6e4f2baeae35ae18176cf2e5cced681cca908addf1cdd53b"}, + {file = "contourpy-1.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6afc576f7b33cf00996e5c1102dc2a8f7cc89e39c0b55df93a0b78c1bd992b36"}, + {file = "contourpy-1.3.3-cp314-cp314-win32.whl", hash = "sha256:66c8a43a4f7b8df8b71ee1840e4211a3c8d93b214b213f590e18a1beca458f7d"}, + {file = "contourpy-1.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:cf9022ef053f2694e31d630feaacb21ea24224be1c3ad0520b13d844274614fd"}, + {file = "contourpy-1.3.3-cp314-cp314-win_arm64.whl", hash = "sha256:95b181891b4c71de4bb404c6621e7e2390745f887f2a026b2d99e92c17892339"}, + {file = "contourpy-1.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:33c82d0138c0a062380332c861387650c82e4cf1747aaa6938b9b6516762e772"}, + {file = "contourpy-1.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ea37e7b45949df430fe649e5de8351c423430046a2af20b1c1961cae3afcda77"}, + {file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d304906ecc71672e9c89e87c4675dc5c2645e1f4269a5063b99b0bb29f232d13"}, + {file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca658cd1a680a5c9ea96dc61cdbae1e85c8f25849843aa799dfd3cb370ad4fbe"}, + {file = "contourpy-1.3.3-cp314-cp314t-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ab2fd90904c503739a75b7c8c5c01160130ba67944a7b77bbf36ef8054576e7f"}, + {file = 
"contourpy-1.3.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7301b89040075c30e5768810bc96a8e8d78085b47d8be6e4c3f5a0b4ed478a0"}, + {file = "contourpy-1.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2a2a8b627d5cc6b7c41a4beff6c5ad5eb848c88255fda4a8745f7e901b32d8e4"}, + {file = "contourpy-1.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fd6ec6be509c787f1caf6b247f0b1ca598bef13f4ddeaa126b7658215529ba0f"}, + {file = "contourpy-1.3.3-cp314-cp314t-win32.whl", hash = "sha256:e74a9a0f5e3fff48fb5a7f2fd2b9b70a3fe014a67522f79b7cca4c0c7e43c9ae"}, + {file = "contourpy-1.3.3-cp314-cp314t-win_amd64.whl", hash = "sha256:13b68d6a62db8eafaebb8039218921399baf6e47bf85006fd8529f2a08ef33fc"}, + {file = "contourpy-1.3.3-cp314-cp314t-win_arm64.whl", hash = "sha256:b7448cb5a725bb1e35ce88771b86fba35ef418952474492cf7c764059933ff8b"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cd5dfcaeb10f7b7f9dc8941717c6c2ade08f587be2226222c12b25f0483ed497"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:0c1fc238306b35f246d61a1d416a627348b5cf0648648a031e14bb8705fcdfe8"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70f9aad7de812d6541d29d2bbf8feb22ff7e1c299523db288004e3157ff4674e"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ed3657edf08512fc3fe81b510e35c2012fbd3081d2e26160f27ca28affec989"}, + {file = "contourpy-1.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77"}, + {file = "contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880"}, +] + +[package.dependencies] +numpy = ">=1.25" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["bokeh", 
"contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.17.0)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] + +[[package]] +name = "coverage" +version = "7.10.4" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "coverage-7.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d92d6edb0ccafd20c6fbf9891ca720b39c2a6a4b4a6f9cf323ca2c986f33e475"}, + {file = "coverage-7.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7202da14dc0236884fcc45665ffb2d79d4991a53fbdf152ab22f69f70923cc22"}, + {file = "coverage-7.10.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ada418633ae24ec8d0fcad5efe6fc7aa3c62497c6ed86589e57844ad04365674"}, + {file = "coverage-7.10.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b828e33eca6c3322adda3b5884456f98c435182a44917ded05005adfa1415500"}, + {file = "coverage-7.10.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:802793ba397afcfdbe9f91f89d65ae88b958d95edc8caf948e1f47d8b6b2b606"}, + {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d0b23512338c54101d3bf7a1ab107d9d75abda1d5f69bc0887fd079253e4c27e"}, + {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f36b7dcf72d06a8c5e2dd3aca02be2b1b5db5f86404627dff834396efce958f2"}, + {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fce316c367a1dc2c411821365592eeb335ff1781956d87a0410eae248188ba51"}, + {file = "coverage-7.10.4-cp310-cp310-win32.whl", hash = "sha256:8c5dab29fc8070b3766b5fc85f8d89b19634584429a2da6d42da5edfadaf32ae"}, + {file = "coverage-7.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:4b0d114616f0fccb529a1817457d5fb52a10e106f86c5fb3b0bd0d45d0d69b93"}, + {file = 
"coverage-7.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:05d5f98ec893d4a2abc8bc5f046f2f4367404e7e5d5d18b83de8fde1093ebc4f"}, + {file = "coverage-7.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9267efd28f8994b750d171e58e481e3bbd69e44baed540e4c789f8e368b24b88"}, + {file = "coverage-7.10.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4456a039fdc1a89ea60823d0330f1ac6f97b0dbe9e2b6fb4873e889584b085fb"}, + {file = "coverage-7.10.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c2bfbd2a9f7e68a21c5bd191be94bfdb2691ac40d325bac9ef3ae45ff5c753d9"}, + {file = "coverage-7.10.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ab7765f10ae1df7e7fe37de9e64b5a269b812ee22e2da3f84f97b1c7732a0d8"}, + {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a09b13695166236e171ec1627ff8434b9a9bae47528d0ba9d944c912d33b3d2"}, + {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5c9e75dfdc0167d5675e9804f04a56b2cf47fb83a524654297000b578b8adcb7"}, + {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c751261bfe6481caba15ec005a194cb60aad06f29235a74c24f18546d8377df0"}, + {file = "coverage-7.10.4-cp311-cp311-win32.whl", hash = "sha256:051c7c9e765f003c2ff6e8c81ccea28a70fb5b0142671e4e3ede7cebd45c80af"}, + {file = "coverage-7.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:1a647b152f10be08fb771ae4a1421dbff66141e3d8ab27d543b5eb9ea5af8e52"}, + {file = "coverage-7.10.4-cp311-cp311-win_arm64.whl", hash = "sha256:b09b9e4e1de0d406ca9f19a371c2beefe3193b542f64a6dd40cfcf435b7d6aa0"}, + {file = "coverage-7.10.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a1f0264abcabd4853d4cb9b3d164adbf1565da7dab1da1669e93f3ea60162d79"}, + {file = "coverage-7.10.4-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:536cbe6b118a4df231b11af3e0f974a72a095182ff8ec5f4868c931e8043ef3e"}, + {file = "coverage-7.10.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9a4c0d84134797b7bf3f080599d0cd501471f6c98b715405166860d79cfaa97e"}, + {file = "coverage-7.10.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7c155fc0f9cee8c9803ea0ad153ab6a3b956baa5d4cd993405dc0b45b2a0b9e0"}, + {file = "coverage-7.10.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5f2ab6e451d4b07855d8bcf063adf11e199bff421a4ba57f5bb95b7444ca62"}, + {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:685b67d99b945b0c221be0780c336b303a7753b3e0ec0d618c795aada25d5e7a"}, + {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0c079027e50c2ae44da51c2e294596cbc9dbb58f7ca45b30651c7e411060fc23"}, + {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3749aa72b93ce516f77cf5034d8e3c0dfd45c6e8a163a602ede2dc5f9a0bb927"}, + {file = "coverage-7.10.4-cp312-cp312-win32.whl", hash = "sha256:fecb97b3a52fa9bcd5a7375e72fae209088faf671d39fae67261f37772d5559a"}, + {file = "coverage-7.10.4-cp312-cp312-win_amd64.whl", hash = "sha256:26de58f355626628a21fe6a70e1e1fad95702dafebfb0685280962ae1449f17b"}, + {file = "coverage-7.10.4-cp312-cp312-win_arm64.whl", hash = "sha256:67e8885408f8325198862bc487038a4980c9277d753cb8812510927f2176437a"}, + {file = "coverage-7.10.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b8e1d2015d5dfdbf964ecef12944c0c8c55b885bb5c0467ae8ef55e0e151233"}, + {file = "coverage-7.10.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:25735c299439018d66eb2dccf54f625aceb78645687a05f9f848f6e6c751e169"}, + {file = "coverage-7.10.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:715c06cb5eceac4d9b7cdf783ce04aa495f6aff657543fea75c30215b28ddb74"}, + {file = 
"coverage-7.10.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e017ac69fac9aacd7df6dc464c05833e834dc5b00c914d7af9a5249fcccf07ef"}, + {file = "coverage-7.10.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bad180cc40b3fccb0f0e8c702d781492654ac2580d468e3ffc8065e38c6c2408"}, + {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:becbdcd14f685fada010a5f792bf0895675ecf7481304fe159f0cd3f289550bd"}, + {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0b485ca21e16a76f68060911f97ebbe3e0d891da1dbbce6af7ca1ab3f98b9097"}, + {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6c1d098ccfe8e1e0a1ed9a0249138899948afd2978cbf48eb1cc3fcd38469690"}, + {file = "coverage-7.10.4-cp313-cp313-win32.whl", hash = "sha256:8630f8af2ca84b5c367c3df907b1706621abe06d6929f5045fd628968d421e6e"}, + {file = "coverage-7.10.4-cp313-cp313-win_amd64.whl", hash = "sha256:f68835d31c421736be367d32f179e14ca932978293fe1b4c7a6a49b555dff5b2"}, + {file = "coverage-7.10.4-cp313-cp313-win_arm64.whl", hash = "sha256:6eaa61ff6724ca7ebc5326d1fae062d85e19b38dd922d50903702e6078370ae7"}, + {file = "coverage-7.10.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:702978108876bfb3d997604930b05fe769462cc3000150b0e607b7b444f2fd84"}, + {file = "coverage-7.10.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e8f978e8c5521d9c8f2086ac60d931d583fab0a16f382f6eb89453fe998e2484"}, + {file = "coverage-7.10.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:df0ac2ccfd19351411c45e43ab60932b74472e4648b0a9edf6a3b58846e246a9"}, + {file = "coverage-7.10.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73a0d1aaaa3796179f336448e1576a3de6fc95ff4f07c2d7251d4caf5d18cf8d"}, + {file = 
"coverage-7.10.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:873da6d0ed6b3ffc0bc01f2c7e3ad7e2023751c0d8d86c26fe7322c314b031dc"}, + {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c6446c75b0e7dda5daa876a1c87b480b2b52affb972fedd6c22edf1aaf2e00ec"}, + {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6e73933e296634e520390c44758d553d3b573b321608118363e52113790633b9"}, + {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52073d4b08d2cb571234c8a71eb32af3c6923149cf644a51d5957ac128cf6aa4"}, + {file = "coverage-7.10.4-cp313-cp313t-win32.whl", hash = "sha256:e24afb178f21f9ceb1aefbc73eb524769aa9b504a42b26857243f881af56880c"}, + {file = "coverage-7.10.4-cp313-cp313t-win_amd64.whl", hash = "sha256:be04507ff1ad206f4be3d156a674e3fb84bbb751ea1b23b142979ac9eebaa15f"}, + {file = "coverage-7.10.4-cp313-cp313t-win_arm64.whl", hash = "sha256:f3e3ff3f69d02b5dad67a6eac68cc9c71ae343b6328aae96e914f9f2f23a22e2"}, + {file = "coverage-7.10.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a59fe0af7dd7211ba595cf7e2867458381f7e5d7b4cffe46274e0b2f5b9f4eb4"}, + {file = "coverage-7.10.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3a6c35c5b70f569ee38dc3350cd14fdd0347a8b389a18bb37538cc43e6f730e6"}, + {file = "coverage-7.10.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:acb7baf49f513554c4af6ef8e2bd6e8ac74e6ea0c7386df8b3eb586d82ccccc4"}, + {file = "coverage-7.10.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a89afecec1ed12ac13ed203238b560cbfad3522bae37d91c102e690b8b1dc46c"}, + {file = "coverage-7.10.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:480442727f464407d8ade6e677b7f21f3b96a9838ab541b9a28ce9e44123c14e"}, + {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:a89bf193707f4a17f1ed461504031074d87f035153239f16ce86dfb8f8c7ac76"}, + {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:3ddd912c2fc440f0fb3229e764feec85669d5d80a988ff1b336a27d73f63c818"}, + {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a538944ee3a42265e61c7298aeba9ea43f31c01271cf028f437a7b4075592cf"}, + {file = "coverage-7.10.4-cp314-cp314-win32.whl", hash = "sha256:fd2e6002be1c62476eb862b8514b1ba7e7684c50165f2a8d389e77da6c9a2ebd"}, + {file = "coverage-7.10.4-cp314-cp314-win_amd64.whl", hash = "sha256:ec113277f2b5cf188d95fb66a65c7431f2b9192ee7e6ec9b72b30bbfb53c244a"}, + {file = "coverage-7.10.4-cp314-cp314-win_arm64.whl", hash = "sha256:9744954bfd387796c6a091b50d55ca7cac3d08767795b5eec69ad0f7dbf12d38"}, + {file = "coverage-7.10.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5af4829904dda6aabb54a23879f0f4412094ba9ef153aaa464e3c1b1c9bc98e6"}, + {file = "coverage-7.10.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7bba5ed85e034831fac761ae506c0644d24fd5594727e174b5a73aff343a7508"}, + {file = "coverage-7.10.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d57d555b0719834b55ad35045de6cc80fc2b28e05adb6b03c98479f9553b387f"}, + {file = "coverage-7.10.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ba62c51a72048bb1ea72db265e6bd8beaabf9809cd2125bbb5306c6ce105f214"}, + {file = "coverage-7.10.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0acf0c62a6095f07e9db4ec365cc58c0ef5babb757e54745a1aa2ea2a2564af1"}, + {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e1033bf0f763f5cf49ffe6594314b11027dcc1073ac590b415ea93463466deec"}, + {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:92c29eff894832b6a40da1789b1f252305af921750b03ee4535919db9179453d"}, + {file = 
"coverage-7.10.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:822c4c830989c2093527e92acd97be4638a44eb042b1bdc0e7a278d84a070bd3"}, + {file = "coverage-7.10.4-cp314-cp314t-win32.whl", hash = "sha256:e694d855dac2e7cf194ba33653e4ba7aad7267a802a7b3fc4347d0517d5d65cd"}, + {file = "coverage-7.10.4-cp314-cp314t-win_amd64.whl", hash = "sha256:efcc54b38ef7d5bfa98050f220b415bc5bb3d432bd6350a861cf6da0ede2cdcd"}, + {file = "coverage-7.10.4-cp314-cp314t-win_arm64.whl", hash = "sha256:6f3a3496c0fa26bfac4ebc458747b778cff201c8ae94fa05e1391bab0dbc473c"}, + {file = "coverage-7.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:48fd4d52600c2a9d5622e52dfae674a7845c5e1dceaf68b88c99feb511fbcfd6"}, + {file = "coverage-7.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:56217b470d09d69e6b7dcae38200f95e389a77db801cb129101697a4553b18b6"}, + {file = "coverage-7.10.4-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:44ac3f21a6e28c5ff7f7a47bca5f87885f6a1e623e637899125ba47acd87334d"}, + {file = "coverage-7.10.4-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3387739d72c84d17b4d2f7348749cac2e6700e7152026912b60998ee9a40066b"}, + {file = "coverage-7.10.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3f111ff20d9a6348e0125be892608e33408dd268f73b020940dfa8511ad05503"}, + {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:01a852f0a9859734b018a3f483cc962d0b381d48d350b1a0c47d618c73a0c398"}, + {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:225111dd06759ba4e37cee4c0b4f3df2b15c879e9e3c37bf986389300b9917c3"}, + {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2178d4183bd1ba608f0bb12e71e55838ba1b7dbb730264f8b08de9f8ef0c27d0"}, + {file = "coverage-7.10.4-cp39-cp39-win32.whl", hash = "sha256:93d175fe81913aee7a6ea430abbdf2a79f1d9fd451610e12e334e4fe3264f563"}, + {file = 
"coverage-7.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:2221a823404bb941c7721cf0ef55ac6ee5c25d905beb60c0bba5e5e85415d353"}, + {file = "coverage-7.10.4-py3-none-any.whl", hash = "sha256:065d75447228d05121e5c938ca8f0e91eed60a1eb2d1258d42d5084fecfc3302"}, + {file = "coverage-7.10.4.tar.gz", hash = "sha256:25f5130af6c8e7297fd14634955ba9e1697f47143f289e2a23284177c0061d27"}, +] + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "dnspython" +version = "2.7.0" +description = "DNS toolkit" +optional = false +python-versions = ">=3.9" +files = [ + {file = "dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86"}, + {file = "dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1"}, +] + +[package.extras] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.16.0)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "quart-trio (>=0.11.0)", "sphinx (>=7.2.0)", "sphinx-rtd-theme (>=2.0.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=43)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=1.0.0)"] +idna = ["idna (>=3.7)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "email-validator" +version = "2.2.0" +description = "A robust email address syntax and deliverability validation library." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"}, + {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +idna = ">=2.0.0" + +[[package]] +name = "fonttools" +version = "4.59.1" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.9" +files = [ + {file = "fonttools-4.59.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e90a89e52deb56b928e761bb5b5f65f13f669bfd96ed5962975debea09776a23"}, + {file = "fonttools-4.59.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d29ab70658d2ec19422b25e6ace00a0b0ae4181ee31e03335eaef53907d2d83"}, + {file = "fonttools-4.59.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f9721a564978a10d5c12927f99170d18e9a32e5a727c61eae56f956a4d118b"}, + {file = "fonttools-4.59.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8c8758a7d97848fc8b514b3d9b4cb95243714b2f838dde5e1e3c007375de6214"}, + {file = "fonttools-4.59.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2aeb829ad9d41a2ef17cab8bb5d186049ba38a840f10352e654aa9062ec32dc1"}, + {file = "fonttools-4.59.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac216a2980a2d2b3b88c68a24f8a9bfb203e2490e991b3238502ad8f1e7bfed0"}, + {file = "fonttools-4.59.1-cp310-cp310-win32.whl", hash = "sha256:d31dc137ed8ec71dbc446949eba9035926e6e967b90378805dcf667ff57cabb1"}, + {file = "fonttools-4.59.1-cp310-cp310-win_amd64.whl", hash = "sha256:5265bc52ed447187d39891b5f21d7217722735d0de9fe81326566570d12851a9"}, + {file = "fonttools-4.59.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4909cce2e35706f3d18c54d3dcce0414ba5e0fb436a454dffec459c61653b513"}, + {file = 
"fonttools-4.59.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:efbec204fa9f877641747f2d9612b2b656071390d7a7ef07a9dbf0ecf9c7195c"}, + {file = "fonttools-4.59.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39dfd42cc2dc647b2c5469bc7a5b234d9a49e72565b96dd14ae6f11c2c59ef15"}, + {file = "fonttools-4.59.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b11bc177a0d428b37890825d7d025040d591aa833f85f8d8878ed183354f47df"}, + {file = "fonttools-4.59.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b9b4c35b3be45e5bc774d3fc9608bbf4f9a8d371103b858c80edbeed31dd5aa"}, + {file = "fonttools-4.59.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:01158376b8a418a0bae9625c476cebfcfcb5e6761e9d243b219cd58341e7afbb"}, + {file = "fonttools-4.59.1-cp311-cp311-win32.whl", hash = "sha256:cf7c5089d37787387123f1cb8f1793a47c5e1e3d1e4e7bfbc1cc96e0f925eabe"}, + {file = "fonttools-4.59.1-cp311-cp311-win_amd64.whl", hash = "sha256:c866eef7a0ba320486ade6c32bfc12813d1a5db8567e6904fb56d3d40acc5116"}, + {file = "fonttools-4.59.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:43ab814bbba5f02a93a152ee61a04182bb5809bd2bc3609f7822e12c53ae2c91"}, + {file = "fonttools-4.59.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4f04c3ffbfa0baafcbc550657cf83657034eb63304d27b05cff1653b448ccff6"}, + {file = "fonttools-4.59.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d601b153e51a5a6221f0d4ec077b6bfc6ac35bfe6c19aeaa233d8990b2b71726"}, + {file = "fonttools-4.59.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c735e385e30278c54f43a0d056736942023c9043f84ee1021eff9fd616d17693"}, + {file = "fonttools-4.59.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1017413cdc8555dce7ee23720da490282ab7ec1cf022af90a241f33f9a49afc4"}, + {file = 
"fonttools-4.59.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5c6d8d773470a5107052874341ed3c487c16ecd179976d81afed89dea5cd7406"}, + {file = "fonttools-4.59.1-cp312-cp312-win32.whl", hash = "sha256:2a2d0d33307f6ad3a2086a95dd607c202ea8852fa9fb52af9b48811154d1428a"}, + {file = "fonttools-4.59.1-cp312-cp312-win_amd64.whl", hash = "sha256:0b9e4fa7eaf046ed6ac470f6033d52c052481ff7a6e0a92373d14f556f298dc0"}, + {file = "fonttools-4.59.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:89d9957b54246c6251345297dddf77a84d2c19df96af30d2de24093bbdf0528b"}, + {file = "fonttools-4.59.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8156b11c0d5405810d216f53907bd0f8b982aa5f1e7e3127ab3be1a4062154ff"}, + {file = "fonttools-4.59.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8387876a8011caec52d327d5e5bca705d9399ec4b17afb8b431ec50d47c17d23"}, + {file = "fonttools-4.59.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb13823a74b3a9204a8ed76d3d6d5ec12e64cc5bc44914eb9ff1cdac04facd43"}, + {file = "fonttools-4.59.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e1ca10da138c300f768bb68e40e5b20b6ecfbd95f91aac4cc15010b6b9d65455"}, + {file = "fonttools-4.59.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2beb5bfc4887a3130f8625349605a3a45fe345655ce6031d1bac11017454b943"}, + {file = "fonttools-4.59.1-cp313-cp313-win32.whl", hash = "sha256:419f16d750d78e6d704bfe97b48bba2f73b15c9418f817d0cb8a9ca87a5b94bf"}, + {file = "fonttools-4.59.1-cp313-cp313-win_amd64.whl", hash = "sha256:c536f8a852e8d3fa71dde1ec03892aee50be59f7154b533f0bf3c1174cfd5126"}, + {file = "fonttools-4.59.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d5c3bfdc9663f3d4b565f9cb3b8c1efb3e178186435b45105bde7328cfddd7fe"}, + {file = "fonttools-4.59.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:ea03f1da0d722fe3c2278a05957e6550175571a4894fbf9d178ceef4a3783d2b"}, + {file = "fonttools-4.59.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:57a3708ca6bfccb790f585fa6d8f29432ec329618a09ff94c16bcb3c55994643"}, + {file = "fonttools-4.59.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:729367c91eb1ee84e61a733acc485065a00590618ca31c438e7dd4d600c01486"}, + {file = "fonttools-4.59.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f8ef66ac6db450193ed150e10b3b45dde7aded10c5d279968bc63368027f62b"}, + {file = "fonttools-4.59.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:075f745d539a998cd92cb84c339a82e53e49114ec62aaea8307c80d3ad3aef3a"}, + {file = "fonttools-4.59.1-cp314-cp314-win32.whl", hash = "sha256:c2b0597522d4c5bb18aa5cf258746a2d4a90f25878cbe865e4d35526abd1b9fc"}, + {file = "fonttools-4.59.1-cp314-cp314-win_amd64.whl", hash = "sha256:e9ad4ce044e3236f0814c906ccce8647046cc557539661e35211faadf76f283b"}, + {file = "fonttools-4.59.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:652159e8214eb4856e8387ebcd6b6bd336ee258cbeb639c8be52005b122b9609"}, + {file = "fonttools-4.59.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:43d177cd0e847ea026fedd9f099dc917da136ed8792d142298a252836390c478"}, + {file = "fonttools-4.59.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e54437651e1440ee53a95e6ceb6ee440b67a3d348c76f45f4f48de1a5ecab019"}, + {file = "fonttools-4.59.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6065fdec8ff44c32a483fd44abe5bcdb40dd5e2571a5034b555348f2b3a52cea"}, + {file = "fonttools-4.59.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42052b56d176f8b315fbc09259439c013c0cb2109df72447148aeda677599612"}, + {file = "fonttools-4.59.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = 
"sha256:bcd52eaa5c4c593ae9f447c1d13e7e4a00ca21d755645efa660b6999425b3c88"}, + {file = "fonttools-4.59.1-cp314-cp314t-win32.whl", hash = "sha256:02e4fdf27c550dded10fe038a5981c29f81cb9bc649ff2eaa48e80dab8998f97"}, + {file = "fonttools-4.59.1-cp314-cp314t-win_amd64.whl", hash = "sha256:412a5fd6345872a7c249dac5bcce380393f40c1c316ac07f447bc17d51900922"}, + {file = "fonttools-4.59.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ab4c1fb45f2984b8b4a3face7cff0f67f9766e9414cbb6fd061e9d77819de98"}, + {file = "fonttools-4.59.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ee39da0227950f88626c91e219659e6cd725ede826b1c13edd85fc4cec9bbe6"}, + {file = "fonttools-4.59.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:58a8844f96cff35860647a65345bfca87f47a2494bfb4bef754e58c082511443"}, + {file = "fonttools-4.59.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f021cea6e36410874763f4a517a5e2d6ac36ca8f95521f3a9fdaad0fe73dc"}, + {file = "fonttools-4.59.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bf5fb864f80061a40c1747e0dbc4f6e738de58dd6675b07eb80bd06a93b063c4"}, + {file = "fonttools-4.59.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c29ea087843e27a7cffc78406d32a5abf166d92afde7890394e9e079c9b4dbe9"}, + {file = "fonttools-4.59.1-cp39-cp39-win32.whl", hash = "sha256:a960b09ff50c2e87864e83f352e5a90bcf1ad5233df579b1124660e1643de272"}, + {file = "fonttools-4.59.1-cp39-cp39-win_amd64.whl", hash = "sha256:e3680884189e2b7c3549f6d304376e64711fd15118e4b1ae81940cb6b1eaa267"}, + {file = "fonttools-4.59.1-py3-none-any.whl", hash = "sha256:647db657073672a8330608970a984d51573557f328030566521bc03415535042"}, + {file = "fonttools-4.59.1.tar.gz", hash = "sha256:74995b402ad09822a4c8002438e54940d9f1ecda898d2bb057729d7da983e4cb"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops 
(>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "pycairo", "scipy"] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +unicode = ["unicodedata2 (>=15.1.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "kiwisolver" +version = "1.4.9" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.10" +files = [ + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b4b4d74bda2b8ebf4da5bd42af11d02d04428b2c32846e4c2c93219df8a7987b"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fb3b8132019ea572f4611d770991000d7f58127560c4889729248eb5852a102f"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:84fd60810829c27ae375114cd379da1fa65e6918e1da405f356a775d49a62bcf"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b78efa4c6e804ecdf727e580dbb9cba85624d2e1c6b5cb059c66290063bd99a9"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4efec7bcf21671db6a3294ff301d2fc861c31faa3c8740d1a94689234d1b415"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:90f47e70293fc3688b71271100a1a5453aa9944a81d27ff779c108372cf5567b"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fdca1def57a2e88ef339de1737a1449d6dbf5fab184c54a1fca01d541317154"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9cf554f21be770f5111a1690d42313e140355e687e05cf82cb23d0a721a64a48"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fc1795ac5cd0510207482c3d1d3ed781143383b8cfd36f5c645f3897ce066220"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ccd09f20ccdbbd341b21a67ab50a119b64a403b09288c27481575105283c1586"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:540c7c72324d864406a009d72f5d6856f49693db95d1fbb46cf86febef873634"}, + {file = "kiwisolver-1.4.9-cp310-cp310-win_amd64.whl", hash = "sha256:ede8c6d533bc6601a47ad4046080d36b8fc99f81e6f1c17b0ac3c2dc91ac7611"}, + {file = "kiwisolver-1.4.9-cp310-cp310-win_arm64.whl", hash = "sha256:7b4da0d01ac866a57dd61ac258c5607b4cd677f63abaec7b148354d2b2cdd536"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eb14a5da6dc7642b0f3a18f13654847cd8b7a2550e2645a5bda677862b03ba16"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39a219e1c81ae3b103643d2aedb90f1ef22650deb266ff12a19e7773f3e5f089"}, + {file = 
"kiwisolver-1.4.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2405a7d98604b87f3fc28b1716783534b1b4b8510d8142adca34ee0bc3c87543"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dc1ae486f9abcef254b5618dfb4113dd49f94c68e3e027d03cf0143f3f772b61"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a1f570ce4d62d718dce3f179ee78dac3b545ac16c0c04bb363b7607a949c0d1"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb27e7b78d716c591e88e0a09a2139c6577865d7f2e152488c2cc6257f460872"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:15163165efc2f627eb9687ea5f3a28137217d217ac4024893d753f46bce9de26"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bdee92c56a71d2b24c33a7d4c2856bd6419d017e08caa7802d2963870e315028"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:412f287c55a6f54b0650bd9b6dce5aceddb95864a1a90c87af16979d37c89771"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2c93f00dcba2eea70af2be5f11a830a742fe6b579a1d4e00f47760ef13be247a"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f117e1a089d9411663a3207ba874f31be9ac8eaa5b533787024dc07aeb74f464"}, + {file = "kiwisolver-1.4.9-cp311-cp311-win_amd64.whl", hash = "sha256:be6a04e6c79819c9a8c2373317d19a96048e5a3f90bec587787e86a1153883c2"}, + {file = "kiwisolver-1.4.9-cp311-cp311-win_arm64.whl", hash = "sha256:0ae37737256ba2de764ddc12aed4956460277f00c4996d51a197e72f62f5eec7"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145"}, + {file = "kiwisolver-1.4.9-cp312-cp312-win_amd64.whl", hash = "sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54"}, + {file = "kiwisolver-1.4.9-cp312-cp312-win_arm64.whl", hash = "sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5d0432ccf1c7ab14f9949eec60c5d1f924f17c037e9f8b33352fa05799359b8"}, + {file = 
"kiwisolver-1.4.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efb3a45b35622bb6c16dbfab491a8f5a391fe0e9d45ef32f4df85658232ca0e2"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a12cf6398e8a0a001a059747a1cbf24705e18fe413bc22de7b3d15c67cffe3f"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b67e6efbf68e077dd71d1a6b37e43e1a99d0bff1a3d51867d45ee8908b931098"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5656aa670507437af0207645273ccdfee4f14bacd7f7c67a4306d0dcaeaf6eed"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bfc08add558155345129c7803b3671cf195e6a56e7a12f3dde7c57d9b417f525"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:40092754720b174e6ccf9e845d0d8c7d8e12c3d71e7fc35f55f3813e96376f78"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:497d05f29a1300d14e02e6441cf0f5ee81c1ff5a304b0d9fb77423974684e08b"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdd1a81a1860476eb41ac4bc1e07b3f07259e6d55bbf739b79c8aaedcf512799"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e6b93f13371d341afee3be9f7c5964e3fe61d5fa30f6a30eb49856935dfe4fc3"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d75aa530ccfaa593da12834b86a0724f58bff12706659baa9227c2ccaa06264c"}, + {file = "kiwisolver-1.4.9-cp313-cp313-win_amd64.whl", hash = "sha256:dd0a578400839256df88c16abddf9ba14813ec5f21362e1fe65022e00c883d4d"}, + {file = "kiwisolver-1.4.9-cp313-cp313-win_arm64.whl", hash = "sha256:d4188e73af84ca82468f09cadc5ac4db578109e52acb4518d8154698d3a87ca2"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_universal2.whl", hash = 
"sha256:5a0f2724dfd4e3b3ac5a82436a8e6fd16baa7d507117e4279b660fe8ca38a3a1"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b11d6a633e4ed84fc0ddafd4ebfd8ea49b3f25082c04ad12b8315c11d504dc1"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61874cdb0a36016354853593cffc38e56fc9ca5aa97d2c05d3dcf6922cd55a11"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:60c439763a969a6af93b4881db0eed8fadf93ee98e18cbc35bc8da868d0c4f0c"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92a2f997387a1b79a75e7803aa7ded2cfbe2823852ccf1ba3bcf613b62ae3197"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a31d512c812daea6d8b3be3b2bfcbeb091dbb09177706569bcfc6240dcf8b41c"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:52a15b0f35dad39862d376df10c5230155243a2c1a436e39eb55623ccbd68185"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a30fd6fdef1430fd9e1ba7b3398b5ee4e2887783917a687d86ba69985fb08748"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cc9617b46837c6468197b5945e196ee9ca43057bb7d9d1ae688101e4e1dddf64"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:0ab74e19f6a2b027ea4f845a78827969af45ce790e6cb3e1ebab71bdf9f215ff"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dba5ee5d3981160c28d5490f0d1b7ed730c22470ff7f6cc26cfcfaacb9896a07"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-win_arm64.whl", hash = "sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9928fe1eb816d11ae170885a74d074f57af3a0d65777ca47e9aeb854a1fba386"}, + {file = 
"kiwisolver-1.4.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d0005b053977e7b43388ddec89fa567f43d4f6d5c2c0affe57de5ebf290dc552"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2635d352d67458b66fd0667c14cb1d4145e9560d503219034a18a87e971ce4f3"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:767c23ad1c58c9e827b649a9ab7809fd5fd9db266a9cf02b0e926ddc2c680d58"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72d0eb9fba308b8311685c2268cf7d0a0639a6cd027d8128659f72bdd8a024b4"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f68e4f3eeca8fb22cc3d731f9715a13b652795ef657a13df1ad0c7dc0e9731df"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d84cd4061ae292d8ac367b2c3fa3aad11cb8625a95d135fe93f286f914f3f5a6"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a60ea74330b91bd22a29638940d115df9dc00af5035a9a2a6ad9399ffb4ceca5"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ce6a3a4e106cf35c2d9c4fa17c05ce0b180db622736845d4315519397a77beaf"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:77937e5e2a38a7b48eef0585114fe7930346993a88060d0bf886086d2aa49ef5"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:24c175051354f4a28c5d6a31c93906dc653e2bf234e8a4bbfb964892078898ce"}, + {file = "kiwisolver-1.4.9-cp314-cp314-win_amd64.whl", hash = "sha256:0763515d4df10edf6d06a3c19734e2566368980d21ebec439f33f9eb936c07b7"}, + {file = "kiwisolver-1.4.9-cp314-cp314-win_arm64.whl", hash = "sha256:0e4e2bf29574a6a7b7f6cb5fa69293b9f96c928949ac4a53ba3f525dffb87f9c"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_universal2.whl", hash = 
"sha256:d976bbb382b202f71c67f77b0ac11244021cfa3f7dfd9e562eefcea2df711548"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2489e4e5d7ef9a1c300a5e0196e43d9c739f066ef23270607d45aba368b91f2d"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e2ea9f7ab7fbf18fffb1b5434ce7c69a07582f7acc7717720f1d69f3e806f90c"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b34e51affded8faee0dfdb705416153819d8ea9250bbbf7ea1b249bdeb5f1122"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8aacd3d4b33b772542b2e01beb50187536967b514b00003bdda7589722d2a64"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7cf974dd4e35fa315563ac99d6287a1024e4dc2077b8a7d7cd3d2fb65d283134"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:85bd218b5ecfbee8c8a82e121802dcb519a86044c9c3b2e4aef02fa05c6da370"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0856e241c2d3df4efef7c04a1e46b1936b6120c9bcf36dd216e3acd84bc4fb21"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9af39d6551f97d31a4deebeac6f45b156f9755ddc59c07b402c148f5dbb6482a"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:bb4ae2b57fc1d8cbd1cf7b1d9913803681ffa903e7488012be5b76dedf49297f"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:aedff62918805fb62d43a4aa2ecd4482c380dc76cd31bd7c8878588a61bd0369"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-win_amd64.whl", hash = "sha256:1fa333e8b2ce4d9660f2cda9c0e1b6bafcfb2457a9d259faa82289e73ec24891"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-win_arm64.whl", hash = "sha256:4a48a2ce79d65d363597ef7b567ce3d14d68783d2b2263d98db3d9477805ba32"}, + {file = 
"kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d1d9e582ad4d63062d34077a9a1e9f3c34088a2ec5135b1f7190c07cf366527"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:deed0c7258ceb4c44ad5ec7d9918f9f14fd05b2be86378d86cf50e63d1e7b771"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a590506f303f512dff6b7f75fd2fd18e16943efee932008fe7140e5fa91d80e"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e09c2279a4d01f099f52d5c4b3d9e208e91edcbd1a175c9662a8b16e000fece9"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c9e7cdf45d594ee04d5be1b24dd9d49f3d1590959b2271fb30b5ca2b262c00fb"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:720e05574713db64c356e86732c0f3c5252818d05f9df320f0ad8380641acea5"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:17680d737d5335b552994a2008fab4c851bcd7de33094a82067ef3a576ff02fa"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85b5352f94e490c028926ea567fc569c52ec79ce131dadb968d3853e809518c2"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:464415881e4801295659462c49461a24fb107c140de781d55518c4b80cb6790f"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fb940820c63a9590d31d88b815e7a3aa5915cad3ce735ab45f0c730b39547de1"}, + {file = "kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d"}, +] + +[[package]] +name = "matplotlib" +version = "3.10.5" +description = "Python plotting package" +optional = false +python-versions = ">=3.10" +files = [ + {file = "matplotlib-3.10.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:5d4773a6d1c106ca05cb5a5515d277a6bb96ed09e5c8fab6b7741b8fcaa62c8f"}, + {file = "matplotlib-3.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dc88af74e7ba27de6cbe6faee916024ea35d895ed3d61ef6f58c4ce97da7185a"}, + {file = "matplotlib-3.10.5-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:64c4535419d5617f7363dad171a5a59963308e0f3f813c4bed6c9e6e2c131512"}, + {file = "matplotlib-3.10.5-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a277033048ab22d34f88a3c5243938cef776493f6201a8742ed5f8b553201343"}, + {file = "matplotlib-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e4a6470a118a2e93022ecc7d3bd16b3114b2004ea2bf014fff875b3bc99b70c6"}, + {file = "matplotlib-3.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:7e44cada61bec8833c106547786814dd4a266c1b2964fd25daa3804f1b8d4467"}, + {file = "matplotlib-3.10.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:dcfc39c452c6a9f9028d3e44d2d721484f665304857188124b505b2c95e1eecf"}, + {file = "matplotlib-3.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:903352681b59f3efbf4546985142a9686ea1d616bb054b09a537a06e4b892ccf"}, + {file = "matplotlib-3.10.5-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:080c3676a56b8ee1c762bcf8fca3fe709daa1ee23e6ef06ad9f3fc17332f2d2a"}, + {file = "matplotlib-3.10.5-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4b4984d5064a35b6f66d2c11d668565f4389b1119cc64db7a4c1725bc11adffc"}, + {file = "matplotlib-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3967424121d3a46705c9fa9bdb0931de3228f13f73d7bb03c999c88343a89d89"}, + {file = "matplotlib-3.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:33775bbeb75528555a15ac29396940128ef5613cf9a2d31fb1bfd18b3c0c0903"}, + {file = "matplotlib-3.10.5-cp311-cp311-win_arm64.whl", hash = "sha256:c61333a8e5e6240e73769d5826b9a31d8b22df76c0778f8480baf1b4b01c9420"}, + {file = 
"matplotlib-3.10.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:00b6feadc28a08bd3c65b2894f56cf3c94fc8f7adcbc6ab4516ae1e8ed8f62e2"}, + {file = "matplotlib-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee98a5c5344dc7f48dc261b6ba5d9900c008fc12beb3fa6ebda81273602cc389"}, + {file = "matplotlib-3.10.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a17e57e33de901d221a07af32c08870ed4528db0b6059dce7d7e65c1122d4bea"}, + {file = "matplotlib-3.10.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97b9d6443419085950ee4a5b1ee08c363e5c43d7176e55513479e53669e88468"}, + {file = "matplotlib-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ceefe5d40807d29a66ae916c6a3915d60ef9f028ce1927b84e727be91d884369"}, + {file = "matplotlib-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:c04cba0f93d40e45b3c187c6c52c17f24535b27d545f757a2fffebc06c12b98b"}, + {file = "matplotlib-3.10.5-cp312-cp312-win_arm64.whl", hash = "sha256:a41bcb6e2c8e79dc99c5511ae6f7787d2fb52efd3d805fff06d5d4f667db16b2"}, + {file = "matplotlib-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:354204db3f7d5caaa10e5de74549ef6a05a4550fdd1c8f831ab9bca81efd39ed"}, + {file = "matplotlib-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b072aac0c3ad563a2b3318124756cb6112157017f7431626600ecbe890df57a1"}, + {file = "matplotlib-3.10.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d52fd5b684d541b5a51fb276b2b97b010c75bee9aa392f96b4a07aeb491e33c7"}, + {file = "matplotlib-3.10.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee7a09ae2f4676276f5a65bd9f2bd91b4f9fbaedf49f40267ce3f9b448de501f"}, + {file = "matplotlib-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ba6c3c9c067b83481d647af88b4e441d532acdb5ef22178a14935b0b881188f4"}, + {file = "matplotlib-3.10.5-cp313-cp313-win_amd64.whl", hash = 
"sha256:07442d2692c9bd1cceaa4afb4bbe5b57b98a7599de4dabfcca92d3eea70f9ebe"}, + {file = "matplotlib-3.10.5-cp313-cp313-win_arm64.whl", hash = "sha256:48fe6d47380b68a37ccfcc94f009530e84d41f71f5dae7eda7c4a5a84aa0a674"}, + {file = "matplotlib-3.10.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b80eb8621331449fc519541a7461987f10afa4f9cfd91afcd2276ebe19bd56c"}, + {file = "matplotlib-3.10.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47a388908e469d6ca2a6015858fa924e0e8a2345a37125948d8e93a91c47933e"}, + {file = "matplotlib-3.10.5-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8b6b49167d208358983ce26e43aa4196073b4702858670f2eb111f9a10652b4b"}, + {file = "matplotlib-3.10.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a8da0453a7fd8e3da114234ba70c5ba9ef0e98f190309ddfde0f089accd46ea"}, + {file = "matplotlib-3.10.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52c6573dfcb7726a9907b482cd5b92e6b5499b284ffacb04ffbfe06b3e568124"}, + {file = "matplotlib-3.10.5-cp313-cp313t-win_amd64.whl", hash = "sha256:a23193db2e9d64ece69cac0c8231849db7dd77ce59c7b89948cf9d0ce655a3ce"}, + {file = "matplotlib-3.10.5-cp313-cp313t-win_arm64.whl", hash = "sha256:56da3b102cf6da2776fef3e71cd96fcf22103a13594a18ac9a9b31314e0be154"}, + {file = "matplotlib-3.10.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:96ef8f5a3696f20f55597ffa91c28e2e73088df25c555f8d4754931515512715"}, + {file = "matplotlib-3.10.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:77fab633e94b9da60512d4fa0213daeb76d5a7b05156840c4fd0399b4b818837"}, + {file = "matplotlib-3.10.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27f52634315e96b1debbfdc5c416592edcd9c4221bc2f520fd39c33db5d9f202"}, + {file = "matplotlib-3.10.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:525f6e28c485c769d1f07935b660c864de41c37fd716bfa64158ea646f7084bb"}, + {file = 
"matplotlib-3.10.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1f5f3ec4c191253c5f2b7c07096a142c6a1c024d9f738247bfc8e3f9643fc975"}, + {file = "matplotlib-3.10.5-cp314-cp314-win_amd64.whl", hash = "sha256:707f9c292c4cd4716f19ab8a1f93f26598222cd931e0cd98fbbb1c5994bf7667"}, + {file = "matplotlib-3.10.5-cp314-cp314-win_arm64.whl", hash = "sha256:21a95b9bf408178d372814de7baacd61c712a62cae560b5e6f35d791776f6516"}, + {file = "matplotlib-3.10.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a6b310f95e1102a8c7c817ef17b60ee5d1851b8c71b63d9286b66b177963039e"}, + {file = "matplotlib-3.10.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:94986a242747a0605cb3ff1cb98691c736f28a59f8ffe5175acaeb7397c49a5a"}, + {file = "matplotlib-3.10.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ff10ea43288f0c8bab608a305dc6c918cc729d429c31dcbbecde3b9f4d5b569"}, + {file = "matplotlib-3.10.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f6adb644c9d040ffb0d3434e440490a66cf73dbfa118a6f79cd7568431f7a012"}, + {file = "matplotlib-3.10.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4fa40a8f98428f789a9dcacd625f59b7bc4e3ef6c8c7c80187a7a709475cf592"}, + {file = "matplotlib-3.10.5-cp314-cp314t-win_amd64.whl", hash = "sha256:95672a5d628b44207aab91ec20bf59c26da99de12b88f7e0b1fb0a84a86ff959"}, + {file = "matplotlib-3.10.5-cp314-cp314t-win_arm64.whl", hash = "sha256:2efaf97d72629e74252e0b5e3c46813e9eeaa94e011ecf8084a971a31a97f40b"}, + {file = "matplotlib-3.10.5-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b5fa2e941f77eb579005fb804026f9d0a1082276118d01cc6051d0d9626eaa7f"}, + {file = "matplotlib-3.10.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1fc0d2a3241cdcb9daaca279204a3351ce9df3c0e7e621c7e04ec28aaacaca30"}, + {file = "matplotlib-3.10.5-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8dee65cb1424b7dc982fe87895b5613d4e691cc57117e8af840da0148ca6c1d7"}, 
+ {file = "matplotlib-3.10.5-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:160e125da27a749481eaddc0627962990f6029811dbeae23881833a011a0907f"}, + {file = "matplotlib-3.10.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac3d50760394d78a3c9be6b28318fe22b494c4fcf6407e8fd4794b538251899b"}, + {file = "matplotlib-3.10.5-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6c49465bf689c4d59d174d0c7795fb42a21d4244d11d70e52b8011987367ac61"}, + {file = "matplotlib-3.10.5.tar.gz", hash = "sha256:352ed6ccfb7998a00881692f38b4ca083c691d3e275b4145423704c34c909076"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.23" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[package.extras] +dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] + +[[package]] +name = "mypy" +version = "1.17.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972"}, + {file = "mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7"}, + {file = "mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df"}, + {file = "mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390"}, + {file = "mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94"}, + {file = "mypy-1.17.1-cp310-cp310-win_amd64.whl", 
hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b"}, + {file = "mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58"}, + {file = "mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5"}, + {file = "mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd"}, + {file = "mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b"}, + {file = "mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5"}, + {file = "mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b"}, + {file = "mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb"}, + {file = "mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403"}, + {file = "mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056"}, + {file = "mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341"}, + {file = "mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb"}, + {file = "mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19"}, + {file = 
"mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7"}, + {file = "mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81"}, + {file = "mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6"}, + {file = "mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849"}, + {file = "mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14"}, + {file = "mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a"}, + {file = "mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733"}, + {file = "mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd"}, + {file = "mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0"}, + {file = "mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a"}, + {file = "mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91"}, + {file = "mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed"}, + {file = "mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9"}, + {file = "mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99"}, + {file = "mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8"}, + {file = "mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8"}, + {file = "mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259"}, + {file = "mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d"}, + {file = "mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9"}, + {file = "mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" +pathspec = ">=0.9.0" +typing_extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] + +[[package]] +name = "numpy" +version = "2.3.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.11" +files = [ + {file = "numpy-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:852ae5bed3478b92f093e30f785c98e0cb62fa0a939ed057c31716e18a7a22b9"}, + {file = "numpy-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a0e27186e781a69959d0230dd9909b5e26024f8da10683bd6344baea1885168"}, + {file = "numpy-2.3.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:f0a1a8476ad77a228e41619af2fa9505cf69df928e9aaa165746584ea17fed2b"}, + {file = "numpy-2.3.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:cbc95b3813920145032412f7e33d12080f11dc776262df1712e1638207dde9e8"}, + {file = "numpy-2.3.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75018be4980a7324edc5930fe39aa391d5734531b1926968605416ff58c332d"}, + {file = "numpy-2.3.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20b8200721840f5621b7bd03f8dcd78de33ec522fc40dc2641aa09537df010c3"}, + {file = "numpy-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f91e5c028504660d606340a084db4b216567ded1056ea2b4be4f9d10b67197f"}, + {file = "numpy-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fb1752a3bb9a3ad2d6b090b88a9a0ae1cd6f004ef95f75825e2f382c183b2097"}, + {file = "numpy-2.3.2-cp311-cp311-win32.whl", hash = "sha256:4ae6863868aaee2f57503c7a5052b3a2807cf7a3914475e637a0ecd366ced220"}, + {file = "numpy-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:240259d6564f1c65424bcd10f435145a7644a65a6811cfc3201c4a429ba79170"}, + {file = 
"numpy-2.3.2-cp311-cp311-win_arm64.whl", hash = "sha256:4209f874d45f921bde2cff1ffcd8a3695f545ad2ffbef6d3d3c6768162efab89"}, + {file = "numpy-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b"}, + {file = "numpy-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f"}, + {file = "numpy-2.3.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0"}, + {file = "numpy-2.3.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b"}, + {file = "numpy-2.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370"}, + {file = "numpy-2.3.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73"}, + {file = "numpy-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc"}, + {file = "numpy-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be"}, + {file = "numpy-2.3.2-cp312-cp312-win32.whl", hash = "sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036"}, + {file = "numpy-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f"}, + {file = "numpy-2.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07"}, + {file = "numpy-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3"}, + {file = "numpy-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b"}, + {file = "numpy-2.3.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6"}, + {file = "numpy-2.3.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089"}, + {file = "numpy-2.3.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2"}, + {file = "numpy-2.3.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f"}, + {file = "numpy-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee"}, + {file = "numpy-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6"}, + {file = "numpy-2.3.2-cp313-cp313-win32.whl", hash = "sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b"}, + {file = "numpy-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56"}, + {file = "numpy-2.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2"}, + {file = "numpy-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab"}, + {file = "numpy-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2"}, + {file = "numpy-2.3.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a"}, + {file = "numpy-2.3.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286"}, + {file = 
"numpy-2.3.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8"}, + {file = "numpy-2.3.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a"}, + {file = "numpy-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91"}, + {file = "numpy-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5"}, + {file = "numpy-2.3.2-cp313-cp313t-win32.whl", hash = "sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5"}, + {file = "numpy-2.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450"}, + {file = "numpy-2.3.2-cp313-cp313t-win_arm64.whl", hash = "sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a"}, + {file = "numpy-2.3.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a"}, + {file = "numpy-2.3.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b"}, + {file = "numpy-2.3.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125"}, + {file = "numpy-2.3.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19"}, + {file = "numpy-2.3.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f"}, + {file = "numpy-2.3.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5"}, + {file = "numpy-2.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", 
hash = "sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58"}, + {file = "numpy-2.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0"}, + {file = "numpy-2.3.2-cp314-cp314-win32.whl", hash = "sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2"}, + {file = "numpy-2.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b"}, + {file = "numpy-2.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910"}, + {file = "numpy-2.3.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e"}, + {file = "numpy-2.3.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45"}, + {file = "numpy-2.3.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b"}, + {file = "numpy-2.3.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2"}, + {file = "numpy-2.3.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0"}, + {file = "numpy-2.3.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0"}, + {file = "numpy-2.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2"}, + {file = "numpy-2.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf"}, + {file = "numpy-2.3.2-cp314-cp314t-win32.whl", hash = "sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1"}, + {file = 
"numpy-2.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b"}, + {file = "numpy-2.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631"}, + {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:14a91ebac98813a49bc6aa1a0dfc09513dcec1d97eaf31ca21a87221a1cdcb15"}, + {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:71669b5daae692189540cffc4c439468d35a3f84f0c88b078ecd94337f6cb0ec"}, + {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:69779198d9caee6e547adb933941ed7520f896fd9656834c300bdf4dd8642712"}, + {file = "numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2c3271cc4097beb5a60f010bcc1cc204b300bb3eafb4399376418a83a1c6373c"}, + {file = "numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8446acd11fe3dc1830568c941d44449fd5cb83068e5c70bd5a470d323d448296"}, + {file = "numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa098a5ab53fa407fded5870865c6275a5cd4101cfdef8d6fafc48286a96e981"}, + {file = "numpy-2.3.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619"}, + {file = "numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern 
matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pillow" +version = "11.3.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860"}, + {file = "pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50"}, + {file = "pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae"}, + {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9"}, + {file = "pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e"}, + {file = "pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6"}, + {file = "pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f"}, + {file = "pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f"}, + {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"}, + {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"}, + {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"}, + {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"}, + {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94"}, + {file = "pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0"}, + {file = "pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac"}, + {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"}, + {file = "pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4"}, + {file = "pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7"}, + {file = "pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024"}, + {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809"}, + {file = "pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d"}, + {file = "pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149"}, + {file = "pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d"}, + {file = "pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8"}, + {file = "pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = 
"sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f"}, + {file = "pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c"}, + {file = "pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805"}, + {file = "pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8"}, + {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2"}, + {file = "pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b"}, + {file = "pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3"}, + {file = "pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51"}, + {file = "pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580"}, + {file = "pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e"}, + {file = "pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8"}, + {file = "pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59"}, + {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe"}, + {file = "pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c"}, + {file = "pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788"}, + {file = "pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31"}, + {file = "pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e"}, + {file = "pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12"}, + {file = "pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632"}, + {file = 
"pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027"}, + {file = "pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77"}, + {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874"}, + {file = "pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a"}, + {file = "pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214"}, + {file = "pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635"}, + {file = "pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6"}, + {file = "pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae"}, + {file = "pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b"}, + {file = "pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477"}, + {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50"}, + {file = "pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b"}, + {file = "pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12"}, + {file = "pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db"}, + {file = "pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa"}, + {file = "pillow-11.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:48d254f8a4c776de343051023eb61ffe818299eeac478da55227d96e241de53f"}, + {file = "pillow-11.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7aee118e30a4cf54fdd873bd3a29de51e29105ab11f9aad8c32123f58c8f8081"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:23cff760a9049c502721bdb743a7cb3e03365fafcdfc2ef9784610714166e5a4"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6359a3bc43f57d5b375d1ad54a0074318a0844d11b76abccf478c37c986d3cfc"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:092c80c76635f5ecb10f3f83d76716165c96f5229addbd1ec2bdbbda7d496e06"}, + {file = "pillow-11.3.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cadc9e0ea0a2431124cde7e1697106471fc4c1da01530e679b2391c37d3fbb3a"}, + {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:6a418691000f2a418c9135a7cf0d797c1bb7d9a485e61fe8e7722845b95ef978"}, + {file = "pillow-11.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:97afb3a00b65cc0804d1c7abddbf090a81eaac02768af58cbdcaaa0a931e0b6d"}, + {file = "pillow-11.3.0-cp39-cp39-win32.whl", hash = "sha256:ea944117a7974ae78059fcc1800e5d3295172bb97035c0c1d9345fca1419da71"}, + {file = "pillow-11.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:e5c5858ad8ec655450a7c7df532e9842cf8df7cc349df7225c60d5d348c8aada"}, + {file = "pillow-11.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:6abdbfd3aea42be05702a8dd98832329c167ee84400a1d1f61ab11437f1717eb"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a"}, + {file = "pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", 
hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"}, + {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"}, + {file = "pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +test-arrow = ["pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] 
+testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "pydantic" +version = "2.11.7" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +email-validator = {version = ">=2.0.0", optional = true, markers = "extra == \"email\""} +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file 
= "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = 
"pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", 
hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = 
"sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.10.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796"}, + {file = "pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" +typing-inspection = ">=0.4.0" + +[package.extras] +aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] 
+gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pygments" +version = "2.19.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pyparsing" +version = "3.2.3" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"}, + {file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pytest" +version = "8.4.1" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, + {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, +] + +[package.dependencies] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +iniconfig = ">=1" +packaging = ">=20" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "1.1.0" +description = "Pytest support for asyncio" +optional = false 
+python-versions = ">=3.9" +files = [ + {file = "pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf"}, + {file = "pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-cov" +version = "6.2.1" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.9" +files = [ + {file = "pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5"}, + {file = "pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pluggy = ">=1.2" +pytest = ">=6.2.5" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.1.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.9" +files = [ + {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, + {file = 
"python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file 
= "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = 
"sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "ruff" +version = "0.12.9" +description = "An extremely fast Python linter and code formatter, written in Rust." +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.12.9-py3-none-linux_armv6l.whl", hash = "sha256:fcebc6c79fcae3f220d05585229463621f5dbf24d79fdc4936d9302e177cfa3e"}, + {file = "ruff-0.12.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aed9d15f8c5755c0e74467731a007fcad41f19bcce41cd75f768bbd687f8535f"}, + {file = "ruff-0.12.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5b15ea354c6ff0d7423814ba6d44be2807644d0c05e9ed60caca87e963e93f70"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d596c2d0393c2502eaabfef723bd74ca35348a8dac4267d18a94910087807c53"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b15599931a1a7a03c388b9c5df1bfa62be7ede6eb7ef753b272381f39c3d0ff"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d02faa2977fb6f3f32ddb7828e212b7dd499c59eb896ae6c03ea5c303575756"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:17d5b6b0b3a25259b69ebcba87908496e6830e03acfb929ef9fd4c58675fa2ea"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72db7521860e246adbb43f6ef464dd2a532ef2ef1f5dd0d470455b8d9f1773e0"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a03242c1522b4e0885af63320ad754d53983c9599157ee33e77d748363c561ce"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:9fc83e4e9751e6c13b5046d7162f205d0a7bac5840183c5beebf824b08a27340"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:881465ed56ba4dd26a691954650de6ad389a2d1fdb130fe51ff18a25639fe4bb"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:43f07a3ccfc62cdb4d3a3348bf0588358a66da756aa113e071b8ca8c3b9826af"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:07adb221c54b6bba24387911e5734357f042e5669fa5718920ee728aba3cbadc"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f5cd34fabfdea3933ab85d72359f118035882a01bff15bd1d2b15261d85d5f66"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f6be1d2ca0686c54564da8e7ee9e25f93bdd6868263805f8c0b8fc6a449db6d7"}, + {file = "ruff-0.12.9-py3-none-win32.whl", hash = "sha256:cc7a37bd2509974379d0115cc5608a1a4a6c4bff1b452ea69db83c8855d53f93"}, + {file = "ruff-0.12.9-py3-none-win_amd64.whl", hash = "sha256:6fb15b1977309741d7d098c8a3cb7a30bc112760a00fb6efb7abc85f00ba5908"}, + {file = "ruff-0.12.9-py3-none-win_arm64.whl", hash = "sha256:63c8c819739d86b96d500cce885956a1a48ab056bbcbc61b747ad494b2485089"}, + {file = "ruff-0.12.9.tar.gz", hash = "sha256:fbd94b2e3c623f659962934e52c2bea6fc6da11f667a427a368adaf3af2c866a"}, +] + +[[package]] +name = "simpy" +version = "4.1.1" +description = "Event discrete, process based simulation for Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "simpy-4.1.1-py3-none-any.whl", hash = "sha256:7c5ae380240fd2238671160e4830956f8055830a8317edf5c05e495b3823cd88"}, + {file = "simpy-4.1.1.tar.gz", hash = "sha256:06d0750a7884b11e0e8e20ce0bc7c6d4ed5f1743d456695340d13fdff95001a6"}, +] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250809" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.9" +files = [ + {file = "types_pyyaml-6.0.12.20250809-py3-none-any.whl", hash = "sha256:032b6003b798e7de1a1ddfeefee32fac6486bdfe4845e0ae0e7fb3ee4512b52f"}, + {file = "types_pyyaml-6.0.12.20250809.tar.gz", hash = "sha256:af4a1aca028f18e75297da2ee0da465f799627370d74073e96fee876524f61b5"}, +] + +[[package]] +name = "typing-extensions" +version = "4.14.1" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +files = [ + {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, + {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = 
"sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[metadata] +lock-version = "2.0" +python-versions = "^3.12" +content-hash = "b7b303ed9df0a73da71790803bfde02b8dc243871801686bb7a3127ca3f638aa" diff --git a/pyproject.toml b/pyproject.toml index 0fb29d2..09c14e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,36 +1,57 @@ [tool.poetry] -name = "project-backend" -version = "0.1.0" -description = "solo professional project" +name = "AsyncFlow-Sim" +version = "0.1.0a1" +description = "Digital-twin simulator for distributed async systems. Build what-if scenarios and quantify capacity, latency and throughput offline—before you deploy." authors = ["Gioele Botta"] readme = "README.md" - +license = "MIT" packages = [ - { include = "app", from = "src" } + { include = "asyncflow", from = "src" } +] + +include = ["LICENSE", "src/asyncflow/py.typed"] +exclude = ["tests", "docs", "scripts", ".github", "alembic"] + +# Better pypi discovery +keywords = [ + "simulation", "simpy", "asyncio", "capacity-planning", "performance", + "fastapi", "uvicorn", "distributed-systems", "queuing-theory" +] + +homepage = "https://github.com/AsyncFlow-Sim" +repository = "https://github.com/AsyncFlow-Sim/AsyncFlow" +documentation = "https://github.com/AsyncFlow-Sim/AsyncFlow-Sim/tree/v0.1.0/docs" + +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering :: Simulation", + "Topic :: System :: Distributed Computing", + "Topic :: Software Development :: Testing", + "Topic :: System :: Benchmark", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3.12", + "Typing :: Typed", + "Operating System :: OS Independent", ] [tool.poetry.dependencies] python = "^3.12" -fastapi = "^0.115.14" -sqlalchemy = "^2.0.41" -psycopg = {extras = ["binary"], version = "^3.2.9"} -uvicorn = 
{extras = ["standard"], version = "^0.34.3"} -gunicorn = "^23.0.0" pydantic-settings = "^2.10.1" pydantic = {extras = ["email"], version = "^2.11.7"} -asyncpg = "^0.30.0" -passlib = {extras = ["argon2"], version = "^1.7.4"} -argon2-cffi = "^25.1.0" -sqlalchemy-utils = "^0.41.2" +numpy = "^2.3.1" +simpy = "^4.1.1" +matplotlib = "^3.10.3" +pyyaml = "^6.0.2" [tool.poetry.group.dev.dependencies] pytest = "^8.4.1" pytest-asyncio = "^1.0.0" -httpx = "^0.28.1" pytest-cov = "^6.2.1" mypy = "^1.16.1" -alembic = "^1.16.2" ruff = "^0.12.1" +types-pyyaml = "^6.0.12.20250516" [build-system] requires = ["poetry-core"] @@ -102,4 +123,4 @@ ignore_missing_imports = true warn_unused_ignores = true show_error_codes = true pretty = true -exclude = ["^alembic/", "^.venv/"] +exclude = ["^.venv/"] diff --git a/pytest.ini b/pytest.ini index 6d4afe2..c61714b 100644 --- a/pytest.ini +++ b/pytest.ini @@ -2,5 +2,6 @@ addopts = -ra -q testpaths = tests markers = - integration: tests that require external services (database, HTTP calls, etc.) -asyncio_mode = auto \ No newline at end of file + integration: tests that require components interactions + system: end-to-end/system scenarios +asyncio_mode = auto diff --git a/readme_img/topology.png b/readme_img/topology.png new file mode 100644 index 0000000..bbb89e7 Binary files /dev/null and b/readme_img/topology.png differ diff --git a/scripts/dev_setup.ps1 b/scripts/dev_setup.ps1 new file mode 100644 index 0000000..88bee3d --- /dev/null +++ b/scripts/dev_setup.ps1 @@ -0,0 +1,176 @@ +# Post-clone developer setup for AsyncFlow (Windows / PowerShell). +# +# What it does: +# 1) Ensures Poetry is available (official installer if missing). +# 2) Configures Poetry to create an in-project virtualenv (.venv). +# 3) Removes poetry.lock (fresh dependency resolution by policy). +# 4) Installs the project with dev extras. +# 5) Runs ruff, mypy, and pytest (with coverage if available). 
+# +# Usage: +# .\scripts\dev_setup.ps1 +# +# Notes: +# - Run this from anywhere; it will cd to repo root. +# - Requires Python >= 3.12 to be available (via 'py' launcher or python.exe). +# - We do NOT delete an existing .venv; it will be reused if compatible. + +# Strict error handling +$ErrorActionPreference = 'Stop' +Set-StrictMode -Version Latest + +# --- helpers ------------------------------------------------------------------ + +function Write-Info { param([string]$Msg) Write-Host "==> $Msg" } +function Write-Ok { param([string]$Msg) Write-Host "✅ $Msg" -ForegroundColor Green } +function Fail { param([string]$Msg) Write-Error $Msg; exit 1 } + +# Resolve repo root (this script lives in scripts/) +$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +$RepoRoot = Resolve-Path (Join-Path $ScriptDir '..') + +function Require-Pyproject { + if (-not (Test-Path (Join-Path $RepoRoot 'pyproject.toml'))) { + Fail "pyproject.toml not found at repo root ($RepoRoot)" + } +} + +function Get-PythonPath-3_12Plus { + <# + Try common Windows launchers/executables and return the *actual* Python + interpreter path (sys.executable) for a version >= 3.12. 
+ #> + $candidates = @( + @('py', '-3.13'), + @('py', '-3.12'), + @('py', '-3'), + @('python3.13'), + @('python3.12'), + @('python') + ) + + foreach ($cand in $candidates) { + $exe = $cand[0] + $args = @() + if ($cand.Count -gt 1) { $args = $cand[1..($cand.Count-1)] } + + if (-not (Get-Command $exe -ErrorAction SilentlyContinue)) { continue } + + # Check version + & $exe @args -c "import sys; import sys as s; raise SystemExit(0 if sys.version_info[:2] >= (3,12) else 1)" 2>$null + if ($LASTEXITCODE -ne 0) { continue } + + # Obtain the real interpreter path + $pyPath = & $exe @args -c "import sys; print(sys.executable)" 2>$null + if ($LASTEXITCODE -eq 0 -and $pyPath) { + return $pyPath.Trim() + } + } + + return $null +} + +function Ensure-Poetry { + if (Get-Command poetry -ErrorAction SilentlyContinue) { + poetry --version | Out-Null + return + } + + Write-Info "Poetry not found; attempting installation…" + + # Official installer (recommended by Poetry) + $installer = (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content + # Pipe installer to Python (stdin) + $pythonToUse = (Get-Command py -ErrorAction SilentlyContinue) ? 'py' : 'python' + $installer | & $pythonToUse - + + # Common locations (make available for current session) + $poetryCandidates = @( + (Join-Path $env:APPDATA 'pypoetry\venv\Scripts'), + (Join-Path $env:USERPROFILE '.local\bin') + ) + foreach ($p in $poetryCandidates) { + if (Test-Path $p) { $env:Path = "$p;$env:Path" } + } + + if (-not (Get-Command poetry -ErrorAction SilentlyContinue)) { + Fail "Poetry installation failed (not on PATH). Close & reopen PowerShell or add the Poetry path to PATH." + } + + poetry --version | Out-Null +} + +function Run-Tests-WithOptionalCoverage { + <# + Try pytest with coverage first; if the plugin is missing, + fall back to plain pytest. Propagate failure if tests fail. 
+ #> + $cmd = { poetry run pytest --cov=src --cov-report=term-missing:skip-covered --cov-report=xml --disable-warnings -q } + try { + & $cmd + if ($LASTEXITCODE -eq 0) { + Write-Ok "Tests (with coverage) PASSED" + return + } + } catch { + # ignore; retry without coverage below + } + + Write-Info "Coverage run failed (likely pytest-cov not installed). Falling back to plain pytest…" + poetry run pytest --disable-warnings -q + if ($LASTEXITCODE -ne 0) { + Fail "Tests FAILED" + } + Write-Ok "Tests PASSED" +} + +# --- main --------------------------------------------------------------------- + +Set-Location $RepoRoot +Require-Pyproject + +$PythonExe = Get-PythonPath-3_12Plus +if (-not $PythonExe) { + Fail "Python >= 3.12 not found. Install Python 3.12+ and re-run." +} +Write-Info ("Using Python: " + (& $PythonExe -V)) + +Ensure-Poetry + +# Make sure Poetry venv lives inside the repo +Write-Info "Configuring Poetry to use in-project virtualenv (.venv)…" +poetry config virtualenvs.in-project true +Write-Ok "Poetry configured to use .venv" + +# Bind Poetry to the chosen interpreter (creates .venv if needed) +poetry env use "$PythonExe" | Out-Null +Write-Ok "Virtualenv ready (.venv)" + +# Policy: always remove lock to avoid conflicts across environments +$lockPath = Join-Path $RepoRoot 'poetry.lock' +if (Test-Path $lockPath) { + Write-Info "Removing poetry.lock for a clean resolution…" + Remove-Item $lockPath -Force + Write-Ok "poetry.lock removed" +} + +# Faster installs and stable headless plotting +$env:PIP_DISABLE_PIP_VERSION_CHECK = '1' +$env:MPLBACKEND = 'Agg' + +Write-Info "Installing project with dev extras…" +poetry install --with dev --no-interaction --no-ansi +Write-Ok "Dependencies installed (dev)" + +Write-Info "Running Ruff (lint)…" +poetry run ruff check src tests +Write-Ok "Ruff PASSED" + +Write-Info "Running MyPy (type-check)…" +poetry run mypy src tests +Write-Ok "MyPy PASSED" + +Write-Info "Running tests (with coverage if available)…" 
+Run-Tests-WithOptionalCoverage + +Write-Ok "All checks completed SUCCESSFULLY 🎉" diff --git a/scripts/dev_setup.sh b/scripts/dev_setup.sh new file mode 100644 index 0000000..fe97c0c --- /dev/null +++ b/scripts/dev_setup.sh @@ -0,0 +1,135 @@ +# Post-clone developer setup for AsyncFlow (Linux/macOS/WSL). +# +# What it does: +# 1) Ensures Poetry is available (prefers pipx if present; otherwise uses +# the official installer). +# 2) Configures Poetry to create an in-project virtualenv (.venv). +# 3) Removes poetry.lock (fresh dependency resolution by policy). +# 4) Installs the project with dev extras. +# 5) Runs ruff, mypy, and pytest (with coverage if available). +# +# Usage: +# bash scripts/dev_setup.sh +# +# Notes: +# - Run this from anywhere; it will cd to repo root. +# - Requires Python >= 3.12 to be available (python3.12 or python3). +# - We do NOT delete an existing .venv; it will be reused if compatible. + +set -Eeuo pipefail + +# --- helpers ----------------------------------------------------------------- + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +err() { echo "ERROR: $*" >&2; exit 1; } +info() { echo "==> $*"; } +ok() { echo "✅ $*"; } + +require_pyproject() { + [[ -f "$repo_root/pyproject.toml" ]] || err "pyproject.toml not found at repo root ($repo_root)" +} + +pick_python() { + # Return a python executable >= 3.12 + for cand in python3.13 python3.12 python3; do + if command -v "$cand" >/dev/null 2>&1; then + if "$cand" -c 'import sys; sys.exit(0 if sys.version_info[:2] >= (3,12) else 1)'; then + echo "$cand" + return 0 + fi + fi + done + err "Python >= 3.12 not found. Install python3.12+ and re-run." 
+} + +ensure_poetry() { + if command -v poetry >/dev/null 2>&1; then + poetry --version || true + return 0 + fi + + info "Poetry not found; attempting installation…" + + if command -v pipx >/dev/null 2>&1; then + pipx install poetry || pipx upgrade poetry || true + else + # Official installer (recommended by Poetry) — installs to ~/.local/bin + curl -sSL https://install.python-poetry.org | python3 - + export PATH="$HOME/.local/bin:$PATH" + fi + + # Ensure poetry is now available on PATH + export PATH="$HOME/.local/bin:$PATH" + command -v poetry >/dev/null 2>&1 || err "Poetry installation failed (not on PATH)." + poetry --version || true +} + +run_tests_with_optional_coverage() { + # Try pytest with coverage first; if plugin missing, fallback to plain pytest. + set +e + poetry run pytest \ + --cov=src \ + --cov-report=term-missing:skip-covered \ + --cov-report=xml \ + --disable-warnings -q + local status=$? + set -e + + if [[ $status -eq 0 ]]; then + ok "Tests (with coverage) PASSED" + return 0 + fi + + info "Coverage run failed (likely pytest-cov not installed). 
Falling back to plain pytest…" + + poetry run pytest --disable-warnings -q + ok "Tests PASSED" +} + +# --- main -------------------------------------------------------------------- + +cd "$repo_root" +require_pyproject + +PY_BIN="$(pick_python)" +info "Using Python: $("$PY_BIN" -V)" + +ensure_poetry + +# Make sure Poetry venv lives inside the repo +info "Configuring Poetry to use in-project virtualenv (.venv)…" +poetry config virtualenvs.in-project true +ok "Poetry configured to use .venv" + +# Bind Poetry to the chosen interpreter (creates .venv if needed) +poetry env use "$PY_BIN" >/dev/null 2>&1 || true +ok "Virtualenv ready (.venv)" + +# Policy: always remove lock to avoid conflicts across environments +if [[ -f poetry.lock ]]; then + info "Removing poetry.lock for a clean resolution…" + rm -f poetry.lock + ok "poetry.lock removed" +fi + +# Faster installs and stable headless plotting +export PIP_DISABLE_PIP_VERSION_CHECK=1 +export MPLBACKEND=Agg + +info "Installing project with dev extras…" +poetry install --with dev --no-interaction --no-ansi +ok "Dependencies installed (dev)" + +info "Running Ruff (lint)…" +poetry run ruff check src tests +ok "Ruff PASSED" + +info "Running MyPy (type-check)…" +poetry run mypy src tests +ok "MyPy PASSED" + +info "Running tests (with coverage if available)…" +run_tests_with_optional_coverage + +ok "All checks completed SUCCESSFULLY 🎉" diff --git a/scripts/init-docker-dev.sh b/scripts/init-docker-dev.sh deleted file mode 100644 index 6e1e270..0000000 --- a/scripts/init-docker-dev.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env bash -# scripts/init-docker-dev.sh -# Bring up local development stack using .env.dev in project root - - -set -euo pipefail - -# ────────────────────────────────────────────────────────────────────────────── -# 0. 
Paths -# ────────────────────────────────────────────────────────────────────────────── -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" -PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" -COMPOSE_FILE="$PROJECT_ROOT/docker/docker-compose.dev.yml" -ENV_DEV="$PROJECT_ROOT/docker/.env.dev" -ENV_DOT="$PROJECT_ROOT/docker/.env" - -# ────────────────────────────────────────────────────────────────────────────── -# 0.1 Make script executable -# ────────────────────────────────────────────────────────────────────────────── -if [[ ! -x "$SCRIPT_PATH" ]]; then - chmod +x "$SCRIPT_PATH" || true -fi - -# ────────────────────────────────────────────────────────────────────────────── -# 0.2 Ensure docker/.env exists for Compose interpolation -# ────────────────────────────────────────────────────────────────────────────── -if [[ -f "$ENV_DEV" && ! -f "$ENV_DOT" ]]; then - echo ">>> Copying .env.dev → .env for Compose interpolation" - cp "$ENV_DEV" "$ENV_DOT" -fi - -# ────────────────────────────────────────────────────────────────────────────── -# 1. Load env vars from .env.dev into this shell -# ────────────────────────────────────────────────────────────────────────────── -if [[ -f "$ENV_DEV" ]]; then - set -o allexport - source "$ENV_DEV" - set +o allexport -else - echo "ERROR: $ENV_DEV not found. Please create it from .env.example." >&2 - exit 1 -fi - -# ────────────────────────────────────────────────────────────────────────────── -# 2. Pull remote images (only missing ones) -# ────────────────────────────────────────────────────────────────────────────── -echo ">>> Pulling external service images..." -docker compose \ - --env-file "$ENV_DEV" \ - -f "$COMPOSE_FILE" pull - -# ────────────────────────────────────────────────────────────────────────────── -# 3. Start Postgres + pgAdmin (detached) -# ────────────────────────────────────────────────────────────────────────────── -echo ">>> Starting Postgres and pgAdmin..." 
-docker compose \ - --env-file "$ENV_DEV" \ - -f "$COMPOSE_FILE" up -d db pgadmin - -# ────────────────────────────────────────────────────────────────────────────── -# 4. Run Alembic migrations -# ────────────────────────────────────────────────────────────────────────────── -echo ">>> Applying database migrations (Alembic)…" -cd "$PROJECT_ROOT" -poetry run alembic upgrade head - -# ────────────────────────────────────────────────────────────────────────────── -# 5. Build (if needed) & start everything in background -# ────────────────────────────────────────────────────────────────────────────── -echo ">>> Building (if needed) and starting all services…" -docker compose \ - --env-file "$ENV_DEV" \ - -f "$COMPOSE_FILE" up -d --build - -echo ">>> Development stack is up!" -echo " • Backend: http://localhost:8000" -echo " • pgAdmin: http://localhost:8080" -echo -echo "To tail backend logs without warnings, run:" -echo " docker compose \\ - --env-file \"$ENV_DEV\" \\ - -f \"$COMPOSE_FILE\" logs -f backend" diff --git a/scripts/quality_check.ps1 b/scripts/quality_check.ps1 new file mode 100644 index 0000000..74bbd4c --- /dev/null +++ b/scripts/quality_check.ps1 @@ -0,0 +1,14 @@ +# Lint & format with Ruff (applies --fix) and type-check with MyPy. 
+# Usage: +# .\scripts\quality_check.ps1 + +$ErrorActionPreference = 'Stop' +Set-StrictMode -Version Latest + +# Ruff (lint + auto-fix) +poetry run ruff check src tests --fix + +# MyPy (type-check) +poetry run mypy src tests + +Write-Host "✅ Linting and type-checking completed SUCCESSFULLY" diff --git a/scripts/quality_check.sh b/scripts/quality_check.sh new file mode 100644 index 0000000..85c269e --- /dev/null +++ b/scripts/quality_check.sh @@ -0,0 +1,9 @@ +set -euo pipefail + +# Lint & format with ruff, automatic corrections applied (--fix) +poetry run ruff check src tests --fix + +# Type‐check with mypy +poetry run mypy src tests + +echo "✅ Linting and type‐checking completed SUCCESSFULLY" \ No newline at end of file diff --git a/scripts/run_sys_tests.ps1 b/scripts/run_sys_tests.ps1 new file mode 100644 index 0000000..aa3630f --- /dev/null +++ b/scripts/run_sys_tests.ps1 @@ -0,0 +1,55 @@ +# Run only system tests (marked @pytest.mark.system) with the required env var. +# Keeps output concise (no XML, no slowest list), shows the usual pytest summary. +# +# Usage: +# .\scripts\run_sys_tests.ps1 +# +# Notes: +# - Uses `poetry run` when Poetry + pyproject.toml are present; otherwise falls back to `pytest`. +# - Forces a headless backend for any plots generated during tests.
+ +Set-StrictMode -Version Latest +$ErrorActionPreference = 'Stop' + +# Resolve repo root +$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +$RepoRoot = Resolve-Path (Join-Path $ScriptDir '..') + +# Collect test paths (default: tests/system) +if ($args.Count -ge 1) { + $TestPaths = $args +} else { + $TestPaths = @('tests/system') +} + +# Decide runner prefix +$UsePoetry = (Get-Command poetry -ErrorAction SilentlyContinue) -ne $null -and + (Test-Path (Join-Path $RepoRoot 'pyproject.toml')) +$Runner = if ($UsePoetry) { 'poetry run pytest' } else { 'pytest' } + +# Set env vars for this process +$env:MPLBACKEND = if ($env:MPLBACKEND) { $env:MPLBACKEND } else { 'Agg' } +$env:ASYNCFLOW_RUN_SYSTEM_TESTS = '1' + +Push-Location $RepoRoot +try { + Write-Host "==> Running system tests…" + # Clear any configured addopts and run only system-marked tests + $pytestArgs = @( + '-o', 'addopts=', + '-m', 'system', + '--disable-warnings', + '-q' + ) + $TestPaths + + if ($UsePoetry) { + poetry run pytest @pytestArgs + } else { + pytest @pytestArgs + } + + Write-Host "✅ System tests PASSED" +} +finally { + Pop-Location +} diff --git a/scripts/run_sys_tests.sh b/scripts/run_sys_tests.sh new file mode 100644 index 0000000..78ee74b --- /dev/null +++ b/scripts/run_sys_tests.sh @@ -0,0 +1,42 @@ +# Run only system tests (marked @pytest.mark.system) with the required env var. +# Keeps output concise (no XML, no slowest list), shows the usual pytest summary. +# +# Usage: +# bash scripts/run_sys_tests.sh +# +# Notes: +# - Uses `poetry run` when Poetry + pyproject.toml are present; otherwise falls back to `pytest`. +# - Forces a headless backend for any plots generated during tests. + +set -Eeuo pipefail + +# Pick test paths (default to tests/system) +if [[ $# -ge 1 ]]; then + TEST_PATHS=("$@") +else + TEST_PATHS=(tests/system) +fi + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.."
&& pwd)" +RUN_PREFIX="" +if command -v poetry >/dev/null 2>&1 && [[ -f "$REPO_ROOT/pyproject.toml" ]]; then + RUN_PREFIX="poetry run" +fi + +# Headless plotting; enable system tests +export MPLBACKEND="${MPLBACKEND:-Agg}" +export ASYNCFLOW_RUN_SYSTEM_TESTS=1 + +cd "$REPO_ROOT" + +echo "==> Running system tests…" +# Clear any configured addopts and run only system-marked tests +# Keep output short but with the final summary line. +$RUN_PREFIX pytest \ + -o addopts= \ + -m system \ + --disable-warnings \ + -q \ + "${TEST_PATHS[@]}" + +echo "✅ System tests PASSED" diff --git a/scripts/run_tests.ps1 b/scripts/run_tests.ps1 new file mode 100644 index 0000000..86bacfd --- /dev/null +++ b/scripts/run_tests.ps1 @@ -0,0 +1,39 @@ +# Run tests with coverage ONLY (no XML, no durations), showing pytest’s usual summary. +# It also overrides any configured addopts (e.g. durations/xml) via `-o addopts=`. +# +# Usage: +# .\scripts\run_tests.ps1 + +$ErrorActionPreference = 'Stop' +Set-StrictMode -Version Latest + +# Pick test paths +[string[]]$TestPaths = if ($args.Count -ge 1) { $args } else { @('tests') } + +# Resolve repo root (this script lives in scripts/) +$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +$RepoRoot = Resolve-Path (Join-Path $ScriptDir '..') + +# Use Poetry if available and pyproject exists +$RunWithPoetry = (Get-Command poetry -ErrorAction SilentlyContinue) -and (Test-Path (Join-Path $RepoRoot 'pyproject.toml')) + +# Headless backend if plots are generated during tests +if (-not $env:MPLBACKEND) { $env:MPLBACKEND = 'Agg' } + +Set-Location $RepoRoot + +# Build command +$cmd = @() +if ($RunWithPoetry) { $cmd += @('poetry', 'run') } +$cmd += 'pytest' +$cmd += @( + '-o', 'addopts=', + '--cov=src', + '--cov-report=term', + '--disable-warnings', + '-q' +) +$cmd += $TestPaths + +# Execute (the call operator needs a command name, not an array: invoke the +# first element and splat the remaining elements as its arguments) +& $cmd[0] @($cmd[1..($cmd.Count - 1)]) diff --git a/scripts/run_tests.sh b/scripts/run_tests.sh new file mode 100644 index 0000000..089d8e4 --- /dev/null +++ b/scripts/run_tests.sh @@ -0,0 +1,34 @@ +#
Run tests with coverage ONLY (no XML, no durations), showing pytest’s usual summary. +# It also overrides any configured addopts (e.g. durations/xml) via `-o addopts=`. +# +# Usage: +# bash scripts/run_tests.sh +set -Eeuo pipefail + +# Pick test paths +if [[ $# -ge 1 ]]; then + TEST_PATHS=("$@") +else + TEST_PATHS=(tests) +fi + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +RUN_PREFIX="" +if command -v poetry >/dev/null 2>&1 && [[ -f "$REPO_ROOT/pyproject.toml" ]]; then + RUN_PREFIX="poetry run" +fi + +# Headless backend if plots are generated during tests +export MPLBACKEND="${MPLBACKEND:-Agg}" + +cd "$REPO_ROOT" + +# Run pytest with coverage summary in terminal, no xml, no durations, +# and wipe any addopts coming from config files. +$RUN_PREFIX pytest \ + -o addopts= \ + --cov=src \ + --cov-report=term \ + --disable-warnings \ + -q \ + "${TEST_PATHS[@]}" diff --git a/src/app/__init__.py b/src/app/__init__.py deleted file mode 100644 index 0f69098..0000000 --- a/src/app/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Main application package for the project backend.""" diff --git a/src/app/api/__init__.py b/src/app/api/__init__.py deleted file mode 100644 index 9455eaa..0000000 --- a/src/app/api/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""API endpoints and routers.""" diff --git a/src/app/api/health_check.py b/src/app/api/health_check.py deleted file mode 100644 index 74b4d4f..0000000 --- a/src/app/api/health_check.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Health check API endpoints.""" - -from fastapi import APIRouter - -router = APIRouter() - - -@router.get("/health") -def health_check() -> dict[str, str]: - """Return the health status of the application.""" - return {"status": "ok"} diff --git a/src/app/config/__init__.py b/src/app/config/__init__.py deleted file mode 100644 index 255bf0d..0000000 --- a/src/app/config/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Configuration modules and settings.""" diff --git a/src/app/config/constants.py 
b/src/app/config/constants.py deleted file mode 100644 index e5e9d0b..0000000 --- a/src/app/config/constants.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Application constants and configuration values.""" - -from enum import Enum - - -class UserLevel(str, Enum): - """constants to define the different levels the user can have in the application""" - - USER = "user" - ADMIN = "admin" - -class SubscriptionLevel(str, Enum): - """ - constants to define the different subscription levels the user can have - in the application - """ - - FREE = "free" - EXPERT = "expert" - PRO = "pro" - ENTERPRISE = "enterprise" - - diff --git a/src/app/config/settings.py b/src/app/config/settings.py deleted file mode 100644 index 69ee4ad..0000000 --- a/src/app/config/settings.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Application settings and configuration.""" - -from pathlib import Path -from typing import Literal - -from dotenv import load_dotenv -from pydantic import Field -from pydantic_settings import BaseSettings, SettingsConfigDict - -env_path = Path(__file__).resolve().parents[3] / "docker" / ".env.dev" -if env_path.exists(): - load_dotenv(dotenv_path=env_path) - - -class Settings(BaseSettings): - """Application settings loaded from environment variables.""" - - model_config = SettingsConfigDict(env_file=None, extra="ignore") - - app_name: str = "Project Backend" - - environment: Literal["development", "staging", "production", "test"] = Field( - default="development", - description="Runtime environment", - alias="ENVIRONMENT", - ) - - db_host: str = Field(default="localhost", alias="DB_HOST") - db_user: str = Field(default="postgres", alias="DB_USER") - db_password: str = Field(default="password", alias="DB_PASSWORD") - db_name: str = Field(default="project_db", alias="DB_NAME") - db_url_env: str | None = Field(default=None, alias="DB_URL") - - @property - def db_url(self) -> str: - """Compute the full database URL from components if not explicitly set.""" - if self.db_url_env: - return 
self.db_url_env - return f"postgresql+asyncpg://{self.db_user}:{self.db_password}@{self.db_host}/{self.db_name}" - - - -settings = Settings() diff --git a/src/app/core/auth_helpers.py b/src/app/core/auth_helpers.py deleted file mode 100644 index 4a3437a..0000000 --- a/src/app/core/auth_helpers.py +++ /dev/null @@ -1,62 +0,0 @@ -"""helper functions for the auth process""" - -import string -from typing import cast - -from passlib.context import CryptContext - -MAX_PASSWORD_LENGTH = 128 # prevent DoS -MIN_PASSWORD_LENGTH = 8 -SPECIAL_CHARS = set(string.punctuation) - -pwd_context = CryptContext( - schemes=["argon2"], - deprecated="auto", - -) - -def is_password_safe(password: str) -> bool: - """Function to verify if a password is safe""" - has_upper = False - has_lower = False - has_digit = False - has_special = False - - if len(password) < MIN_PASSWORD_LENGTH or len(password) > MAX_PASSWORD_LENGTH: - return False - - for char in password: - if char.isupper(): - has_upper = True - elif char.islower(): - has_lower = True - elif char.isdigit(): - has_digit = True - elif char in SPECIAL_CHARS: - has_special = True - - if has_upper and has_lower and has_digit and has_special: - return True - - return False - - -def verify_passwords_equality(password: str, confirmed_password: str) -> bool: - """Verify if the passwords given in the registration form are equivalent.""" - return password == confirmed_password - - -def hash_password(password: str) -> str: - """ - Receive the password - give back the hashed password - """ - return cast("str", pwd_context.hash(password)) - - -def verify_hashed_pwd_plain_pwd_equality(plain_pwd: str, hashed_pwd: str) -> bool: - """ - Verify during the login if the plain password - correspond to the hashed password - """ - return cast("bool", pwd_context.verify(plain_pwd, hashed_pwd)) diff --git a/src/app/db/__init__.py b/src/app/db/__init__.py deleted file mode 100644 index 8e0edc2..0000000 --- a/src/app/db/__init__.py +++ /dev/null @@ -1 +0,0 @@ 
-"""Database modules and utilities.""" diff --git a/src/app/db/base.py b/src/app/db/base.py deleted file mode 100644 index 2b59174..0000000 --- a/src/app/db/base.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Database base classes and utilities.""" - -import re - -from sqlalchemy.orm import DeclarativeBase, declared_attr - - -class Base(DeclarativeBase): - """Base class for all ORM models.""" - - @declared_attr.directive - def __tablename__(cls) -> str: - """Generate table name from class name by converting CamelCase to snake_case.""" - return re.sub(r"(? None: - """Initialize database models by creating all tables.""" - async with engine.begin() as conn: - await conn.run_sync(Base.metadata.create_all) - - -async def close_engine() -> None: - """Close the database engine and dispose of connections.""" - await engine.dispose() diff --git a/src/app/db/session.py b/src/app/db/session.py deleted file mode 100644 index 6e2924e..0000000 --- a/src/app/db/session.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Database session management and connection utilities.""" - -from collections.abc import AsyncGenerator - -from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine - -from app.config.settings import settings - -engine = create_async_engine( - settings.db_url, - echo=False, - future=True, -) - -AsyncSessionLocal = async_sessionmaker( - engine, - class_=AsyncSession, - expire_on_commit=False, -) - - -async def get_db() -> AsyncGenerator[AsyncSession, None]: - """ - Dependency that provides a transactional database session. - It commits the transaction on successful completion or rolls back on error. 
- """ - async with AsyncSessionLocal() as session: - try: - - yield session - - await session.commit() - except Exception: - - await session.rollback() - - raise diff --git a/src/app/main.py b/src/app/main.py deleted file mode 100644 index 4e85b19..0000000 --- a/src/app/main.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Main FastAPI application module.""" - -from collections.abc import AsyncGenerator -from contextlib import asynccontextmanager - -from fastapi import FastAPI - -from app.api.auth import router as auth_router -from app.api.health_check import router as health_router -from app.config.settings import settings -from app.db.init_db import close_engine, init_models - - -@asynccontextmanager -async def lifespan(_app: FastAPI) -> AsyncGenerator[None, None]: - """Manage application lifespan events.""" - # Startup - if settings.environment == "development": - await init_models() - yield - # Shutdown - await close_engine() - - -app = FastAPI( - title="Project Backend", - version="0.1.0", - description="Backend service with health-check endpoint", - validate_response=True, #type validation of pydantic output - lifespan=lifespan, -) - -app.include_router(health_router) -app.include_router(auth_router) diff --git a/src/asyncflow/__init__.py b/src/asyncflow/__init__.py new file mode 100644 index 0000000..0f38c83 --- /dev/null +++ b/src/asyncflow/__init__.py @@ -0,0 +1,7 @@ +"""Public facade for high-level API.""" +from __future__ import annotations + +from asyncflow.builder.asyncflow_builder import AsyncFlow +from asyncflow.runtime.simulation_runner import SimulationRunner + +__all__ = ["AsyncFlow", "SimulationRunner"] diff --git a/src/asyncflow/analysis/__init__.py b/src/asyncflow/analysis/__init__.py new file mode 100644 index 0000000..825de6e --- /dev/null +++ b/src/asyncflow/analysis/__init__.py @@ -0,0 +1,5 @@ +"""Public module exposing the results analyzer""" + +from asyncflow.metrics.analyzer import ResultsAnalyzer + +__all__ = ["ResultsAnalyzer"] diff --git 
a/src/asyncflow/builder/asyncflow_builder.py b/src/asyncflow/builder/asyncflow_builder.py new file mode 100644 index 0000000..f6d2cea --- /dev/null +++ b/src/asyncflow/builder/asyncflow_builder.py @@ -0,0 +1,127 @@ +"""Definition of the input of the simulation through python object""" + +from __future__ import annotations + +from typing import Self + +from asyncflow.schemas.payload import SimulationPayload +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.topology.edges import Edge +from asyncflow.schemas.topology.graph import TopologyGraph +from asyncflow.schemas.topology.nodes import ( + Client, + LoadBalancer, + Server, + TopologyNodes, +) +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + + +class AsyncFlow: + """class with method to create the input for the simulation""" + + def __init__(self) -> None: + """Instance attributes necessary to define the simulation payload""" + self._generator: RqsGenerator | None = None + self._client: Client | None = None + self._servers: list[Server] | None = None + self._edges: list[Edge] | None = None + self._sim_settings: SimulationSettings | None = None + self._load_balancer: LoadBalancer | None = None + + def add_generator(self, rqs_generator: RqsGenerator) -> Self: + """Method to instantiate the generator""" + if not isinstance(rqs_generator, RqsGenerator): + msg = "You must add a RqsGenerator instance" + raise TypeError(msg) + self._generator = rqs_generator + return self + + def add_client(self, client: Client) -> Self: + """Method to instantiate the client""" + if not isinstance(client, Client): + msg = "You must add a Client instance" + raise TypeError(msg) + + self._client = client + return self + + def add_servers(self, *servers: Server) -> Self: + """Method to instantiate the server list""" + if self._servers is None: + self._servers = [] + + for server in servers: + if not isinstance(server, Server): + msg = "All the instances must be of the type 
Server" + raise TypeError(msg) + self._servers.append(server) + return self + + def add_edges(self, *edges: Edge) -> Self: + """Method to instantiate the list of edges""" + if self._edges is None: + self._edges = [] + + for edge in edges: + if not isinstance(edge, Edge): + msg = "All the instances must be of the type Edge" + raise TypeError(msg) + self._edges.append(edge) + return self + + def add_simulation_settings(self, sim_settings: SimulationSettings) -> Self: + """Method to instantiate the settings for the simulation""" + if not isinstance(sim_settings, SimulationSettings): + msg = "The instance must be of the type SimulationSettings" + raise TypeError(msg) + + self._sim_settings = sim_settings + return self + + def add_load_balancer(self, load_balancer: LoadBalancer) -> Self: + """Method to instantiate a load balancer""" + if not isinstance(load_balancer, LoadBalancer): + msg = "The instance must be of the type LoadBalancer" + raise TypeError(msg) + + self._load_balancer = load_balancer + return self + + def build_payload(self) -> SimulationPayload: + """Method to build the payload for the simulation""" + if self._generator is None: + msg = "The generator input must be instantiated before the simulation" + raise ValueError(msg) + if self._client is None: + msg = "The client input must be instantiated before the simulation" + raise ValueError(msg) + if not self._servers: + msg = "You must instantiate at least one server before the simulation" + raise ValueError(msg) + if not self._edges: + msg = "You must instantiate edges before the simulation" + raise ValueError(msg) + if self._sim_settings is None: + msg = "The simulation settings must be instantiated before the simulation" + raise ValueError(msg) + + nodes = TopologyNodes( + servers=self._servers, + client=self._client, + load_balancer=self._load_balancer, + ) + + graph = TopologyGraph( + nodes = nodes, + edges=self._edges, + ) + + return SimulationPayload.model_validate({ + "rqs_input": self._generator, 
+ "topology_graph": graph, + "sim_settings": self._sim_settings, + }) + + + diff --git a/src/asyncflow/components/__init__.py b/src/asyncflow/components/__init__.py new file mode 100644 index 0000000..52d66c7 --- /dev/null +++ b/src/asyncflow/components/__init__.py @@ -0,0 +1,15 @@ +"""Public components: re-exports Pydantic schemas (topology).""" +from __future__ import annotations + +from asyncflow.schemas.topology.edges import Edge +from asyncflow.schemas.topology.endpoint import Endpoint +from asyncflow.schemas.topology.nodes import ( + Client, + LoadBalancer, + Server, + ServerResources, +) + +__all__ = ["Client", "Edge", "Endpoint", "LoadBalancer", "Server", "ServerResources"] + + diff --git a/src/asyncflow/config/constants.py b/src/asyncflow/config/constants.py new file mode 100644 index 0000000..de79a33 --- /dev/null +++ b/src/asyncflow/config/constants.py @@ -0,0 +1,256 @@ +""" +Application-wide constants and configuration values. + +This module groups all the static enumerations used by the AsyncFlow backend +so that: + + JSON / YAML payloads can be strictly validated with Pydantic. + Front-end and simulation engine share a single source of truth. + Ruff, mypy and IDEs can leverage the strong typing provided by Enum classes. + +IMPORTANT: Changing any enum value is a breaking-change for every +stored configuration file. Add new members whenever possible instead of +renaming existing ones. +""" + +from enum import Enum, IntEnum, StrEnum + +# ====================================================================== +# CONSTANTS FOR THE REQUEST-GENERATOR COMPONENT +# ====================================================================== + + +class TimeDefaults(IntEnum): + """ + Default time-related constants (expressed in seconds). + + These values are used when the user omits an explicit parameter. They also + serve as lower / upper bounds for validation for the requests generator. 
+ """ + + MIN_TO_SEC = 60 # 1 minute → 60 s + USER_SAMPLING_WINDOW = 60 # every 60 seconds sample the number of active user + SIMULATION_TIME = 3_600 # run 1 h if user gives no value + MIN_SIMULATION_TIME = 5 # 5 seconds give a broad spectrum + MIN_USER_SAMPLING_WINDOW = 1 # 1 s minimum + MAX_USER_SAMPLING_WINDOW = 120 # 2 min maximum + + +class Distribution(StrEnum): + """ + Probability distributions accepted by app.schemas.RVConfig. + + The string value is exactly the identifier that must appear in JSON + payloads. The simulation engine will map each name to the corresponding + random sampler (e.g.numpy.random.poisson). + """ + + POISSON = "poisson" + NORMAL = "normal" + LOG_NORMAL = "log_normal" + EXPONENTIAL = "exponential" + UNIFORM = "uniform" + +# ====================================================================== +# CONSTANTS FOR ENDPOINT STEP DEFINITION (REQUEST-HANDLER) +# ====================================================================== + +class EndpointStepIO(StrEnum): + """ + I/O-bound operation categories that can occur inside an endpoint step. + - TASK_SPAWN + Spawns an additional ``asyncio.Task`` and returns immediately. + - LLM + Performs a remote Large-Language-Model inference call. + - WAIT + Passive, *non-blocking* wait for I/O completion; no new task spawned. + - DB + Round-trip to a relational / NoSQL database. + - CACHE + Access to a local or distributed cache layer. + """ + + TASK_SPAWN = "io_task_spawn" + LLM = "io_llm" + WAIT = "io_wait" + DB = "io_db" + CACHE = "io_cache" + + +class EndpointStepCPU(StrEnum): + """ + CPU-bound operation categories inside an endpoint step. + + Use these when the coroutine keeps the Python interpreter busy + (GIL-bound or compute-heavy code) rather than waiting for I/O. + """ + + INITIAL_PARSING = "initial_parsing" + CPU_BOUND_OPERATION = "cpu_bound_operation" + + +class EndpointStepRAM(StrEnum): + """ + Memory-related operations inside a step. 
+ + Currently limited to a single category, but kept as an Enum so that future + resource types (e.g. GPU memory) can be added without schema changes. + """ + + RAM = "ram" + + +class StepOperation(StrEnum): + """ + Keys used inside the metrics dictionary of a step. + + CPU_TIME - Service time (seconds) during which the coroutine occupies + the CPU / GIL. + NECESSARY_RAM - Peak memory (MB) required by the step. + """ + + CPU_TIME = "cpu_time" + IO_WAITING_TIME = "io_waiting_time" + NECESSARY_RAM = "necessary_ram" + +# ====================================================================== +# CONSTANTS FOR THE RESOURCES OF A SERVER +# ====================================================================== + +class ServerResourcesDefaults: + """Resources available for a single server""" + + CPU_CORES = 1 + MINIMUM_CPU_CORES = 1 + RAM_MB = 1024 + MINIMUM_RAM_MB = 256 + DB_CONNECTION_POOL = None + +# ====================================================================== +# CONSTANTS FOR NETWORK PARAMETERS +# ====================================================================== + +class NetworkParameters: + """parameters for the network""" + + MIN_DROPOUT_RATE = 0.0 + DROPOUT_RATE = 0.01 + MAX_DROPOUT_RATE = 1.0 + +# ====================================================================== +# NAME FOR LOAD BALANCER ALGORITHMS +# ====================================================================== + +class LbAlgorithmsName(StrEnum): + """definition of the available algortithms for the Load Balancer""" + + ROUND_ROBIN = "round_robin" + LEAST_CONNECTIONS = "least_connection" + + +# ====================================================================== +# CONSTANTS FOR THE MACRO-TOPOLOGY GRAPH +# ====================================================================== + +class SystemNodes(StrEnum): + """ + High-level node categories of the system topology graph. + + Each member represents a *macro-component* that may have its own SimPy + resources (CPU cores, DB pool, etc.). 
class SamplePeriods(float, Enum):
    """Allowed sampling periods (in seconds) for time-series metrics."""

    STANDARD_TIME = 0.01   # 10 ms default
    MINIMUM_TIME = 0.001   # 1 ms lower bound
    MAXIMUM_TIME = 0.1     # 100 ms upper bound
= "throughput_rps" + LLM_STATS = "llm_stats" + +# ====================================================================== +# CONSTANTS FOR SERVER RUNTIME +# ====================================================================== + +class ServerResourceName(StrEnum): + """Keys for each server resource type, used when building the container map.""" + + CPU = "CPU" + RAM = "RAM" + +# ====================================================================== +# CONSTANTS FOR LATENCY STATS +# ====================================================================== + +class LatencyKey(StrEnum): + """Keys for the collection of the latency stats""" + + TOTAL_REQUESTS = "total_requests" + MEAN = "mean" + MEDIAN = "median" + STD_DEV = "std_dev" + P95 = "p95" + P99 = "p99" + MIN = "min" + MAX = "max" diff --git a/src/asyncflow/config/plot_constants.py b/src/asyncflow/config/plot_constants.py new file mode 100644 index 0000000..efe28b9 --- /dev/null +++ b/src/asyncflow/config/plot_constants.py @@ -0,0 +1,47 @@ +"""Dataclass to define a central structure to plot the metrics""" +from dataclasses import dataclass + + +@dataclass(frozen=True) +class PlotCfg: + """Dataclass for the plot of the various metrics""" + + no_data: str + title: str + x_label: str + y_label: str + ready_label: str | None = None + io_label: str | None = None + legend_label: str | None = None + +LATENCY_PLOT = PlotCfg( + no_data="No latency data", + title="Request Latency Distribution", + x_label="Latency (s)", + y_label="Frequency", +) + +THROUGHPUT_PLOT = PlotCfg( + no_data="No throughput data", + title="Throughput (RPS)", + x_label="Time (s)", + y_label="Requests/s", +) + + +SERVER_QUEUES_PLOT = PlotCfg( + no_data="No queue data", + title="Server Queues", + x_label="Time (s)", + y_label="Queue length", + ready_label="Ready queue", + io_label="I/O queue", +) + +RAM_PLOT = PlotCfg( + no_data="No RAM data", + title="RAM Usage", + x_label="Time (s)", + y_label="RAM (MB)", + legend_label="RAM", +) diff --git 
a/src/asyncflow/enums/__init__.py b/src/asyncflow/enums/__init__.py new file mode 100644 index 0000000..a07a18f --- /dev/null +++ b/src/asyncflow/enums/__init__.py @@ -0,0 +1,23 @@ +"""Public enums used in scenario definitions.""" + +from asyncflow.config.constants import ( + Distribution, + EndpointStepCPU, + EndpointStepIO, + EndpointStepRAM, + EventMetricName, + LbAlgorithmsName, + SampledMetricName, + StepOperation, +) + +__all__ = [ + "Distribution", + "EndpointStepCPU", + "EndpointStepIO", + "EndpointStepRAM", + "EventMetricName", + "LbAlgorithmsName", + "SampledMetricName", + "StepOperation", +] diff --git a/src/asyncflow/metrics/analyzer.py b/src/asyncflow/metrics/analyzer.py new file mode 100644 index 0000000..b9a6ea2 --- /dev/null +++ b/src/asyncflow/metrics/analyzer.py @@ -0,0 +1,589 @@ +"""Module for post-simulation analysis and visualization.""" + +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING + +import numpy as np + +from asyncflow.config.constants import LatencyKey, SampledMetricName +from asyncflow.config.plot_constants import ( + LATENCY_PLOT, + RAM_PLOT, + SERVER_QUEUES_PLOT, + THROUGHPUT_PLOT, + PlotCfg, +) + +if TYPE_CHECKING: + # Standard library typing imports in type-checking block (TC003). + from collections.abc import Iterable + + from matplotlib.axes import Axes + from matplotlib.lines import Line2D + + from asyncflow.runtime.actors.client import ClientRuntime + from asyncflow.runtime.actors.edge import EdgeRuntime + from asyncflow.runtime.actors.server import ServerRuntime + from asyncflow.schemas.settings.simulation import SimulationSettings + + +# Short alias to keep signatures within 88 chars (E501). +Series = tuple[list[float], list[float]] + + +class ResultsAnalyzer: + """Analyze and visualize the results of a completed simulation. 
+ + This class holds the raw runtime objects and lazily computes: + - latency statistics + - throughput time series (RPS) + - sampled metrics from servers and edges + + It also exposes compact plotting/rendering helpers so that CLI scripts + can be short and consistent. + """ + + # Default bucket size (seconds) used for cached throughput. + _WINDOW_SIZE_S: float = 1.0 + + def __init__( + self, + *, + client: ClientRuntime, + servers: list[ServerRuntime], + edges: list[EdgeRuntime], + settings: SimulationSettings, + ) -> None: + """Initialize with the runtime objects and original settings.""" + self._client = client + self._servers = servers + self._edges = edges + self._settings = settings + + # Lazily computed caches + self.latencies: list[float] | None = None + self.latency_stats: dict[LatencyKey, float] | None = None + self.throughput_series: Series | None = None + # Sampled metrics are stored with string metric keys for simplicity. + self.sampled_metrics: dict[str, dict[str, list[float]]] | None = None + + # ───────────────────────────────────────────── + # Core computation + # ───────────────────────────────────────────── + def process_all_metrics(self) -> None: + """Compute all aggregated and sampled metrics if not already done.""" + if self.latency_stats is None and self._client.rqs_clock: + self._process_event_metrics() + + if self.sampled_metrics is None: + self._extract_sampled_metrics() + + def _process_event_metrics(self) -> None: + """Calculate latency stats and throughput time series (1s RPS).""" + # 1) Latencies + self.latencies = [ + clock.finish - clock.start + for clock in self._client.rqs_clock + ] + + # 2) Summary stats + if self.latencies: + arr = np.array(self.latencies, dtype=float) + self.latency_stats = { + LatencyKey.TOTAL_REQUESTS: float(arr.size), + LatencyKey.MEAN: float(np.mean(arr)), + LatencyKey.MEDIAN: float(np.median(arr)), + LatencyKey.STD_DEV: float(np.std(arr)), + LatencyKey.P95: float(np.percentile(arr, 95)), + LatencyKey.P99: 
float(np.percentile(arr, 99)), + LatencyKey.MIN: float(np.min(arr)), + LatencyKey.MAX: float(np.max(arr)), + } + else: + self.latency_stats = {} + + # 3) Throughput per 1s window (cached) + completion_times = sorted(clock.finish for clock in self._client.rqs_clock) + end_time = self._settings.total_simulation_time + + timestamps: list[float] = [] + rps_values: list[float] = [] + idx = 0 + current_end = ResultsAnalyzer._WINDOW_SIZE_S + + while current_end <= end_time: + count = 0 + while idx < len(completion_times) and completion_times[idx] <= current_end: + count += 1 + idx += 1 + timestamps.append(current_end) + rps_values.append(count / ResultsAnalyzer._WINDOW_SIZE_S) + current_end += ResultsAnalyzer._WINDOW_SIZE_S + + self.throughput_series = (timestamps, rps_values) + + def _extract_sampled_metrics(self) -> None: + """Gather sampled metrics from servers and edges into a nested dict.""" + metrics: dict[str, dict[str, list[float]]] = defaultdict(dict) + + for server in self._servers: + sid = server.server_config.id + for name, values in server.enabled_metrics.items(): + # Store with string key for a consistent external API. 
+ metrics[name.value][sid] = values + + for edge in self._edges: + eid = edge.edge_config.id + for name, values in edge.enabled_metrics.items(): + metrics[name.value][eid] = values + + self.sampled_metrics = metrics + + # ───────────────────────────────────────────── + # Public accessors & formatting + # ───────────────────────────────────────────── + def list_server_ids(self) -> list[str]: + """Return server IDs in a stable order as given in the topology.""" + return [s.server_config.id for s in self._servers] + + def get_latency_stats(self) -> dict[LatencyKey, float]: + """Return latency statistics, computing them if necessary.""" + self.process_all_metrics() + return self.latency_stats or {} + + def format_latency_stats(self) -> str: + """Return a human-readable block with latency stats.""" + stats = self.get_latency_stats() + if not stats: + return "Latency stats: (empty)" + + by_name: dict[str, float] = { + getattr(k, "name", str(k)): v + for k, v in stats.items() + } + order = [ + "TOTAL_REQUESTS", + "MEAN", + "MEDIAN", + "STD_DEV", + "P95", + "P99", + "MIN", + "MAX", + ] + + lines = ["════════ LATENCY STATS ════════"] + # PERF401: build then extend instead of append in a loop. + formatted = [ + f"{k:<20} = {by_name[k]:.6f}" + for k in order + if k in by_name + ] + lines.extend(formatted) + return "\n".join(lines) + + def get_throughput_series( + self, + window_s: float | None = None, + ) -> Series: + """Return (timestamps, RPS). If `window_s` is provided, recompute on the fly.""" + self.process_all_metrics() + + # Use cached (1s) series when suitable. + if window_s is None or window_s == ResultsAnalyzer._WINDOW_SIZE_S: + return self.throughput_series or ([], []) + + # Recompute with a custom window size. 
+ completion_times = sorted(clock.finish for clock in self._client.rqs_clock) + end_time = self._settings.total_simulation_time + + timestamps: list[float] = [] + rps_values: list[float] = [] + idx = 0 + current_end = float(window_s) + + while current_end <= end_time: + count = 0 + while idx < len(completion_times) and completion_times[idx] <= current_end: + count += 1 + idx += 1 + timestamps.append(current_end) + rps_values.append(count / float(window_s)) + current_end += float(window_s) + + return (timestamps, rps_values) + + def get_sampled_metrics(self) -> dict[str, dict[str, list[float]]]: + """Return sampled metrics from servers and edges.""" + self.process_all_metrics() + assert self.sampled_metrics is not None + return self.sampled_metrics + + def get_metric_map(self, key: SampledMetricName | str) -> dict[str, list[float]]: + """Return a series map for a metric, tolerant to enum/string keys.""" + self.process_all_metrics() + assert self.sampled_metrics is not None + + if isinstance(key, SampledMetricName): + # Prefer the canonical .value key; fall back to .name. 
+ found = ( + self.sampled_metrics.get(key.value) + or self.sampled_metrics.get(key.name, {}) + ) + return found or {} + # If caller used a raw string: + return self.sampled_metrics.get(key, {}) + + def get_series(self, key: SampledMetricName | str, entity_id: str) -> Series: + """Return (times, values) for a given sampled metric and entity id.""" + series_map = self.get_metric_map(key) + vals = series_map.get(entity_id, []) + times = (np.arange(len(vals)) * self._settings.sample_period_s).tolist() + return times, vals + + # ───────────────────────────────────────────── + # Plotting helpers + # ───────────────────────────────────────────── + @staticmethod + def _apply_plot_cfg( + ax: Axes, + cfg: PlotCfg, + *, + legend_handles: Iterable[Line2D] | None = None, + ) -> None: + """Apply title / axis labels / grid and (optionally) legend to ax.""" + ax.set_title(cfg.title) + ax.set_xlabel(cfg.x_label) + ax.set_ylabel(cfg.y_label) + ax.grid(visible=True) + if legend_handles: + ax.legend(handles=legend_handles) + + def plot_base_dashboard(self, ax_latency: Axes, ax_throughput: Axes) -> None: + """Plot a 2x1 header: latency histogram + throughput line.""" + self.plot_latency_distribution(ax_latency) + self.plot_throughput(ax_throughput) + + def plot_latency_distribution(self, ax: Axes) -> None: + """Plot latency histogram with mean/P50/P95/P99 lines and a single + legend box with values. 
+ """ + self.process_all_metrics() + if not self.latencies: + ax.text(0.5, 0.5, LATENCY_PLOT.no_data, ha="center", va="center") + return + + # Colors that pop on blue/white + col_mean = "#d62728" # red + col_p50 = "#ff7f0e" # orange + col_p95 = "#2ca02c" # green + col_p99 = "#9467bd" # purple + hist_color = "#1f77b4" # soft blue + + arr = np.asarray(self.latencies, dtype=float) + v_mean = float(np.mean(arr)) + v_p50 = float(np.percentile(arr, 50)) + v_p95 = float(np.percentile(arr, 95)) + v_p99 = float(np.percentile(arr, 99)) + + # Histogram (subtle to let overlays stand out) + ax.hist( + arr, bins=50, color=hist_color, alpha=0.40, + edgecolor="none", zorder=1, + ) + + # Vertical overlays + ax.axvline( + v_mean, color=col_mean, linestyle=":", linewidth=1.8, + alpha=0.95, zorder=3, + ) + ax.axvline( + v_p50, color=col_p50, linestyle="-.", linewidth=1.6, + alpha=0.90, zorder=3, + ) + ax.axvline( + v_p95, color=col_p95, linestyle="--", linewidth=1.6, + alpha=0.90, zorder=3, + ) + ax.axvline( + v_p99, color=col_p99, linestyle="--", linewidth=1.6, + alpha=0.90, zorder=3, + ) + + # Build legend handles (dummy lines, no data) + h_mean = ax.plot( + [], [], color=col_mean, linestyle=":", linewidth=2.4, + label=f"mean = {v_mean:.3f}", + )[0] + h_p50 = ax.plot( + [], [], color=col_p50, linestyle="-.", linewidth=2.4, + label=f"P50 = {v_p50:.3f}", + )[0] + h_p95 = ax.plot( + [], [], color=col_p95, linestyle="--", linewidth=2.4, + label=f"P95 = {v_p95:.3f}", + )[0] + h_p99 = ax.plot( + [], [], color=col_p99, linestyle="--", linewidth=2.4, + label=f"P99 = {v_p99:.3f}", + )[0] + + # Titles / labels / grid + self._apply_plot_cfg(ax, LATENCY_PLOT) + + # Legend (top-right) with readable background + leg = ax.legend( + handles=[h_mean, h_p50, h_p95, h_p99], + loc="upper right", + bbox_to_anchor=(0.98, 0.98), + borderaxespad=0.0, + framealpha=0.90, + fancybox=True, + handlelength=2.6, + fontsize=9.5, + ) + leg.get_frame().set_facecolor("white") + + + def plot_throughput(self, ax: Axes, 
*, window_s: float | None = None) -> None: + """Plot throughput with mean/P95/max lines, EWMA curve, and a single + legend box with values. + """ + timestamps, values = self.get_throughput_series(window_s=window_s) + if not timestamps: + ax.text(0.5, 0.5, THROUGHPUT_PLOT.no_data, ha="center", va="center") + return + + # Colors (high contrast on blue/white) + col_series = "#1f77b4" # blue main series + col_mean = "#d62728" # red + col_p95 = "#2ca02c" # green + col_max = "#9467bd" # purple + + + vals = np.asarray(values, dtype=float) + v_mean = float(np.mean(vals)) + v_p95 = float(np.percentile(vals, 95)) + v_max = float(np.max(vals)) + + # Main series + ax.plot( + timestamps, vals, marker="o", linewidth=1.6, alpha=0.95, + color=col_series, zorder=2, + ) + + # Horizontal overlays (match legend colors) + ax.axhline( + v_mean, color=col_mean, linestyle=":", linewidth=1.8, + alpha=0.95, zorder=4, + ) + ax.axhline( + v_p95, color=col_p95, linestyle="--", linewidth=1.6, + alpha=0.90, zorder=4, + ) + ax.axhline( + v_max, color=col_max, linestyle="--", linewidth=1.6, + alpha=0.90, zorder=4, + ) + + # Legend handles (dummy, no data) + h_mean = ax.plot( + [], [], color=col_mean, linestyle=":", linewidth=2.4, + label=f"mean = {v_mean:.3f}", + )[0] + h_p95 = ax.plot( + [], [], color=col_p95, linestyle="--", linewidth=2.4, + label=f"P95 = {v_p95:.3f}", + )[0] + h_max = ax.plot( + [], [], color=col_max, linestyle="--", linewidth=2.4, + label=f"max = {v_max:.3f}", + )[0] + + # Apply base cfg (titles/labels/grid) + self._apply_plot_cfg(ax, THROUGHPUT_PLOT) + + # Legend: upper-right; single box with values + leg = ax.legend( + handles=[h_mean, h_p95, h_max], + loc="upper right", + bbox_to_anchor=(0.98, 0.98), + borderaxespad=0.0, + framealpha=0.90, + fancybox=True, + handlelength=2.6, + fontsize=9.5, + ) + leg.get_frame().set_facecolor("white") + + + + def plot_single_server_ready_queue(self, ax: Axes, server_id: str) -> None: + """Plot Ready queue with mean/min/max lines and a 
single legend box with + values. No trend/ewma, no legend entry for the main series. + """ + times, vals = self.get_series(SampledMetricName.READY_QUEUE_LEN, server_id) + if not vals: + ax.text(0.5, 0.5, SERVER_QUEUES_PLOT.no_data, ha="center", va="center") + return + + # Colors consistent with other charts + col_mean = "#d62728" # red + col_min = "#2ca02c" # green + col_max = "#9467bd" # purple + + y = np.asarray(vals, dtype=float) + v_mean = float(np.mean(y)) + v_min = float(np.min(y)) + v_max = float(np.max(y)) + + # Main series (no label/legend as requested) + ax.plot(times, y, linewidth=1.6, alpha=0.95) + + # Overlays + ax.axhline(v_mean, color=col_mean, linestyle=":", linewidth=1.8, alpha=0.95) + ax.axhline(v_min, color=col_min, linestyle="--", linewidth=1.6, alpha=0.90) + ax.axhline(v_max, color=col_max, linestyle="--", linewidth=1.6, alpha=0.90) + + # Legend handles (dummy lines with values) + h_mean = ax.plot( + [], [], color=col_mean, linestyle=":", linewidth=2.4, + label=f"mean = {v_mean:.3f}", + )[0] + h_min = ax.plot( + [], [], color=col_min, linestyle="--", linewidth=2.4, + label=f"min = {v_min:.3f}", + )[0] + h_max = ax.plot( + [], [], color=col_max, linestyle="--", linewidth=2.4, + label=f"max = {v_max:.3f}", + )[0] + + ax.set_title(f"Ready Queue — {server_id}") + ax.set_xlabel(SERVER_QUEUES_PLOT.x_label) + ax.set_ylabel(SERVER_QUEUES_PLOT.y_label) + ax.grid(visible=True) + + leg = ax.legend( + handles=[h_mean, h_min, h_max], + loc="upper right", + bbox_to_anchor=(0.98, 0.98), + borderaxespad=0.0, + framealpha=0.90, + fancybox=True, + handlelength=2.6, + fontsize=9.5, + ) + leg.get_frame().set_facecolor("white") + + def plot_single_server_io_queue(self, ax: Axes, server_id: str) -> None: + """Plot I/O queue with mean/min/max lines and a single legend box with + values. No trend/ewma, no legend entry for the main series. 
+ """ + times, vals = self.get_series(SampledMetricName.EVENT_LOOP_IO_SLEEP, server_id) + if not vals: + ax.text(0.5, 0.5, SERVER_QUEUES_PLOT.no_data, ha="center", va="center") + return + + col_mean = "#d62728" # red + col_min = "#2ca02c" # green + col_max = "#9467bd" # purple + + y = np.asarray(vals, dtype=float) + v_mean = float(np.mean(y)) + v_min = float(np.min(y)) + v_max = float(np.max(y)) + + ax.plot(times, y, linewidth=1.6, alpha=0.95) + + ax.axhline(v_mean, color=col_mean, linestyle=":", linewidth=1.8, alpha=0.95) + ax.axhline(v_min, color=col_min, linestyle="--", linewidth=1.6, alpha=0.90) + ax.axhline(v_max, color=col_max, linestyle="--", linewidth=1.6, alpha=0.90) + + h_mean = ax.plot( + [], [], color=col_mean, linestyle=":", linewidth=2.4, + label=f"mean = {v_mean:.3f}", + )[0] + h_min = ax.plot( + [], [], color=col_min, linestyle="--", linewidth=2.4, + label=f"min = {v_min:.3f}", + )[0] + h_max = ax.plot( + [], [], color=col_max, linestyle="--", linewidth=2.4, + label=f"max = {v_max:.3f}", + )[0] + + ax.set_title(f"I/O Queue — {server_id}") + ax.set_xlabel(SERVER_QUEUES_PLOT.x_label) + ax.set_ylabel(SERVER_QUEUES_PLOT.y_label) + ax.grid(visible=True) + + leg = ax.legend( + handles=[h_mean, h_min, h_max], + loc="upper right", + bbox_to_anchor=(0.98, 0.98), + borderaxespad=0.0, + framealpha=0.90, + fancybox=True, + handlelength=2.6, + fontsize=9.5, + ) + leg.get_frame().set_facecolor("white") + + + + def plot_single_server_ram(self, ax: Axes, server_id: str) -> None: + """Plot RAM usage with mean/min/max lines and a single legend box with + values. No trend/ewma, no legend entry for the main series. 
+ """ + times, vals = self.get_series(SampledMetricName.RAM_IN_USE, server_id) + if not vals: + ax.text(0.5, 0.5, RAM_PLOT.no_data, ha="center", va="center") + return + + col_mean = "#d62728" # red + col_min = "#2ca02c" # green + col_max = "#9467bd" # purple + + y = np.asarray(vals, dtype=float) + v_mean = float(np.mean(y)) + v_min = float(np.min(y)) + v_max = float(np.max(y)) + + ax.plot(times, y, linewidth=1.6, alpha=0.95) + + ax.axhline(v_mean, color=col_mean, linestyle=":", linewidth=1.8, alpha=0.95) + ax.axhline(v_min, color=col_min, linestyle="--", linewidth=1.6, alpha=0.90) + ax.axhline(v_max, color=col_max, linestyle="--", linewidth=1.6, alpha=0.90) + + h_mean = ax.plot( + [], [], color=col_mean, linestyle=":", linewidth=2.4, + label=f"mean = {v_mean:.3f}", + )[0] + h_min = ax.plot( + [], [], color=col_min, linestyle="--", linewidth=2.4, + label=f"min = {v_min:.3f}", + )[0] + h_max = ax.plot( + [], [], color=col_max, linestyle="--", linewidth=2.4, + label=f"max = {v_max:.3f}", + )[0] + + ax.set_title(f"{RAM_PLOT.title} — {server_id}") + ax.set_xlabel(RAM_PLOT.x_label) + ax.set_ylabel(RAM_PLOT.y_label) + ax.grid(visible=True) + + leg = ax.legend( + handles=[h_mean, h_min, h_max], + loc="upper right", + bbox_to_anchor=(0.98, 0.98), + borderaxespad=0.0, + framealpha=0.90, + fancybox=True, + handlelength=2.6, + fontsize=9.5, + ) + leg.get_frame().set_facecolor("white") diff --git a/src/asyncflow/metrics/client.py b/src/asyncflow/metrics/client.py new file mode 100644 index 0000000..2e49638 --- /dev/null +++ b/src/asyncflow/metrics/client.py @@ -0,0 +1,18 @@ +""" +initialization of the structure to gather the metrics +for the client of the system +""" + +from typing import NamedTuple + + +class RqsClock(NamedTuple): + """ + structure to register time of generation and + time of elaboration for each request + """ + + start: float + finish: float + + diff --git a/src/asyncflow/metrics/collector.py b/src/asyncflow/metrics/collector.py new file mode 100644 index 
0000000..38c2f0d --- /dev/null +++ b/src/asyncflow/metrics/collector.py @@ -0,0 +1,78 @@ +"""class to centralized the the collection of time series regarding metrics""" + +from collections.abc import Generator + +import simpy + +from asyncflow.config.constants import SampledMetricName +from asyncflow.runtime.actors.edge import EdgeRuntime +from asyncflow.runtime.actors.server import ServerRuntime +from asyncflow.schemas.settings.simulation import SimulationSettings + +# The idea for this class is to gather list of runtime objects that +# are defined in the central class to build the simulation, in this +# way we optimize the initialization of various objects reducing +# the global overhead + +class SampledMetricCollector: + """class to define a centralized object to collect sampled metrics""" + + def __init__( + self, + *, + edges: list[EdgeRuntime], + servers: list[ServerRuntime], + env: simpy.Environment, + sim_settings: SimulationSettings, + ) -> None: + """ + Args: + edges (list[EdgeRuntime]): list of the class EdgeRuntime + servers (list[ServerRuntime]): list of server of the class ServerRuntime + env (simpy.Environment): environment for the simulation + sim_settings (SimulationSettings): general settings for the simulation + + """ + self.edges = edges + self.servers = servers + self.sim_settings = sim_settings + self.env = env + self._sample_period = sim_settings.sample_period_s + + + # enum keys instance-level for mandatory sampled metrics to collect + self._conn_key = SampledMetricName.EDGE_CONCURRENT_CONNECTION + self._ram_key = SampledMetricName.RAM_IN_USE + self._io_key = SampledMetricName.EVENT_LOOP_IO_SLEEP + self._ready_key = SampledMetricName.READY_QUEUE_LEN + + + def _build_time_series(self) -> Generator[simpy.Event, None, None]: + """Function to build time series for enabled metrics""" + while True: + yield self.env.timeout(self._sample_period) + for edge in self.edges: + if self._conn_key in edge.enabled_metrics: + 
edge.enabled_metrics[self._conn_key].append( + edge.concurrent_connections, + ) + for server in self.servers: + if all( + k in server.enabled_metrics + for k in (self._ram_key, self._io_key, self._ready_key) + ): + server.enabled_metrics[self._ram_key].append(server.ram_in_use) + server.enabled_metrics[self._io_key].append(server.io_queue_len) + server.enabled_metrics[self._ready_key].append(server.ready_queue_len) + + + + def start(self) -> simpy.Process: + """Definition of the process to collect sampled metrics""" + return self.env.process(self._build_time_series()) + + + + + + diff --git a/src/asyncflow/metrics/edge.py b/src/asyncflow/metrics/edge.py new file mode 100644 index 0000000..f9626dd --- /dev/null +++ b/src/asyncflow/metrics/edge.py @@ -0,0 +1,29 @@ +"""initialization of the structure to gather the metrics for the edges of the system""" + +from collections.abc import Iterable + +from asyncflow.config.constants import SampledMetricName + +# Initialize one time outside the function all possible metrics +# related to the edges, the idea of this structure is to +# guarantee scalability in the long term if multiple metrics +# will be considered + +EDGE_METRICS = ( + SampledMetricName.EDGE_CONCURRENT_CONNECTION, +) + +def build_edge_metrics( + enabled_sample_metrics: Iterable[SampledMetricName], + ) -> dict[SampledMetricName, list[float | int]]: + """ + Function to populate a dictionary to collect values for + time series of sampled metrics related to the edges of + the system. + """ + # The edge case of the empty dict is avoided since at least + # one metric is always measured by default. 
# Fixed catalogue of all server-related sampled metrics.  Kept as a module
# constant so that adding a new optional metric later only touches this tuple.
SERVER_METRICS = (
    SampledMetricName.READY_QUEUE_LEN,
    SampledMetricName.EVENT_LOOP_IO_SLEEP,
    SampledMetricName.RAM_IN_USE,
)


def build_server_metrics(
    enabled_sample_metrics: Iterable[SampledMetricName],
) -> dict[SampledMetricName, list[float | int]]:
    """
    Create empty time-series lists for the enabled server metrics.

    Iterates the fixed ``SERVER_METRICS`` catalogue and keeps only the
    entries the caller enabled, mapping each one to a fresh empty list
    that the metric collector appends to during the simulation.  The
    empty-dict edge case cannot occur in practice because at least one
    metric is always measured by default.
    """
    # Materialize the iterable once: the signature accepts any Iterable, so
    # repeated `in` tests would be O(n) each and would silently exhaust a
    # one-shot iterator (e.g. a generator) after the first membership check.
    enabled = set(enabled_sample_metrics)
    return {metric: [] for metric in SERVER_METRICS if metric in enabled}
class ResourcesRuntime:
    """Registry mapping each server id to its SimPy resource containers."""

    def __init__(
        self,
        *,
        env: simpy.Environment,
        data: TopologyGraph,
    ) -> None:
        """Build one container bundle per server in the validated topology."""
        self.env = env
        self.data = data
        # Index every server's CPU/RAM containers by its unique id so that
        # runtime actors can fetch their resources with a plain lookup.
        self._by_server: dict[str, ServerContainers] = {}
        for server in data.nodes.servers:
            self._by_server[server.id] = build_containers(
                env,
                server.server_resources,
            )

    def __getitem__(self, server_id: str) -> ServerContainers:
        """Return the container bundle for *server_id* (KeyError if absent)."""
        return self._by_server[server_id]
+ """ + + CPU: simpy.Container + RAM: simpy.Container + +# Central funcrion to initialize the dictionary with ram and cpu container +def build_containers( + env: simpy.Environment, + spec: ServerResources, + ) -> ServerContainers: + """ + Construct and return a mapping of SimPy Containers for a server's CPU and RAM. + + Given a SimPy environment and a validated ServerResources spec, this function + initializes one simpy.Container for CPU (with capacity equal to cpu_cores) + and one for RAM (with capacity equal to ram_mb), then returns them in a + ServerContainers TypedDict keyed by "CPU" and "RAM". + + Parameters + ---------- + env : simpy.Environment + The simulation environment in which the Containers will be created. + spec : ServerResources + A Pydantic model instance defining the server's cpu_cores and ram_mb. + + Returns + ------- + ServerContainers + A TypedDict with exactly two entries: + - "CPU": simpy.Container initialized with spec.cpu_cores + - "RAM": simpy.Container initialized with spec.ram_mb + + """ + return { + ServerResourceName.CPU.value: simpy.Container( + env, capacity=spec.cpu_cores, init=spec.cpu_cores, + ), + ServerResourceName.RAM.value: simpy.Container( + env, capacity=spec.ram_mb, init=spec.ram_mb, + ), + } + + + diff --git a/src/asyncflow/runtime/actors/client.py b/src/asyncflow/runtime/actors/client.py new file mode 100644 index 0000000..6c752f1 --- /dev/null +++ b/src/asyncflow/runtime/actors/client.py @@ -0,0 +1,83 @@ +"""defining the object client for the simulation""" + +from collections.abc import Generator +from typing import TYPE_CHECKING + +import simpy + +from asyncflow.config.constants import SystemNodes +from asyncflow.metrics.client import RqsClock +from asyncflow.runtime.actors.edge import EdgeRuntime +from asyncflow.schemas.topology.nodes import Client + +if TYPE_CHECKING: + from asyncflow.runtime.rqs_state import RequestState + + + +class ClientRuntime: + """class to define the client runtime""" + + def __init__( + self, 
    def _forwarder(self) -> Generator[simpy.Event, None, None]:
        """Pull requests from the client inbox and route them.

        Each request passes through the client twice: once on its way into
        the system and once when it comes back after being elaborated.  The
        length of the hop history is used to tell the two cases apart.
        """
        assert self.out_edge is not None
        while True:

            state: RequestState = yield self.client_box.get()  # type: ignore[assignment]

            # Stamp this visit to the client into the request's history.
            state.record_hop(
                SystemNodes.CLIENT,
                self.client_config.id,
                self.env.now,
            )

            # A request only entering the system carries just the mandatory
            # hops so far (rqs generator + client registration); a longer
            # history means it already traveled through the topology and is
            # now returning.  NOTE(review): the code uses ``> 3`` while the
            # original comment reasoned about two mandatory hops — confirm
            # the exact threshold against the hop sequence recorded by the
            # other actors.
            if len(state.history) > 3:
                # Completed: close its clock and park it in completed_box.
                # The clock list alone is enough to compute throughput and
                # the latency distribution at the end of the simulation.
                state.finish_time = self.env.now
                clock_data = RqsClock(
                    start=state.initial_time,
                    finish=state.finish_time,
                )
                self._rqs_clock.append(clock_data)
                yield self.completed_box.put(state)
            else:
                # Fresh request: forward it toward the rest of the topology.
                self.out_edge.transport(state)
class EdgeRuntime:
    """Runtime behavior of a single directed edge during the simulation."""

    def __init__(
        self,
        *,
        env: simpy.Environment,
        edge_config: Edge,
        rng: np.random.Generator | None = None,
        target_box: simpy.Store,
        settings: SimulationSettings,
    ) -> None:
        """
        Args:
            env (simpy.Environment): environment driving the simulation
            edge_config (Edge): validated edge description (latency RV,
                dropout rate, id)
            rng (np.random.Generator | None): random generator; a fresh
                default one is created when omitted
            target_box (simpy.Store): inbox of the node this edge delivers to
            settings (SimulationSettings): global settings, used to know
                which sampled metrics are enabled

        """
        self.env = env
        self.edge_config = edge_config
        self.target_box = target_box
        self.rng = rng or np.random.default_rng()
        self.setting = settings
        # Time-series storage only for the edge metrics the user enabled.
        # The periodic persistence (appending snapshots) happens centrally
        # in metrics/collector.py; this class merely exposes current values.
        self._edge_enabled_metrics = build_edge_metrics(
            settings.enabled_sample_metrics,
        )
        # Requests currently "in flight" on this edge.  Not needed for the
        # default metric set, but required once optional metrics scale up.
        self._concurrent_connections: int = 0

    def _deliver(self, state: RequestState) -> Generator[simpy.Event, None, None]:
        """Apply drop + latency to *state*, then put it in the target inbox."""
        # Random variable describing this edge's transit latency.
        random_variable: RVConfig = self.edge_config.latency

        # Bernoulli drop check: a dropped request never reaches the target;
        # it is stamped as finished with a "-dropped" marker in its history.
        uniform_variable = self.rng.uniform()
        if uniform_variable < self.edge_config.dropout_rate:
            state.finish_time = self.env.now
            state.record_hop(
                SystemEdges.NETWORK_CONNECTION,
                f"{self.edge_config.id}-dropped",
                state.finish_time,
            )
            return

        # The connection counts as open only while the request is in transit.
        self._concurrent_connections += 1

        transit_time = general_sampler(random_variable, self.rng)
        yield self.env.timeout(transit_time)

        state.record_hop(
            SystemEdges.NETWORK_CONNECTION,
            self.edge_config.id,
            self.env.now,
        )
        self._concurrent_connections -= 1
        yield self.target_box.put(state)
+ """ + return self.env.process(self._deliver(state)) + + @property + def enabled_metrics(self) -> dict[SampledMetricName, list[float | int]]: + """Read-only access to the metric store.""" + return self._edge_enabled_metrics + + @property + def concurrent_connections(self) -> int: + """Current number of open connections on this edge.""" + return self._concurrent_connections + + + + diff --git a/src/asyncflow/runtime/actors/load_balancer.py b/src/asyncflow/runtime/actors/load_balancer.py new file mode 100644 index 0000000..498fb18 --- /dev/null +++ b/src/asyncflow/runtime/actors/load_balancer.py @@ -0,0 +1,73 @@ +"""Definition of the node represented by the LB in the simulation""" + +from collections.abc import Generator +from typing import TYPE_CHECKING + +import simpy + +from asyncflow.config.constants import LbAlgorithmsName, SystemNodes +from asyncflow.runtime.actors.edge import EdgeRuntime +from asyncflow.runtime.actors.routing.lb_algorithms import ( + least_connections, + round_robin, +) +from asyncflow.schemas.topology.nodes import LoadBalancer + +if TYPE_CHECKING: + from asyncflow.runtime.rqs_state import RequestState + + + +class LoadBalancerRuntime: + """class to define the behaviour of the LB in the simulation""" + + def __init__( + self, + *, + env: simpy.Environment, + lb_config: LoadBalancer, + out_edges: list[EdgeRuntime] | None, + lb_box: simpy.Store, + ) -> None: + """ + Descriprion of the instance attributes for the class + Args: + env (simpy.Environment): env of the simulation + lb_config (LoadBalancer): input to define the lb in the runtime + rqs_state (RequestState): state of the simulation + out_edges (list[EdgeRuntime]): list of edges that connects lb with servers + lb_box (simpy.Store): store to add the state + + """ + self.env = env + self.lb_config = lb_config + self.out_edges = out_edges + self.lb_box = lb_box + self._round_robin_index: int = 0 + + + def _forwarder(self) -> Generator[simpy.Event, None, None]: + """Updtate the state before 
passing it to another node""" + assert self.out_edges is not None + while True: + state: RequestState = yield self.lb_box.get() # type: ignore[assignment] + + state.record_hop( + SystemNodes.LOAD_BALANCER, + self.lb_config.id, + self.env.now, + ) + + if self.lb_config.algorithms == LbAlgorithmsName.ROUND_ROBIN: + out_edge, self._round_robin_index = round_robin( + self.out_edges, + self._round_robin_index, + ) + else: + out_edge = least_connections(self.out_edges) + + out_edge.transport(state) + + def start(self) -> simpy.Process: + """Initialization of the simpy process for the LB""" + return self.env.process(self._forwarder()) diff --git a/src/asyncflow/runtime/actors/routing/lb_algorithms.py b/src/asyncflow/runtime/actors/routing/lb_algorithms.py new file mode 100644 index 0000000..46078f7 --- /dev/null +++ b/src/asyncflow/runtime/actors/routing/lb_algorithms.py @@ -0,0 +1,30 @@ +"""algorithms to simulate the load balancer during the simulation""" + + + +from asyncflow.runtime.actors.edge import EdgeRuntime + + +def least_connections(list_edges: list[EdgeRuntime]) -> EdgeRuntime: + """We send the state to the edge with less concurrent connections""" + concurrent_connections = [edge.concurrent_connections for edge in list_edges] + + idx_min = concurrent_connections.index(min(concurrent_connections)) + + return list_edges[idx_min] + +def round_robin(edges: list[EdgeRuntime], idx: int) -> tuple[EdgeRuntime, int]: + """ + We send states to different server in uniform way by + rotating the list of edges that should transport the state + to the correct server, we rotate the index and not the list + to avoid aliasing since the list is shared by many components + """ + idx %= len(edges) + chosen = edges[idx] + idx = (idx + 1) % len(edges) + return chosen, idx + + + + diff --git a/src/asyncflow/runtime/actors/rqs_generator.py b/src/asyncflow/runtime/actors/rqs_generator.py new file mode 100644 index 0000000..1b67213 --- /dev/null +++ 
class RqsGeneratorRuntime:
    """
    A "node" that produces request contexts at stochastic inter-arrival
    times and immediately pushes them down the pipeline via an EdgeRuntime.
    """

    def __init__(
        self,
        *,
        env: simpy.Environment,
        out_edge: EdgeRuntime | None,
        rqs_generator_data: RqsGenerator,
        sim_settings: SimulationSettings,
        rng: np.random.Generator | None = None,
    ) -> None:
        """
        Definition of the instance attributes for the RqsGeneratorRuntime.

        Args:
            env (simpy.Environment): environment for the simulation
            out_edge (EdgeRuntime | None): edge connecting this node with
                the next one; wired in later by the simulation runner
            rqs_generator_data (RqsGenerator): data to define the sampler
            sim_settings (SimulationSettings): settings to start the simulation
            rng (np.random.Generator | None, optional): random variable
                generator; a default one is created when omitted

        """
        self.rqs_generator_data = rqs_generator_data
        self.sim_settings = sim_settings
        self.rng = rng or np.random.default_rng()
        self.out_edge = out_edge
        self.env = env
        # Monotonic counter used to assign a unique id to every request.
        self.id_counter = 0

    def _next_id(self) -> int:
        """Return the next unique request id (1-based)."""
        self.id_counter += 1
        return self.id_counter

    def _requests_generator(self) -> Generator[float, None, None]:
        """
        Return an iterator of inter-arrival gaps (seconds) according to the
        model chosen in *rqs_generator_data*.

        Notes
        -----
        * If ``avg_active_users.distribution`` is ``Distribution.NORMAL``,
          the Gaussian-Poisson sampler is used.
        * Every other distribution falls back to the Poisson-Poisson sampler.

        """
        dist = self.rqs_generator_data.avg_active_users.distribution

        if dist == Distribution.NORMAL:
            # Gaussian-Poisson model
            return gaussian_poisson_sampling(
                input_data=self.rqs_generator_data,
                sim_settings=self.sim_settings,
                rng=self.rng,
            )

        # Poisson + Poisson
        return poisson_poisson_sampling(
            input_data=self.rqs_generator_data,
            sim_settings=self.sim_settings,
            rng=self.rng,
        )

    def _event_arrival(self) -> Generator[simpy.Event, None, None]:
        """Emit one request per sampled gap and push it onto the out edge."""
        assert self.out_edge is not None

        time_gaps = self._requests_generator()

        for gap in time_gaps:
            yield self.env.timeout(gap)

            state = RequestState(
                id=self._next_id(),
                initial_time=self.env.now,
            )
            state.record_hop(
                SystemNodes.GENERATOR,
                self.rqs_generator_data.id,
                self.env.now,
            )
            # `transport` spawns the edge subprocess that moves the state
            # from this node to the next one.
            self.out_edge.transport(state)

    def start(self) -> simpy.Process:
        """Register the arrival process inside the SimPy environment."""
        return self.env.process(self._event_arrival())
class ServerRuntime:
    """Runtime behavior of a server node during the simulation."""

    def __init__(  # noqa: PLR0913
        self,
        *,
        env: simpy.Environment,
        server_resources: ServerContainers,
        server_config: Server,
        out_edge: EdgeRuntime | None,
        server_box: simpy.Store,
        settings: SimulationSettings,
        rng: np.random.Generator | None = None,
    ) -> None:
        """
        Definition of the instance attributes.

        Args:
            env (simpy.Environment): simpy environment
            server_resources (ServerContainers): CPU/RAM containers built
                from the input for this server
            server_config (Server): parameters defining the server
            out_edge (EdgeRuntime | None): edge to the next node; wired later
            server_box (simpy.Store): inbox with the states to elaborate
            settings (SimulationSettings): general simulation settings
            rng (np.random.Generator | None, optional): random number generator

        """
        self.env = env
        self.server_resources = server_resources
        self.server_config = server_config
        self.out_edge = out_edge
        self.server_box = server_box
        self.rng = rng or np.random.default_rng()
        # Length of the ready (runnable) queue of the event loop.
        self._el_ready_queue_len: int = 0
        # Total RAM currently reserved on this server (MB).
        self._ram_in_use: int | float = 0
        # Length of the I/O-waiting queue of the event loop.
        self._el_io_queue_len: int = 0

        # Time-series storage for the enabled sampled metrics; today all
        # server metrics are mandatory, but this structure keeps the door
        # open for optional metrics later.
        self._server_enabled_metrics = build_server_metrics(
            settings.enabled_sample_metrics,
        )

    # right now we disable the warnings but a refactor will be done soon
    def _handle_request(  # noqa: PLR0915, PLR0912, C901
        self,
        state: RequestState,
    ) -> Generator[simpy.Event, None, None]:
        """
        Execute every step of the endpoint a request lands on.

        Resource model:
        * RAM first, CPU later — the full working set is reserved before
          any core, so memory-starved requests queue without hogging CPUs.
        * Lazy CPU lock — a core token is taken at the FIRST CPU step and
          held across consecutive CPU steps; it is released on each I/O
          step and re-acquired at the next CPU step.  This mirrors a real
          async Python server, where the worker holds the GIL only during
          CPU-bound code and releases it on each await.
        """
        # Register this hop in the request's history.
        state.record_hop(
            SystemNodes.SERVER,
            self.server_config.id,
            self.env.now,
        )

        endpoints_list = self.server_config.endpoints
        endpoints_number = len(endpoints_list)

        # Endpoint choice is uniform for now; a user-defined distribution
        # may be supported later.
        selected_endpoint_idx = self.rng.integers(low=0, high=endpoints_number)
        selected_endpoint = endpoints_list[selected_endpoint_idx]

        # Total RAM needed by the endpoint (sum over its RAM steps).
        total_ram = sum(
            step.step_operation[StepOperation.NECESSARY_RAM]
            for step in selected_endpoint.steps
            if isinstance(step.kind, EndpointStepRAM)
        )

        # Reserve the working set before competing for cores.
        if total_ram:
            yield self.server_resources[ServerResourceName.RAM.value].get(total_ram)
            self._ram_in_use += total_ram

        # Initial conditions: a request starts outside every queue and
        # holds no core until it begins to be elaborated.
        core_locked = False
        is_in_io_queue = False
        is_in_ready_queue = False

        # --- Step execution ---
        # The three booleans prevent double counting when consecutive steps
        # share a kind (CPU-CPU or I/O-I/O) and keep the global queue
        # counters consistent across CPU <-> I/O transitions.  Edge cases:
        # a first-step I/O joins the I/O queue without touching the ready
        # queue; at endpoint completion the local flags are cleared so no
        # "ghost" entries remain in the counters.
        for step in selected_endpoint.steps:

            if step.kind in EndpointStepCPU:
                # Acquire the core only on the first of a run of CPU steps;
                # consecutive CPU steps keep the token they already hold.
                if not core_locked:
                    core_locked = True

                    if is_in_io_queue:
                        is_in_io_queue = False
                        self._el_io_queue_len -= 1

                    if not is_in_ready_queue:
                        is_in_ready_queue = True
                        self._el_ready_queue_len += 1

                    yield self.server_resources[ServerResourceName.CPU.value].get(1)

                cpu_time = step.step_operation[StepOperation.CPU_TIME]
                # Run the CPU step, yielding control back to the simpy env.
                yield self.env.timeout(cpu_time)

            # step.kind is an Enum member, so membership tests against the
            # Enum class identify the step family.
            elif step.kind in EndpointStepIO:
                io_time = step.step_operation[StepOperation.IO_WAITING_TIME]

                # First I/O step of a run without a core: enter the I/O
                # queue exactly once (no double counting on repeats).
                if not core_locked and not is_in_io_queue:
                    is_in_io_queue = True
                    self._el_io_queue_len += 1

                if core_locked:
                    # Leaving a CPU run: a held core implies we were in the
                    # ready queue, so drop out of it...
                    if is_in_ready_queue:
                        is_in_ready_queue = False
                        self._el_ready_queue_len -= 1

                    # ...enter the I/O queue...
                    if not is_in_io_queue:
                        is_in_io_queue = True
                        self._el_io_queue_len += 1

                    # ...and give the core back while waiting.
                    yield self.server_resources[ServerResourceName.CPU.value].put(1)
                    core_locked = False
                yield self.env.timeout(io_time)  # Wait without holding a CPU core

        # Endpoint done: leave whichever queue we were in, release the
        # core (if still held) and finally the reserved RAM.
        if core_locked:
            is_in_ready_queue = False
            self._el_ready_queue_len -= 1
            yield self.server_resources[ServerResourceName.CPU.value].put(1)
        else:
            is_in_io_queue = False
            self._el_io_queue_len -= 1

        if total_ram:
            self._ram_in_use -= total_ram
            yield self.server_resources[ServerResourceName.RAM.value].put(total_ram)

        assert self.out_edge is not None
        self.out_edge.transport(state)

    # Read-only accessors: the sampled-metric collector reads these private
    # counters periodically, so they are exposed as properties.
    @property
    def ready_queue_len(self) -> int:
        """Current length of the event-loop ready queue for this server."""
        return self._el_ready_queue_len

    @property
    def io_queue_len(self) -> int:
        """Current length of the event-loop I/O queue for this server."""
        return self._el_io_queue_len

    @property
    def ram_in_use(self) -> int | float:
        """Total RAM (MB) currently reserved by active requests."""
        return self._ram_in_use

    @property
    def enabled_metrics(self) -> dict[SampledMetricName, list[float | int]]:
        """Read-only access to the metric store."""
        return self._server_enabled_metrics
+ """ + while True: + # Wait for a request to arrive in the server's inbox + raw_state = yield self.server_box.get() + request_state = cast("RequestState", raw_state) + # Spawn a new, independent process to handle this request + self.env.process(self._handle_request(request_state)) + + def start(self) -> simpy.Process: + """Generate the process to simulate the server inside simpy env""" + return self.env.process(self._dispatcher()) + diff --git a/src/asyncflow/runtime/rqs_state.py b/src/asyncflow/runtime/rqs_state.py new file mode 100644 index 0000000..71b8389 --- /dev/null +++ b/src/asyncflow/runtime/rqs_state.py @@ -0,0 +1,51 @@ +"""Data structures representing the life-cycle of a single request.""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, NamedTuple + +if TYPE_CHECKING: + from asyncflow.config.constants import SystemEdges, SystemNodes + + +class Hop(NamedTuple): + """A single traversal of a node or edge.""" + + component_type: SystemNodes | SystemEdges + component_id: str + timestamp: float + + +@dataclass +class RequestState: + """Mutable state carried by each request throughout the simulation.""" + + id: int + initial_time: float + finish_time: float | None = None + history: list[Hop] = field(default_factory=list) + + # ------------------------------------------------------------------ # + # API # + # ------------------------------------------------------------------ # + + def record_hop( + self, + component_type: SystemNodes | SystemEdges, + component_id: str, + now: float, + ) -> None: + """Append a new hop in chronological order.""" + self.history.append(Hop(component_type, component_id, now)) + + # ------------------------------------------------------------------ # + # Derived metrics # + # ------------------------------------------------------------------ # + + @property + def latency(self) -> float | None: + """Total time inside the system or ``None`` if not yet completed.""" + 
if self.finish_time is None: + return None + return self.finish_time - self.initial_time diff --git a/src/asyncflow/runtime/simulation_runner.py b/src/asyncflow/runtime/simulation_runner.py new file mode 100644 index 0000000..5d112ae --- /dev/null +++ b/src/asyncflow/runtime/simulation_runner.py @@ -0,0 +1,313 @@ +"""Components to run the whole simulation given specific input data""" + +from itertools import chain +from pathlib import Path +from typing import TYPE_CHECKING, Protocol, cast + +import numpy as np +import simpy +import yaml + +from asyncflow.metrics.analyzer import ResultsAnalyzer +from asyncflow.metrics.collector import SampledMetricCollector +from asyncflow.resources.registry import ResourcesRuntime +from asyncflow.runtime.actors.client import ClientRuntime +from asyncflow.runtime.actors.edge import EdgeRuntime +from asyncflow.runtime.actors.load_balancer import LoadBalancerRuntime +from asyncflow.runtime.actors.rqs_generator import RqsGeneratorRuntime +from asyncflow.runtime.actors.server import ServerRuntime +from asyncflow.schemas.payload import SimulationPayload + +if TYPE_CHECKING: + from collections.abc import Iterable + + from asyncflow.schemas.topology.edges import Edge + from asyncflow.schemas.topology.nodes import ( + Client, + LoadBalancer, + Server, + ) + from asyncflow.schemas.workload.rqs_generator import RqsGenerator + +# --- PROTOCOL DEFINITION --- +# This is the contract that all runtime actors must follow. +# it is a contract useful to communicate to mypy that object of +# startable type have all the method start +class Startable(Protocol): + """A protocol for runtime actors that can be started.""" + + def start(self) -> simpy.Process: + """Starts the main process loop for the actor.""" + ... + +class SimulationRunner: + """Class to handle the simulation""" + + def __init__( + self, + *, + env: simpy.Environment, + simulation_input: SimulationPayload, + ) -> None: + """ + Orchestrates building, wiring and running all actor runtimes. 
    def _make_inbox(self) -> simpy.Store:  # local helper
        """Create a fresh Store used as a node's inbox for request states."""
        return simpy.Store(self.env)

    def _build_rqs_generator(self) -> None:
        """
        Build the request-generator runtime.

        A dict is used even though there is currently a single generator:
        future topologies (e.g. with CDNs) may need one generator per
        client, and the id-indexed dict keeps the later edge-wiring step
        uniform across node kinds.
        """
        # out_edge is wired afterwards by _build_edges().
        self._rqs_runtime[self.rqs_generator.id] = RqsGeneratorRuntime(
            env=self.env,
            out_edge=None,
            rqs_generator_data=self.rqs_generator,
            sim_settings=self.simulation_settings,
            rng=self.rng,
        )
dict + with all components indexed by their id + """ + self._client_runtime[self.client.id] = ClientRuntime( + env=self.env, + out_edge=None, + completed_box=self._make_inbox(), + client_box=self._make_inbox(), + client_config=self.client, + ) + + + def _build_servers(self) -> None: + """ + Build given the input data a dict containing all server Runtime + indexed by their unique id + """ + registry = ResourcesRuntime( + env=self.env, + data=self.simulation_input.topology_graph, + ) + for server in self.servers: + container = registry[server.id] + self._servers_runtime[server.id] = ServerRuntime( + env=self.env, + server_resources=container, + server_config=server, + out_edge=None, + server_box=self._make_inbox(), + settings=self.simulation_settings, + rng= self.rng, + + ) + + + def _build_load_balancer(self) -> None: + """ + Build given the input data the load balancer runtime we will + use a dict because we may have multiple load balancer and we + will be usefull to assign outer edges + """ + # Topologies without a LB are perfectly legal (e.g. the “minimal” + # integration test). Early-return instead of asserting. 
+ if self.simulation_input.topology_graph.nodes.load_balancer is None: + return + + self.lb = self.simulation_input.topology_graph.nodes.load_balancer + + self._lb_runtime[self.lb.id] = LoadBalancerRuntime( + env=self.env, + lb_config=self.lb, + out_edges= [], + lb_box=self._make_inbox(), + ) + + + def _build_edges(self) -> None: + """Initialization of the edges runtime dictionary from the input data""" + # We need to merge all previous dictionary for the nodes to assign + # for each edge the correct target box + all_nodes: dict[str, object] = { + **self._servers_runtime, + **self._client_runtime, + **self._lb_runtime, + **self._rqs_runtime, +} + + for edge in self.edges: + + target_object = all_nodes[edge.target] # O(1) lookup + + if isinstance(target_object, ServerRuntime): + target_box = target_object.server_box + elif isinstance(target_object, ClientRuntime): + target_box = target_object.client_box + elif isinstance(target_object, LoadBalancerRuntime): + target_box = target_object.lb_box + else: + msg = f"Unknown runtime for {edge.target!r}" + raise TypeError(msg) + + self._edges_runtime[(edge.source, edge.target)] = ( + EdgeRuntime( + env=self.env, + edge_config=edge, + rng=self.rng, + target_box= target_box, + settings=self.simulation_settings, + ) + ) + # Here we assign the outer edges to all nodes + source_object = all_nodes[edge.source] + + if isinstance(source_object, ( + ServerRuntime, + ClientRuntime, + RqsGeneratorRuntime, + )): + source_object.out_edge = self._edges_runtime[( + edge.source, + edge.target) + ] + elif isinstance(source_object, LoadBalancerRuntime): + assert source_object.out_edges is not None + source_object.out_edges.append(self._edges_runtime[( + edge.source, + edge.target, + ) + ]) + + else: + msg = f"Unknown runtime for {edge.source!r}" + raise TypeError(msg) + + + # ------------------------------------------------------------------ # + # RUN phase # + # ------------------------------------------------------------------ # + def 
_start_all_processes(self) -> None: + """Register every .start() in the environment.""" + # ------------------------------------------------------------------ + # Start every actor's main coroutine + # + # * itertools.chain lazily stitches together the four dict_views + # into ONE iterator. No temporary list is built, zero extra + # allocations, yet the for-loop stays single and readable. + # * Order matters only for determinism, so we keep the natural + # “generator → client → servers → LB” sequence by listing the + # dicts explicitly. + # * Alternative ( list(a)+list(b)+… ) would copy thousands of + # references just to throw them away after the loop - wasteful. + # ------------------------------------------------------------------ + + runtimes = chain( + self._rqs_runtime.values(), + self._client_runtime.values(), + self._servers_runtime.values(), + self._lb_runtime.values(), + ) + + # Here we are saying to mypy that those object are of + # the startable type and they share the start method + for rt in cast("Iterable[Startable]", runtimes): + rt.start() + + def _start_metric_collector(self) -> None: + """One coroutine that snapshots RAM / queues / connections.""" + SampledMetricCollector( + edges=list(self._edges_runtime.values()), + servers=list(self._servers_runtime.values()), + env=self.env, + sim_settings=self.simulation_settings, + ).start() + + # ------------------------------------------------------------------ # + # Public entry-point # + # ------------------------------------------------------------------ # + def run(self) -> ResultsAnalyzer: + """Build → wire → start → run the clock → return `ResultsAnalyzer`""" + # 1. BUILD + self._build_rqs_generator() + self._build_client() + self._build_servers() + self._build_load_balancer() + + # 2. WIRE + self._build_edges() + + # 3. START ALL COROUTINES + self._start_all_processes() + self._start_metric_collector() + + # 4. 
ADVANCE THE SIMULATION + self.env.run(until=self.simulation_settings.total_simulation_time) + + return ResultsAnalyzer( + client=next(iter(self._client_runtime.values())), + servers=list(self._servers_runtime.values()), + edges=list(self._edges_runtime.values()), + settings=self.simulation_settings, + ) + + # ------------------------------------------------------------------ # + # Convenience constructor (load from YAML) # + # ------------------------------------------------------------------ # + @classmethod + def from_yaml( + cls, + *, + env: simpy.Environment, + yaml_path: str | Path, + ) -> "SimulationRunner": + """ + Quick helper so that integration tests & CLI can do: + + ```python + runner = SimulationRunner.from_yaml(env, "scenario.yml") + results = runner.run() + ``` + """ + data = yaml.safe_load(Path(yaml_path).read_text()) + payload = SimulationPayload.model_validate(data) + return cls(env=env, simulation_input=payload) + + + diff --git a/src/asyncflow/samplers/common_helpers.py b/src/asyncflow/samplers/common_helpers.py new file mode 100644 index 0000000..4f2f675 --- /dev/null +++ b/src/asyncflow/samplers/common_helpers.py @@ -0,0 +1,89 @@ +"""Helpers function for the request generator""" + + +import numpy as np + +from asyncflow.config.constants import Distribution +from asyncflow.schemas.common.random_variables import RVConfig + + +def uniform_variable_generator(rng: np.random.Generator) -> float: + """Return U~Uniform(0, 1).""" + # rng is guaranteed to be a valid np.random.Generator due to the type signature. 
+    return rng.random()
+
+def poisson_variable_generator(
+    mean: float,
+    rng: np.random.Generator,
+) -> float:
+    """Return a Poisson-distributed integer with expectation *mean*."""
+    return rng.poisson(mean)
+
+def truncated_gaussian_generator(
+    mean: float,
+    variance: float,
+    rng: np.random.Generator,
+) -> float:
+    """
+    Generate a Normal-distributed variable
+    with mean and variance
+    """
+    # NOTE(review): numpy's Generator.normal takes *sigma* (std dev) as its
+    # second argument, but a variance is passed here — confirm this is intended.
+    value = rng.normal(mean, variance)
+    return max(0.0, value)
+
+def lognormal_variable_generator(
+    mean: float,
+    variance: float,
+    rng: np.random.Generator,
+) -> float:
+    """Return a log-normally distributed float with parameters *mean* and *variance*."""
+    return rng.lognormal(mean, variance)
+
+def exponential_variable_generator(
+    mean: float,
+    rng: np.random.Generator,
+) -> float:
+    """Return an exponentially-distributed float with mean *mean*."""
+    return float(rng.exponential(mean))
+
+def general_sampler(random_variable: RVConfig, rng: np.random.Generator) -> float:
+    """
+    Draw one sample from the distribution described by *random_variable*.
+
+    Only **Normal** and **Log-normal** require an explicit ``variance``.
+    For **Uniform**, **Poisson** and **Exponential** the mean is enough.
+    """
+    dist = random_variable.distribution
+    mean = random_variable.mean
+    var = random_variable.variance
+
+    match dist:
+        # ── No extra parameters needed ──────────────────────────────────
+        case Distribution.UNIFORM:
+            # Variance is meaningless for an ad-hoc uniform [0, 1) helper.
+ assert var is None + return uniform_variable_generator(rng) + + case Distribution.POISSON: + # λ == mean ; numpy returns ints → cast to float for consistency + assert var is None + return float(poisson_variable_generator(mean, rng)) + + case Distribution.EXPONENTIAL: + # β (scale) == mean ; nothing else required + assert var is None + return exponential_variable_generator(mean, rng) + + # ── Distributions that *do* need a variance parameter ─────────── + case Distribution.NORMAL: + assert var is not None + return truncated_gaussian_generator(mean, var, rng) + + case Distribution.LOG_NORMAL: + assert var is not None + return lognormal_variable_generator(mean, var, rng) + + # ── Anything else is unsupported ──────────────────────────────── + case _: + msg = f"Unsupported distribution: {dist}" + raise ValueError(msg) diff --git a/src/asyncflow/samplers/gaussian_poisson.py b/src/asyncflow/samplers/gaussian_poisson.py new file mode 100644 index 0000000..b96eca5 --- /dev/null +++ b/src/asyncflow/samplers/gaussian_poisson.py @@ -0,0 +1,94 @@ +""" +event sampler in the case of gaussian distribution +for concurrent user and poisson distribution for rqs per minute per user. +The rationale behind this choice is about considering scenario +with variance bigger or smaller w.r.t the one inherited from +the Poisson distribution +""" + +import math +from collections.abc import Generator + +import numpy as np + +from asyncflow.config.constants import TimeDefaults +from asyncflow.samplers.common_helpers import ( + truncated_gaussian_generator, + uniform_variable_generator, +) +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + + +def gaussian_poisson_sampling( + input_data: RqsGenerator, + sim_settings: SimulationSettings, + *, + rng: np.random.Generator, +) -> Generator[float, None, None]: + """ + Yield inter-arrival gaps (seconds) for the compound Gaussian-Poisson process. 
+ + Algorithm + --------- + 1. Every *sampling_window_s* seconds, draw + U ~ Gaussian(mean_concurrent_user, variance). + 2. Compute the aggregate rate + Λ = U * (mean_req_per_minute_per_user / 60) [req/s]. + 3. While inside the current window, draw gaps + Δt ~ Exponential(Λ) using inverse-CDF. + 4. Stop once the virtual clock exceeds *total_simulation_time*. + """ + simulation_time = sim_settings.total_simulation_time + user_sampling_window = input_data.user_sampling_window + + # λ_u : mean concurrent users per window + mean_concurrent_user = float(input_data.avg_active_users.mean) + + # Let's be sure that the variance is not None (guaranteed from pydantic) + variance_concurrent_user = input_data.avg_active_users.variance + assert variance_concurrent_user is not None + variance_concurrent_user = float(variance_concurrent_user) + + # λ_r / 60 : mean req/s per user + mean_req_per_sec_per_user = ( + float( + input_data.avg_request_per_minute_per_user.mean) + / TimeDefaults.MIN_TO_SEC + ) + + now = 0.0 # virtual clock (s) + window_end = 0.0 # end of the current user window + lam = 0.0 # aggregate rate Λ (req/s) + + while now < simulation_time: + # (Re)sample U at the start of each window + if now >= window_end: + window_end = now + float(user_sampling_window) + users = truncated_gaussian_generator( + mean_concurrent_user, + variance_concurrent_user, + rng, + ) + lam = users * mean_req_per_sec_per_user + + # No users → fast-forward to next window + if lam <= 0.0: + now = window_end + continue + + # Exponential gap from a protected uniform value + u_raw = max(uniform_variable_generator(rng), 1e-15) + delta_t = -math.log(1.0 - u_raw) / lam + + # End simulation if the next event exceeds the horizon + if now + delta_t > simulation_time: + break + + # If the gap crosses the window boundary, jump to it + if now + delta_t >= window_end: + now = window_end + continue + + now += delta_t + yield delta_t diff --git a/src/asyncflow/samplers/poisson_poisson.py 
b/src/asyncflow/samplers/poisson_poisson.py new file mode 100644 index 0000000..ea7a4fb --- /dev/null +++ b/src/asyncflow/samplers/poisson_poisson.py @@ -0,0 +1,82 @@ +""" +event sampler in the case of poisson distribution +both for concurrent user and rqs per minute per user +""" + +import math +from collections.abc import Generator + +import numpy as np + +from asyncflow.config.constants import TimeDefaults +from asyncflow.samplers.common_helpers import ( + poisson_variable_generator, + uniform_variable_generator, +) +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + + +def poisson_poisson_sampling( + input_data: RqsGenerator, + sim_settings: SimulationSettings, + *, + rng: np.random.Generator, +) -> Generator[float, None, None]: + """ + Yield inter-arrival gaps (seconds) for the compound Poisson-Poisson process. + + Algorithm + --------- + 1. Every sampling_window_s seconds, draw + U ~ Poisson(mean_concurrent_user). + 2. Compute the aggregate rate + Λ = U * (mean_req_per_minute_per_user / 60) [req/s]. + 3. While inside the current window, draw gaps + Δt ~ Exponential(Λ) using inverse-CDF. + 4. Stop once the virtual clock exceeds *total_simulation_time*. 
+ """ + simulation_time = sim_settings.total_simulation_time + user_sampling_window = input_data.user_sampling_window + + # λ_u : mean concurrent users per window + mean_concurrent_user = float(input_data.avg_active_users.mean) + + # λ_r / 60 : mean req/s per user + mean_req_per_sec_per_user = ( + float( + input_data.avg_request_per_minute_per_user.mean) + / TimeDefaults.MIN_TO_SEC + ) + + now = 0.0 # virtual clock (s) + window_end = 0.0 # end of the current user window + lam = 0.0 # aggregate rate Λ (req/s) + + while now < simulation_time: + # (Re)sample U at the start of each window + if now >= window_end: + window_end = now + float(user_sampling_window) + users = poisson_variable_generator(mean_concurrent_user, rng) + lam = users * mean_req_per_sec_per_user + + # No users → fast-forward to next window + if lam <= 0.0: + now = window_end + continue + + # Exponential gap from a protected uniform value + u_raw = max(uniform_variable_generator(rng), 1e-15) + delta_t = -math.log(1.0 - u_raw) / lam + + # End simulation if the next event exceeds the horizon + if now + delta_t > simulation_time: + break + + # If the gap crosses the window boundary, jump to it + if now + delta_t >= window_end: + now = window_end + continue + + now += delta_t + yield delta_t diff --git a/src/asyncflow/schemas/common/__init__.py b/src/asyncflow/schemas/common/__init__.py new file mode 100644 index 0000000..206bfb9 --- /dev/null +++ b/src/asyncflow/schemas/common/__init__.py @@ -0,0 +1 @@ +"""Shared, reusable primitives for schema modules (e.g., RVConfig).""" diff --git a/src/asyncflow/schemas/common/random_variables.py b/src/asyncflow/schemas/common/random_variables.py new file mode 100644 index 0000000..d827b92 --- /dev/null +++ b/src/asyncflow/schemas/common/random_variables.py @@ -0,0 +1,37 @@ +"""Definition of the schema for a Random variable""" + +from pydantic import BaseModel, field_validator, model_validator + +from asyncflow.config.constants import Distribution + + +class 
RVConfig(BaseModel): + """class to configure random variables""" + + mean: float + distribution: Distribution = Distribution.POISSON + variance: float | None = None + + @field_validator("mean", mode="before") + def ensure_mean_is_numeric_and_positive( + cls, # noqa: N805 + v: float, + ) -> float: + """Ensure `mean` is numeric, then coerce to float.""" + err_msg = "mean must be a number (int or float)" + if not isinstance(v, (float, int)): + raise ValueError(err_msg) # noqa: TRY004 + + return float(v) + + @model_validator(mode="after") # type: ignore[arg-type] + def default_variance(cls, model: "RVConfig") -> "RVConfig": # noqa: N805 + """Set variance = mean when distribution require and variance is missing.""" + needs_variance: set[Distribution] = { + Distribution.NORMAL, + Distribution.LOG_NORMAL, + } + + if model.variance is None and model.distribution in needs_variance: + model.variance = model.mean + return model diff --git a/src/asyncflow/schemas/payload.py b/src/asyncflow/schemas/payload.py new file mode 100644 index 0000000..3c889e4 --- /dev/null +++ b/src/asyncflow/schemas/payload.py @@ -0,0 +1,15 @@ +"""Definition of the full input for the simulation""" + +from pydantic import BaseModel + +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.topology.graph import TopologyGraph +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + + +class SimulationPayload(BaseModel): + """Full input structure to perform a simulation""" + + rqs_input: RqsGenerator + topology_graph: TopologyGraph + sim_settings: SimulationSettings diff --git a/src/asyncflow/schemas/settings/simulation.py b/src/asyncflow/schemas/settings/simulation.py new file mode 100644 index 0000000..7f0d145 --- /dev/null +++ b/src/asyncflow/schemas/settings/simulation.py @@ -0,0 +1,46 @@ +"""define a class with the global settings for the simulation""" + +from pydantic import BaseModel, Field + +from asyncflow.config.constants import ( + 
EventMetricName, + SampledMetricName, + SamplePeriods, + TimeDefaults, +) + + +class SimulationSettings(BaseModel): + """Global parameters that apply to the whole run.""" + + total_simulation_time: int = Field( + default=TimeDefaults.SIMULATION_TIME, + ge=TimeDefaults.MIN_SIMULATION_TIME, + description="Simulation horizon in seconds.", + ) + + # These represent the mandatory metrics to collect + enabled_sample_metrics: set[SampledMetricName] = Field( + default_factory=lambda: { + SampledMetricName.READY_QUEUE_LEN, + SampledMetricName.EVENT_LOOP_IO_SLEEP, + SampledMetricName.RAM_IN_USE, + SampledMetricName.EDGE_CONCURRENT_CONNECTION, + }, + description="Which time-series KPIs to collect by default.", + ) + enabled_event_metrics: set[EventMetricName] = Field( + default_factory=lambda: { + EventMetricName.RQS_CLOCK, + }, + description="Which per-event KPIs to collect by default.", + ) + + sample_period_s: float = Field( + default = SamplePeriods.STANDARD_TIME, + ge = SamplePeriods.MINIMUM_TIME, + le = SamplePeriods.MAXIMUM_TIME, + description="constant interval of time to build time series for metrics", + ) + + diff --git a/src/asyncflow/schemas/topology/edges.py b/src/asyncflow/schemas/topology/edges.py new file mode 100644 index 0000000..6e3d03b --- /dev/null +++ b/src/asyncflow/schemas/topology/edges.py @@ -0,0 +1,99 @@ +""" +Define the property of the edges of the system representing +links between different nodes +""" + +from pydantic import ( + BaseModel, + Field, + field_validator, + model_validator, +) +from pydantic_core.core_schema import ValidationInfo + +from asyncflow.config.constants import ( + NetworkParameters, + SystemEdges, +) +from asyncflow.schemas.common.random_variables import RVConfig + +#------------------------------------------------------------- +# Definition of the edges structure for the graph representing +# the topoogy of the system defined for the simulation +#------------------------------------------------------------- + +class 
Edge(BaseModel):
+    """
+    A directed connection in the topology graph.
+
+    Attributes
+    ----------
+    id : str
+        Unique identifier of this edge.
+    source : str
+        Identifier of the source node (where the request comes from).
+    target : str
+        Identifier of the destination node (where the request goes to).
+    latency : RVConfig
+        Random-variable configuration for network latency on this link.
+    dropout_rate : float
+        Probability of dropping a request that traverses this link.
+        Must be in [0.0, 1.0]. Defaults to ``NetworkParameters.DROPOUT_RATE``.
+    edge_type : SystemEdges
+        Category of the link (e.g. network, queue, stream).
+
+    """
+
+    id: str
+    source: str
+    target: str
+    latency: RVConfig
+    edge_type: SystemEdges = SystemEdges.NETWORK_CONNECTION
+    dropout_rate: float = Field(
+        NetworkParameters.DROPOUT_RATE,
+        ge = NetworkParameters.MIN_DROPOUT_RATE,
+        le = NetworkParameters.MAX_DROPOUT_RATE,
+        description=(
+            "for each nodes representing a network we define"
+            "a probability to drop the request"
+        ),
+    )
+
+    # The check on the latency's mean and variance lives here, and not in
+    # RVConfig, to provide better error handling: the message can point
+    # directly at the edge that carries the invalid value.
+    @field_validator("latency", mode="after")
+    def ensure_latency_is_non_negative(
+        cls,  # noqa: N805
+        v: RVConfig,
+        info: ValidationInfo,
+    ) -> RVConfig:
+        """Ensure the latency's mean is positive and its variance non-negative."""
+        mean = v.mean
+        variance = v.variance
+
+        # We can get the edge ID from the validation context for a better error message
+        edge_id = info.data.get("id", "unknown")
+
+        if mean <= 0:
+            msg = f"The mean latency of the edge '{edge_id}' must be positive"
+            raise ValueError(msg)
+        if variance is not None and variance < 0:  # Variance can be zero
+            msg = (
+                f"The variance of the 
latency of the edge {edge_id}" + "must be non negative" + ) + raise ValueError(msg) + return v + + + @model_validator(mode="after") # type: ignore[arg-type] + def check_src_trgt_different(cls, model: "Edge") -> "Edge": # noqa: N805 + """Ensure source is different from target""" + if model.source == model.target: + msg = "source and target must be different nodes" + raise ValueError(msg) + return model + + diff --git a/src/asyncflow/schemas/topology/endpoint.py b/src/asyncflow/schemas/topology/endpoint.py new file mode 100644 index 0000000..aa91c7b --- /dev/null +++ b/src/asyncflow/schemas/topology/endpoint.py @@ -0,0 +1,102 @@ +"""Defining the input schema for the requests handler""" + +from pydantic import ( + BaseModel, + PositiveFloat, + PositiveInt, + field_validator, + model_validator, +) + +from asyncflow.config.constants import ( + EndpointStepCPU, + EndpointStepIO, + EndpointStepRAM, + StepOperation, +) + + +class Step(BaseModel): + """ + Steps to be executed inside an endpoint in terms of + the resources needed to accomplish the single step + """ + + kind: EndpointStepIO | EndpointStepCPU | EndpointStepRAM + step_operation: dict[StepOperation, PositiveFloat | PositiveInt] + + @field_validator("step_operation", mode="before") + def ensure_non_empty( + cls, # noqa: N805 + v: dict[StepOperation, PositiveFloat | PositiveInt], + ) -> dict[StepOperation, PositiveFloat | PositiveInt]: + """Ensure the dict step operation exist""" + if not v: + msg = "step_operation cannot be empty" + raise ValueError(msg) + return v + + @model_validator(mode="after") # type: ignore[arg-type] + def ensure_coherence_type_operation( + cls, # noqa: N805 + model: "Step", + ) -> "Step": + """ + Validation to couple kind and operation only when they are + valid for example ram cannot have associated a cpu time + """ + operation_keys = set(model.step_operation) + + # Control of the length of the set to be sure only on key is passed + if len(operation_keys) != 1: + msg = "step_operation 
must contain exactly one entry" + raise ValueError(msg) + + # Coherence CPU bound operation and operation + if ( + isinstance(model.kind, EndpointStepCPU) + and operation_keys != {StepOperation.CPU_TIME} + ): + msg = ( + "The operation to quantify a CPU BOUND step" + f"must be {StepOperation.CPU_TIME}" + ) + raise ValueError(msg) + + # Coherence RAM operation and operation + if ( + isinstance(model.kind, EndpointStepRAM) + and operation_keys != {StepOperation.NECESSARY_RAM} + ): + msg = ( + "The operation to quantify a RAM step" + f"must be {StepOperation.NECESSARY_RAM}" + ) + raise ValueError(msg) + + # Coherence I/O operation and operation + if ( + isinstance(model.kind, EndpointStepIO) + and operation_keys != {StepOperation.IO_WAITING_TIME} + ): + + msg = f"An I/O step must use {StepOperation.IO_WAITING_TIME}" + raise ValueError(msg) + + return model + + + + +class Endpoint(BaseModel): + """full endpoint structure to be validated with pydantic""" + + endpoint_name: str + steps: list[Step] + + @field_validator("endpoint_name", mode="before") + def name_to_lower(cls, v: str) -> str: # noqa: N805 + """Standardize endpoint name to be lowercase""" + return v.lower() + + diff --git a/src/asyncflow/schemas/topology/graph.py b/src/asyncflow/schemas/topology/graph.py new file mode 100644 index 0000000..91cf857 --- /dev/null +++ b/src/asyncflow/schemas/topology/graph.py @@ -0,0 +1,159 @@ +""" +Define the topology of the system as a directed graph +where nodes represents macro structure (server, client ecc ecc) +and edges how these strcutures are connected and the network +latency necessary for the requests generated to move from +one structure to another +""" + +from collections import Counter + +from pydantic import ( + BaseModel, + model_validator, +) + +from asyncflow.schemas.topology.edges import Edge +from asyncflow.schemas.topology.nodes import TopologyNodes + +#------------------------------------------------------------- +# Definition of the Graph structure 
representing +# the topogy of the system defined for the simulation +#------------------------------------------------------------- + +class TopologyGraph(BaseModel): + """ + data collection for the whole graph representing + the full system + """ + + nodes: TopologyNodes + edges: list[Edge] + + @model_validator(mode="after") # type: ignore[arg-type] + def unique_ids( + cls, # noqa: N805 + model: "TopologyGraph", + ) -> "TopologyGraph": + """Check that all id are unique""" + counter = Counter(edge.id for edge in model.edges) + duplicate = [edge_id for edge_id, value in counter.items() if value > 1] + if duplicate: + msg = f"There are multiple edges with the following ids {duplicate}" + raise ValueError(msg) + return model + + + @model_validator(mode="after") # type: ignore[arg-type] + def edge_refs_valid( + cls, # noqa: N805 + model: "TopologyGraph", + ) -> "TopologyGraph": + """ + Validate that the graph is self-consistent. + + * All targets must be nodes declared in ``m.nodes``. + * External IDs are allowed as sources (entry points, generator) but + they must never appear as a target anywhere else. + """ + # ------------------------------------------------------------------ + # 1. Collect declared node IDs (servers, client, optional LB) + # ------------------------------------------------------------------ + node_ids: set[str] = {srv.id for srv in model.nodes.servers} + node_ids.add(model.nodes.client.id) + if model.nodes.load_balancer is not None: + node_ids.add(model.nodes.load_balancer.id) + + # ------------------------------------------------------------------ + # 2. Scan every edge once + # ------------------------------------------------------------------ + external_sources: set[str] = set() + + for edge in model.edges: + # ── Rule 1: target must be a declared node + if edge.target not in node_ids: + msg = ( + f"Edge {edge.source}->{edge.target} references " + f"unknown target node '{edge.target}'." 
+ ) + raise ValueError(msg) + + # Collect any source that is not a declared node + if edge.source not in node_ids: + external_sources.add(edge.source) + + # ------------------------------------------------------------------ + # 3. Ensure external sources never appear as targets elsewhere + # ------------------------------------------------------------------ + forbidden_targets = external_sources & {e.target for e in model.edges} + if forbidden_targets: + msg = ( + "External IDs cannot be used as targets as well:" + f"{sorted(forbidden_targets)}" + ) + raise ValueError(msg) + + return model + + @model_validator(mode="after") # type: ignore[arg-type] + def valid_load_balancer(cls, model: "TopologyGraph") -> "TopologyGraph": # noqa: N805 + """ + Check the validity of the load balancer: first we check + if is present in the simulation, second we check if the LB list + is a proper subset of the server sets of ids, then we check if + edge from LB to the servers are well defined + """ + lb = model.nodes.load_balancer + if lb is None: + return model + + server_ids = {s.id for s in model.nodes.servers} + + # 1) LB list ⊆ server_ids + missing = lb.server_covered - server_ids + if missing: + + msg = (f"Load balancer '{lb.id}'" + f"references unknown servers: {sorted(missing)}") + raise ValueError(msg) + + # edge are well defined + targets_from_lb = {e.target for e in model.edges if e.source == lb.id} + not_linked = lb.server_covered - targets_from_lb + if not_linked: + msg = ( + f"Servers {sorted(not_linked)} are covered by LB '{lb.id}' " + "but have no outgoing edge from it." 
+ ) + + raise ValueError(msg) + + return model + + + @model_validator(mode="after") # type: ignore[arg-type] + def no_fanout_except_lb(cls, model: "TopologyGraph") -> "TopologyGraph": # noqa: N805 + """Ensure only the LB (declared node) can have multiple outgoing edges.""" + lb_id = model.nodes.load_balancer.id if model.nodes.load_balancer else None + + # let us consider only nodes declared in the topology + node_ids: set[str] = {server.id for server in model.nodes.servers} + node_ids.add(model.nodes.client.id) + if lb_id: + node_ids.add(lb_id) + + counts: dict[str, int] = {} + for edge in model.edges: + if edge.source not in node_ids: + continue + counts[edge.source] = counts.get(edge.source, 0) + 1 + + offenders = [src for src, c in counts.items() if c > 1 and src != lb_id] + if offenders: + msg = ( + "Only the load balancer can have multiple outgoing edges. " + f"Offending sources: {offenders}" + ) + raise ValueError(msg) + + return model diff --git a/src/asyncflow/schemas/topology/nodes.py b/src/asyncflow/schemas/topology/nodes.py new file mode 100644 index 0000000..d742421 --- /dev/null +++ b/src/asyncflow/schemas/topology/nodes.py @@ -0,0 +1,164 @@ +""" +Define the pydantic schemas of the nodes you are allowed +to define in the topology of the system you would like to +simulate +""" + +from collections import Counter + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + PositiveInt, + field_validator, + model_validator, +) + +from asyncflow.config.constants import ( + LbAlgorithmsName, + ServerResourcesDefaults, + SystemNodes, +) +from asyncflow.schemas.topology.endpoint import Endpoint + +#------------------------------------------------------------- +# Definition of the nodes structure for the graph representing +# the topoogy of the system defined for the simulation +#------------------------------------------------------------- + +# ------------------------------------------------------------- +# CLIENT +# 
------------------------------------------------------------- + +class Client(BaseModel): + """Definition of the client class""" + + id: str + type: SystemNodes = SystemNodes.CLIENT + + @field_validator("type", mode="after") + def ensure_type_is_standard(cls, v: SystemNodes) -> SystemNodes: # noqa: N805 + """Ensure the type of the client is standard""" + if v != SystemNodes.CLIENT: + msg = f"The type should have a standard value: {SystemNodes.CLIENT}" + raise ValueError(msg) + return v + +# ------------------------------------------------------------- +# SERVER RESOURCES +# ------------------------------------------------------------- + +class ServerResources(BaseModel): + """ + Defines the quantifiable resources available on a server node. + Each attribute maps directly to a SimPy resource primitive. + """ + + cpu_cores: PositiveInt = Field( + ServerResourcesDefaults.CPU_CORES, + ge = ServerResourcesDefaults.MINIMUM_CPU_CORES, + description="Number of CPU cores available for processing.", + ) + db_connection_pool: PositiveInt | None = Field( + ServerResourcesDefaults.DB_CONNECTION_POOL, + description="Size of the database connection pool, if applicable.", + ) + + # Risorse modellate come simpy.Container (livello) + ram_mb: PositiveInt = Field( + ServerResourcesDefaults.RAM_MB, + ge = ServerResourcesDefaults.MINIMUM_RAM_MB, + description="Total available RAM in Megabytes.") + + # for the future + # disk_iops_limit: PositiveInt | None = None + # network_throughput_mbps: PositiveInt | None = None + +# ------------------------------------------------------------- +# SERVER +# ------------------------------------------------------------- + +class Server(BaseModel): + """ + definition of the server class: + - id: is the server identifier + - type: is the type of node in the structure + - server resources: is a dictionary to define the resources + of the machine where the server is living + - endpoints: is the list of all endpoints in a server + """ + + id: str + type: 
SystemNodes = SystemNodes.SERVER + #Later define a valide structure for the keys of server resources + server_resources : ServerResources + endpoints : list[Endpoint] + + @field_validator("type", mode="after") + def ensure_type_is_standard(cls, v: SystemNodes) -> SystemNodes: # noqa: N805 + """Ensure the type of the server is standard""" + if v != SystemNodes.SERVER: + msg = f"The type should have a standard value: {SystemNodes.SERVER}" + raise ValueError(msg) + return v + +class LoadBalancer(BaseModel): + """ + basemodel for the load balancer + - id: unique name associated to the lb + - type: type of the node in the structure + - server_covered: list of server id connected to the lb + """ + + id: str + type: SystemNodes = SystemNodes.LOAD_BALANCER + algorithms: LbAlgorithmsName = LbAlgorithmsName.ROUND_ROBIN + server_covered: set[str] = Field(default_factory=set) + + + + @field_validator("type", mode="after") + def ensure_type_is_standard(cls, v: SystemNodes) -> SystemNodes: # noqa: N805 + """Ensure the type of the server is standard""" + if v != SystemNodes.LOAD_BALANCER: + msg = f"The type should have a standard value: {SystemNodes.LOAD_BALANCER}" + raise ValueError(msg) + return v + + +# ------------------------------------------------------------- +# NODES CLASS WITH ALL POSSIBLE OBJECTS REPRESENTED BY A NODE +# ------------------------------------------------------------- + +class TopologyNodes(BaseModel): + """ + Definition of the nodes class: + - server: represent all servers implemented in the system + - client: is a simple object with just a name representing + the origin of the graph + """ + + servers: list[Server] + client: Client + load_balancer: LoadBalancer | None = None + + @model_validator(mode="after") # type: ignore[arg-type] + def unique_ids( + cls, # noqa: N805 + model: "TopologyNodes", + ) -> "TopologyNodes": + """Check that all id are unique""" + ids = [server.id for server in model.servers] + [model.client.id] + + if model.load_balancer is 
not None: + ids.append(model.load_balancer.id) + + counter = Counter(ids) + duplicate = [node_id for node_id, value in counter.items() if value > 1] + if duplicate: + msg = f"The following node ids are duplicate {duplicate}" + raise ValueError(msg) + return model + + model_config = ConfigDict(extra="forbid") diff --git a/src/asyncflow/schemas/workload/rqs_generator.py b/src/asyncflow/schemas/workload/rqs_generator.py new file mode 100644 index 0000000..a6fbf3b --- /dev/null +++ b/src/asyncflow/schemas/workload/rqs_generator.py @@ -0,0 +1,59 @@ +"""Define the schemas for the simulator""" + + +from pydantic import BaseModel, Field, field_validator + +from asyncflow.config.constants import Distribution, SystemNodes, TimeDefaults +from asyncflow.schemas.common.random_variables import RVConfig + + +class RqsGenerator(BaseModel): + """Define the expected variables for the simulation""" + + id: str + type: SystemNodes = SystemNodes.GENERATOR + avg_active_users: RVConfig + avg_request_per_minute_per_user: RVConfig + + user_sampling_window: int = Field( + default=TimeDefaults.USER_SAMPLING_WINDOW, + ge=TimeDefaults.MIN_USER_SAMPLING_WINDOW, + le=TimeDefaults.MAX_USER_SAMPLING_WINDOW, + description=( + "Sampling window in seconds " + f"({TimeDefaults.MIN_USER_SAMPLING_WINDOW}-" + f"{TimeDefaults.MAX_USER_SAMPLING_WINDOW})." 
+ ), + ) + + @field_validator("avg_request_per_minute_per_user", mode="after") + def ensure_avg_request_is_poisson( + cls, # noqa: N805 + v: RVConfig, + ) -> RVConfig: + """ + Force the distribution for the rqs generator to be poisson + at the moment we have a joint sampler just for the poisson-poisson + and gaussian-poisson case + """ + if v.distribution != Distribution.POISSON: + msg = "At the moment the variable avg request must be Poisson" + raise ValueError(msg) + return v + + @field_validator("avg_active_users", mode="after") + def ensure_avg_user_is_poisson_or_gaussian( + cls, # noqa: N805 + v: RVConfig, + ) -> RVConfig: + """ + Force the distribution for the rqs generator to be poisson + at the moment we have a joint sampler just for the poisson-poisson + and gaussian-poisson case + """ + if v.distribution not in {Distribution.POISSON, Distribution.NORMAL}: + msg = "At the moment the variable active user must be Poisson or Gaussian" + raise ValueError(msg) + return v + + diff --git a/src/asyncflow/settings/__init__.py b/src/asyncflow/settings/__init__.py new file mode 100644 index 0000000..4f4031c --- /dev/null +++ b/src/asyncflow/settings/__init__.py @@ -0,0 +1,6 @@ +"""Public settings API.""" +from __future__ import annotations + +from asyncflow.schemas.settings.simulation import SimulationSettings + +__all__ = ["SimulationSettings"] diff --git a/src/asyncflow/workload/__init__.py b/src/asyncflow/workload/__init__.py new file mode 100644 index 0000000..c4b8735 --- /dev/null +++ b/src/asyncflow/workload/__init__.py @@ -0,0 +1,7 @@ +"""Public workload API.""" +from __future__ import annotations + +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + +__all__ = ["RVConfig", "RqsGenerator"] diff --git a/tests/conftest.py b/tests/conftest.py index 842f5ae..80955f0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,123 +1,153 @@ """Pytest configuration file for setting up 
test fixtures and plugins.""" -import os -from collections.abc import AsyncGenerator, Generator -from pathlib import Path -from typing import cast import pytest -from alembic import command -from alembic.config import Config -from dotenv import load_dotenv -from fastapi.testclient import TestClient -from sqlalchemy.ext.asyncio import ( - AsyncEngine, - AsyncSession, - async_sessionmaker, - create_async_engine, +from numpy.random import Generator as NpGenerator +from numpy.random import default_rng + +from asyncflow.config.constants import ( + Distribution, + EventMetricName, + SampledMetricName, + SamplePeriods, + TimeDefaults, ) -from sqlalchemy_utils import create_database, database_exists, drop_database +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.payload import SimulationPayload +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.topology.edges import Edge +from asyncflow.schemas.topology.graph import TopologyGraph +from asyncflow.schemas.topology.nodes import ( + Client, + TopologyNodes, +) +from asyncflow.schemas.workload.rqs_generator import RqsGenerator -from app.config.settings import settings -from app.db.session import get_db -from app.main import app +# ============================================================================ +# STANDARD CONFIGURATION FOR INPUT VARIABLES +# ============================================================================ -# Load test environment variables from .env.test -ENV_PATH = Path(__file__).resolve().parents[1] / "docker" / ".env.test" -load_dotenv(dotenv_path=ENV_PATH, override=True) +# --------------------------------------------------------------------------- +# RANDOM VARIABLE GENERATOR +# --------------------------------------------------------------------------- -class DummySession: - """A no-op async session substitute for unit tests.""" +@pytest.fixture(scope="session") +def rng() -> NpGenerator: + """Deterministic NumPy RNG 
shared across tests (seed=0).""" + return default_rng(0) - def add(self, instance: object) -> None: - """Perform a no-op add.""" - async def commit(self) -> None: - """Perform a no-op commit.""" +# --------------------------------------------------------------------------- # +# Metric sets # +# --------------------------------------------------------------------------- # - async def refresh(self, instance: object) -> None: - """Perform a no-op refresh.""" - async def flush(self) -> None: - """Perform a no-op flush, required for unit tests.""" +@pytest.fixture(scope="session") +def enabled_sample_metrics() -> set[SampledMetricName]: + """Default time-series KPIs collected in most tests.""" + return { + SampledMetricName.READY_QUEUE_LEN, + SampledMetricName.RAM_IN_USE, + } -async def override_get_db() -> AsyncGenerator[AsyncSession, None]: - """Yield a dummy async session for DB dependency override in unit tests.""" - # Cast so mypy sees an AsyncSession - yield cast("AsyncSession", DummySession()) +@pytest.fixture(scope="session") +def enabled_event_metrics() -> set[EventMetricName]: + """Default per-event KPIs collected in most tests.""" + return { + EventMetricName.RQS_CLOCK, + } -# Override the get_db dependency for all tests that use the app directly -app.dependency_overrides[get_db] = override_get_db +# --------------------------------------------------------------------------- # +# Global simulation settings # +# --------------------------------------------------------------------------- # -@pytest.fixture(scope="module") -def client() -> TestClient: - """Return a TestClient with the database dependency overridden.""" - return TestClient(app) +@pytest.fixture +def sim_settings( + enabled_sample_metrics: set[SampledMetricName], + enabled_event_metrics: set[EventMetricName], +) -> SimulationSettings: + """ + Minimal :class:`SimulationSettings` instance. + + The simulation horizon is fixed to the lowest allowed value so that unit + tests run quickly. 
+ """ + return SimulationSettings( + total_simulation_time=TimeDefaults.MIN_SIMULATION_TIME, + enabled_sample_metrics=enabled_sample_metrics, + enabled_event_metrics=enabled_event_metrics, + sample_period_s=SamplePeriods.STANDARD_TIME, + ) -@pytest.fixture(scope="session", autouse=True) -def setup_test_database( - request: pytest.FixtureRequest, -) -> Generator[None, None, None]: - """Drop and recreate the test database, then apply migrations.""" - markexpr = request.config.option.markexpr or "" - is_integration_test = ( - "integration" in markexpr and "not integration" not in markexpr - ) +# --------------------------------------------------------------------------- # +# Traffic profile # +# --------------------------------------------------------------------------- # - if not is_integration_test: - yield - return - # --- SETUP: This code runs before the test session starts --- - sync_db_url = settings.db_url.replace("+asyncpg", "") +@pytest.fixture +def rqs_input() -> RqsGenerator: + """ + One active user issuing two requests per minute—sufficient to + exercise the entire request-generator pipeline with minimal overhead. 
+ """ + return RqsGenerator( + id="rqs-1", + avg_active_users=RVConfig(mean=1.0), + avg_request_per_minute_per_user=RVConfig(mean=2.0), + user_sampling_window=TimeDefaults.USER_SAMPLING_WINDOW, + ) + - if database_exists(sync_db_url): - drop_database(sync_db_url) - create_database(sync_db_url) +# --------------------------------------------------------------------------- # +# Minimal topology (one client, no servers, no edges) # +# --------------------------------------------------------------------------- # - os.environ["ENVIRONMENT"] = "test" - alembic_cfg = Config("alembic.ini") - command.upgrade(alembic_cfg, "head") - try: - # The test session runs at this point - yield - finally: - # --- TEARDOWN: This code runs after the test session ends --- - if database_exists(sync_db_url): - drop_database(sync_db_url) +@pytest.fixture +def topology_minimal() -> TopologyGraph: + """ + A valid *tiny* topology: one generator ➜ one client. + + The single edge has a negligible latency; its only purpose is to give the + generator a valid ``out_edge`` so that the runtime can start. 
+ """ + client = Client(id="client-1") + + # Stub edge: generator id comes from rqs_input fixture (“rqs-1”) + edge = Edge( + id="gen-to-client", + source="rqs-1", + target="client-1", + latency=RVConfig(mean=0.001, distribution=Distribution.POISSON), + ) + nodes = TopologyNodes(servers=[], client=client) + return TopologyGraph(nodes=nodes, edges=[edge]) -@pytest.fixture(scope="session") -def async_engine() -> AsyncEngine: - """Create and return an async SQLAlchemy engine for the test session.""" - return create_async_engine(settings.db_url, future=True, echo=False) +# --------------------------------------------------------------------------- # +# Complete simulation payload # +# --------------------------------------------------------------------------- # @pytest.fixture -async def db_session(async_engine: AsyncEngine) -> AsyncGenerator[AsyncSession, None]: - """Yield an async DB session wrapped in a transaction that is always rolled back.""" - # This fixture ensures that each test is isolated from others - # within the same session. - connection = await async_engine.connect() - transaction = await connection.begin() - - session_factory = async_sessionmaker( - bind=connection, - expire_on_commit=False, - class_=AsyncSession, +def payload_base( + rqs_input: RqsGenerator, + sim_settings: SimulationSettings, + topology_minimal: TopologyGraph, +) -> SimulationPayload: + """ + End-to-end payload used by integration tests and FastAPI endpoint tests. + + It wires together the individual fixtures into the single object expected + by the simulation engine. 
+ """ + return SimulationPayload( + rqs_input=rqs_input, + topology_graph=topology_minimal, + sim_settings=sim_settings, ) - session = session_factory() - - try: - yield session - finally: - # Roll back the transaction to discard any changes made during the test - await transaction.rollback() - # Close the connection - await connection.close() diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000..2c94d6f --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,46 @@ +"""Shared fixtures used by several integration-test groups.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest +import simpy + +from asyncflow.runtime.simulation_runner import SimulationRunner + +if TYPE_CHECKING: + from collections.abc import Callable + from pathlib import Path + + +# --------------------------------------------------------------------------- # +# Environment # +# --------------------------------------------------------------------------- # +@pytest.fixture +def env() -> simpy.Environment: + """A fresh SimPy environment per test.""" + return simpy.Environment() + + +# --------------------------------------------------------------------------- # +# Runner factory (load YAML scenarios) # +# --------------------------------------------------------------------------- # +@pytest.fixture +def make_runner( + env: simpy.Environment, +) -> Callable[[str | Path], SimulationRunner]: + """ + Factory that loads a YAML scenario and instantiates a + :class:`SimulationRunner`. 
+ + Usage inside a test:: + + runner = make_runner("scenarios/minimal.yml") + results = runner.run() + """ + + def _factory(yaml_path: str | Path) -> SimulationRunner: + return SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) + + return _factory diff --git a/tests/integration/db_initialization/__init__.py b/tests/integration/db_initialization/__init__.py deleted file mode 100644 index 8c00603..0000000 --- a/tests/integration/db_initialization/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Database initialization integration tests.""" diff --git a/tests/integration/db_initialization/test_db_connection.py b/tests/integration/db_initialization/test_db_connection.py deleted file mode 100644 index 8f725ee..0000000 --- a/tests/integration/db_initialization/test_db_connection.py +++ /dev/null @@ -1,16 +0,0 @@ -import pytest -from sqlalchemy import text - -from app.db.session import engine - - -@pytest.mark.integration -async def test_db_connection() -> None: - """Verify that the SQLAlchemy engine can connect to the database. - - This test ensures that the database connection is properly configured - and can execute a simple query. 
- """ - async with engine.connect() as conn: - result = await conn.execute(text("SELECT 1")) - assert result.scalar_one() == 1 diff --git a/tests/integration/db_initialization/test_init_models.py b/tests/integration/db_initialization/test_init_models.py deleted file mode 100644 index 7783bdc..0000000 --- a/tests/integration/db_initialization/test_init_models.py +++ /dev/null @@ -1,20 +0,0 @@ -import pytest -from sqlalchemy import text -from sqlalchemy.ext.asyncio import create_async_engine - -from app.config.settings import settings - -pytestmark = [pytest.mark.integration, pytest.mark.asyncio] - -async def test_users_table_exists_after_migrations() -> None: - engine = create_async_engine(settings.db_url, echo=False) - async with engine.connect() as conn: - result = await conn.execute( - text( - "SELECT COUNT(*) " - "FROM information_schema.tables " - "WHERE table_schema = 'public' " - " AND table_name = 'users';", - ), - ) - assert result.scalar_one() == 1 diff --git a/tests/integration/minimal/conftest.py b/tests/integration/minimal/conftest.py new file mode 100644 index 0000000..f29bf49 --- /dev/null +++ b/tests/integration/minimal/conftest.py @@ -0,0 +1,75 @@ +""" +Local fixtures for the *minimal* integration scenario. + +We **do not** add any Edge to the TopologyGraph because the core schema +forbids generator-origin edges. Instead we patch the single +`RqsGeneratorRuntime` after the `SimulationRunner` is built, giving it a +*no-op* EdgeRuntime so its internal assertion passes. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest +import simpy + +from asyncflow.config.constants import TimeDefaults +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + +if TYPE_CHECKING: + from asyncflow.schemas.payload import SimulationPayload + + +# ────────────────────────────────────────────────────────────────────────────── +# 0-traffic generator (shadows the project-wide fixture) +# ────────────────────────────────────────────────────────────────────────────── +@pytest.fixture(scope="session") +def rqs_input() -> RqsGenerator: + """A generator that never emits any request.""" + return RqsGenerator( + id="rqs-zero", + avg_active_users=RVConfig(mean=0.0), + avg_request_per_minute_per_user=RVConfig(mean=0.0), + user_sampling_window=TimeDefaults.USER_SAMPLING_WINDOW, + ) + + +# ────────────────────────────────────────────────────────────────────────────── +# SimPy env - local to this directory +# ────────────────────────────────────────────────────────────────────────────── +@pytest.fixture +def env() -> simpy.Environment: + """Fresh environment per test module.""" + return simpy.Environment() + + +class _NoOpEdge: + """EdgeRuntime stand-in that simply discards every state.""" + + def transport(self, _state: object) -> None: # ANN001: _state annotated + return # swallow the request silently + + +# ────────────────────────────────────────────────────────────────────────────── +# Runner factory - assigns the dummy edge *after* building the runner +# ────────────────────────────────────────────────────────────────────────────── +@pytest.fixture +def runner( + env: simpy.Environment, + payload_base: SimulationPayload, +) -> SimulationRunner: + """Build a `SimulationRunner` and patch the generator's `out_edge`.""" + sim_runner = SimulationRunner(env=env, 
simulation_input=payload_base) + + def _patch_noop_edge(r: SimulationRunner) -> None: + + gen_rt = next(iter(r._rqs_runtime.values())) # noqa: SLF001 + gen_rt.out_edge = _NoOpEdge() # type: ignore[assignment] + + + sim_runner._patch_noop_edge = _patch_noop_edge # type: ignore[attr-defined] # noqa: SLF001 + + return sim_runner diff --git a/tests/integration/minimal/test_minimal.py b/tests/integration/minimal/test_minimal.py new file mode 100644 index 0000000..7ae9507 --- /dev/null +++ b/tests/integration/minimal/test_minimal.py @@ -0,0 +1,100 @@ +""" +Smoke-test: the **smallest** valid topology boots, ticks and +shuts down without recording any metric. + +Topology under test +------------------- +generator ──Ø── client (Ø == no real EdgeRuntime) + +The request-generator cannot emit messages because its ``out_edge`` is +replaced by a no-op stub. The client is patched the same way so its own +forwarder never attempts a network send. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest +import simpy + +from asyncflow.metrics.analyzer import ResultsAnalyzer +from asyncflow.runtime.simulation_runner import SimulationRunner + +if TYPE_CHECKING: + from asyncflow.schemas.payload import SimulationPayload + + +# --------------------------------------------------------------------------- # +# Helpers # +# --------------------------------------------------------------------------- # + +class _NoOpEdge: + """Edge stub: swallows every transport call.""" + + def transport(self) -> None: + # Nothing to do - we just black-hole the message. 
+ return + + +# --------------------------------------------------------------------------- # +# Local fixtures # +# --------------------------------------------------------------------------- # +@pytest.fixture +def env() -> simpy.Environment: + """Fresh SimPy environment for this test file.""" + return simpy.Environment() + + +@pytest.fixture +def runner( + env: simpy.Environment, + payload_base: SimulationPayload, # comes from project-wide conftest +) -> SimulationRunner: + """SimulationRunner already loaded with *minimal* payload.""" + return SimulationRunner(env=env, simulation_input=payload_base) + + +# --------------------------------------------------------------------------- # +# Tests # +# --------------------------------------------------------------------------- # +def test_smoke_minimal_runs(runner: SimulationRunner) -> None: + """ + The simulation should: + + * start without any server or edge, + * execute its clock, + * leave all metric collections empty. + """ + # ── 1. Build generator + patch its edge ────────────────────────────── + runner._build_rqs_generator() # noqa: SLF001 - private builder ok in test + gen_rt = next(iter(runner._rqs_runtime.values())) # noqa: SLF001 + gen_rt.out_edge = _NoOpEdge() # type: ignore[assignment] + + # ── 2. Build client + patch its edge ───────────────────────────────── + runner._build_client() # noqa: SLF001 + cli_rt = next(iter(runner._client_runtime.values())) # noqa: SLF001 + cli_rt.out_edge = _NoOpEdge() # type: ignore[assignment] + + # ── 3. Build remaining artefacts (no servers / no LB present) ─────── + runner._start_all_processes() # noqa: SLF001 + runner._start_metric_collector() # noqa: SLF001 + + # ── 4. Run the clock ───────────────────────────────────────────────── + runner.env.run(until=runner.simulation_settings.total_simulation_time) + + # ── 5. 
Post-processing - everything must be empty ─────────────────── + results: ResultsAnalyzer = ResultsAnalyzer( + client=cli_rt, + servers=[], # none built + edges=[], # none built + settings=runner.simulation_settings, + ) + + # No latencies were produced + assert results.get_latency_stats() == {} + # Throughput time-series must be entirely empty + timestamps, rps = results.get_throughput_series() + assert timestamps == [] + # No sampled metrics either + assert results.get_sampled_metrics() == {} diff --git a/tests/integration/payload/data/invalid/missing_field.yml b/tests/integration/payload/data/invalid/missing_field.yml new file mode 100644 index 0000000..c74102d --- /dev/null +++ b/tests/integration/payload/data/invalid/missing_field.yml @@ -0,0 +1,17 @@ +rqs_input: + id: gen-1 + avg_active_users: { mean: 1 } + avg_request_per_minute_per_user: { mean: 10 } + +topology_graph: + nodes: + client: { id: cli } + servers: + - id: srv-1 + endpoints: + - endpoint_name: ep + steps: + - { kind: cpu_parse, step_operation: { cpu_time: 0.001 } } + + edges: [] +sim_settings: { total_simulation_time: 10 } diff --git a/tests/integration/payload/data/invalid/negative_latency.yml b/tests/integration/payload/data/invalid/negative_latency.yml new file mode 100644 index 0000000..f69fb60 --- /dev/null +++ b/tests/integration/payload/data/invalid/negative_latency.yml @@ -0,0 +1,15 @@ +rqs_input: + id: gen-1 + avg_active_users: { mean: 1 } + avg_request_per_minute_per_user: { mean: 10 } + +topology_graph: + nodes: + client: { id: cli } + servers: [] + edges: + - id: bad-lat + source: gen-1 + target: cli + latency: { mean: -0.001 } +sim_settings: { total_simulation_time: 5 } diff --git a/tests/integration/payload/data/invalid/wrong_enum.yml b/tests/integration/payload/data/invalid/wrong_enum.yml new file mode 100644 index 0000000..58a1c50 --- /dev/null +++ b/tests/integration/payload/data/invalid/wrong_enum.yml @@ -0,0 +1,13 @@ +rqs_input: + id: gen-1 + avg_active_users: { mean: 1 } + 
avg_request_per_minute_per_user: + mean: 10 + distribution: gamma # not valid enum + +topology_graph: + nodes: + client: { id: cli } + servers: [] + edges: [] +sim_settings: { total_simulation_time: 5 } diff --git a/tests/integration/payload/test_payload_invalid.py b/tests/integration/payload/test_payload_invalid.py new file mode 100644 index 0000000..8cd5226 --- /dev/null +++ b/tests/integration/payload/test_payload_invalid.py @@ -0,0 +1,19 @@ +"""test to verify validation on invalid yml""" + +from pathlib import Path + +import pytest +import yaml +from pydantic import ValidationError + +from asyncflow.schemas.payload import SimulationPayload + +DATA_DIR = Path(__file__).parent / "data" / "invalid" +YMLS = sorted(DATA_DIR.glob("*.yml")) + +@pytest.mark.integration +@pytest.mark.parametrize("yaml_path", YMLS, ids=lambda p: p.stem) +def test_invalid_payloads_raise(yaml_path: Path) -> None : + raw = yaml.safe_load(yaml_path.read_text()) + with pytest.raises(ValidationError): + SimulationPayload.model_validate(raw) diff --git a/tests/integration/single_server/conftest.py b/tests/integration/single_server/conftest.py new file mode 100644 index 0000000..f45633a --- /dev/null +++ b/tests/integration/single_server/conftest.py @@ -0,0 +1,47 @@ +""" +Fixtures for the *single-server* integration scenario: + +generator ──edge──> server ──edge──> client + +The topology is stored as a YAML file (`tests/data/single_server.yml`) so +tests remain declarative and we avoid duplicating Pydantic wiring logic. 
+""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest +import simpy + +if TYPE_CHECKING: # heavy imports only when type-checking + from asyncflow.runtime.simulation_runner import SimulationRunner + + +# --------------------------------------------------------------------------- # +# Shared SimPy environment (function-scope so every test starts fresh) # +# --------------------------------------------------------------------------- # +@pytest.fixture +def env() -> simpy.Environment: + """Return an empty ``simpy.Environment`` for each test.""" + return simpy.Environment() + + +# --------------------------------------------------------------------------- # +# Build a SimulationRunner from the YAML scenario # +# --------------------------------------------------------------------------- # +@pytest.fixture +def runner(env: simpy.Environment) -> SimulationRunner: + """ + Load *single_server.yml* through the public constructor + :pymeth:`SimulationRunner.from_yaml`. + """ + # import deferred to avoid ruff TC001 + from asyncflow.runtime.simulation_runner import SimulationRunner # noqa: PLC0415 + + yaml_path: Path = ( + Path(__file__).parent / "data" / "single_server.yml" + ) + + return SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) diff --git a/tests/integration/single_server/data/single_server.yml b/tests/integration/single_server/data/single_server.yml new file mode 100644 index 0000000..c6ec078 --- /dev/null +++ b/tests/integration/single_server/data/single_server.yml @@ -0,0 +1,54 @@ +# ─────────────────────────────────────────────────────────────── +# AsyncFlow scenario: generator ➜ client ➜ server ➜ client +# ─────────────────────────────────────────────────────────────── + +# 1. Traffic generator (light load) +rqs_input: + id: rqs-1 + avg_active_users: { mean: 5 } + avg_request_per_minute_per_user: { mean: 40 } + user_sampling_window: 60 + +# 2. 
Topology +topology_graph: + nodes: + client: { id: client-1 } + servers: + - id: srv-1 + server_resources: { cpu_cores: 2, ram_mb: 2048 } + endpoints: + - endpoint_name: ep-1 + probability: 1.0 + steps: + - kind: initial_parsing + step_operation: { cpu_time: 0.001 } + - kind: io_wait + step_operation: { io_waiting_time: 0.002 } + + edges: + - id: gen-to-client + source: rqs-1 + target: client-1 + latency: { mean: 0.003, distribution: exponential } + + - id: client-to-server + source: client-1 + target: srv-1 + latency: { mean: 0.003, distribution: exponential } + + - id: server-to-client + source: srv-1 + target: client-1 + latency: { mean: 0.003, distribution: exponential } + +# 3. Simulation settings +sim_settings: + total_simulation_time: 50 + sample_period_s: 0.01 + enabled_sample_metrics: + - ready_queue_len + - event_loop_io_sleep + - ram_in_use + - edge_concurrent_connection + enabled_event_metrics: + - rqs_clock diff --git a/tests/integration/single_server/test_int_single_server.py b/tests/integration/single_server/test_int_single_server.py new file mode 100644 index 0000000..4d55310 --- /dev/null +++ b/tests/integration/single_server/test_int_single_server.py @@ -0,0 +1,52 @@ +""" +End-to-end verification of a *functional* topology (1 generator, 1 server). + +Assertions cover: + +* non-zero latency stats, +* throughput series length > 0, +* presence of sampled metrics for both edge & server. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from asyncflow.config.constants import LatencyKey, SampledMetricName + +if TYPE_CHECKING: # only needed for type-checking + from asyncflow.metrics.analyzer import ResultsAnalyzer + from asyncflow.runtime.simulation_runner import SimulationRunner + + +# --------------------------------------------------------------------------- # +# Tests # +# --------------------------------------------------------------------------- # +@pytest.mark.integration +def test_single_server_happy_path(runner: SimulationRunner) -> None: + """Run the simulation and ensure that *something* was processed.""" + results: ResultsAnalyzer = runner.run() + + # ── Latency stats must exist ─────────────────────────────────────────── + stats = results.get_latency_stats() + assert stats, "Expected non-empty latency statistics." + assert stats[LatencyKey.TOTAL_REQUESTS] > 0 + assert stats[LatencyKey.MEAN] > 0.0 + + # ── Throughput series must have at least one bucket > 0 ─────────────── + ts, rps = results.get_throughput_series() + assert len(ts) == len(rps) > 0 + assert any(val > 0 for val in rps) + + # ── Sampled metrics must include *one* server and *one* edge ─────────── + sampled = results.get_sampled_metrics() + + # Server RAM & queues + assert SampledMetricName.RAM_IN_USE in sampled + assert sampled[SampledMetricName.RAM_IN_USE], "Server RAM time-series missing." + + # Edge concurrent-connection metric + assert SampledMetricName.EDGE_CONCURRENT_CONNECTION in sampled + assert sampled[SampledMetricName.EDGE_CONCURRENT_CONNECTION], "Edge metric missing." diff --git a/tests/system/test_sys_lb_two_servers.py b/tests/system/test_sys_lb_two_servers.py new file mode 100644 index 0000000..b273065 --- /dev/null +++ b/tests/system/test_sys_lb_two_servers.py @@ -0,0 +1,204 @@ +"""System test: load balancer + two identical servers (seeded, reproducible). 
+ +Topology: + + generator → client → LB(round_robin) → srv-1 + └→ srv-2 + srv-1 → client + srv-2 → client + +Each server endpoint: CPU(2 ms) → RAM(128 MB) → IO(12 ms) +Edges: exponential latency ~2-3 ms. +We check: +- latency stats / throughput sanity vs nominal λ (~40 rps); +- balanced traffic across srv-1 / srv-2 via edge concurrency and RAM means. +""" + +from __future__ import annotations + +import os +import random +from typing import TYPE_CHECKING + +import numpy as np +import pytest +import simpy + +from asyncflow import AsyncFlow +from asyncflow.components import Client, Edge, Endpoint, LoadBalancer, Server +from asyncflow.config.constants import LatencyKey +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator + +if TYPE_CHECKING: + # Imported only for type checking (ruff: TC001) + from asyncflow.metrics.analyzer import ResultsAnalyzer + from asyncflow.schemas.payload import SimulationPayload + +pytestmark = [ + pytest.mark.system, + pytest.mark.skipif( + os.getenv("ASYNCFLOW_RUN_SYSTEM_TESTS") != "1", + reason="System tests disabled (set ASYNCFLOW_RUN_SYSTEM_TESTS=1 to run).", + ), +] + +SEED = 4242 +REL_TOL = 0.30 # 30% for λ/latency +BAL_TOL = 0.25 # 25% imbalance tolerated between the two backends + + +def _seed_all(seed: int = SEED) -> None: + random.seed(seed) + np.random.seed(seed) # noqa: NPY002 + os.environ["PYTHONHASHSEED"] = str(seed) + + +def _build_payload() -> SimulationPayload: + gen = RqsGenerator( + id="rqs-1", + avg_active_users={"mean": 120}, + avg_request_per_minute_per_user={"mean": 20}, + user_sampling_window=60, + ) + client = Client(id="client-1") + + endpoint = Endpoint( + endpoint_name="/api", + steps=[ + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, + {"kind": "ram", "step_operation": {"necessary_ram": 128}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.012}}, + ], + ) + srv1 = Server( 
+ id="srv-1", + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[endpoint], + ) + srv2 = Server( + id="srv-2", + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[endpoint], + ) + + lb = LoadBalancer( + id="lb-1", + algorithms="round_robin", + server_covered={"srv-1", "srv-2"}, + ) + + edges = [ + Edge( + id="gen-client", + source="rqs-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="client-lb", + source="client-1", + target="lb-1", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="lb-srv1", + source="lb-1", + target="srv-1", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="lb-srv2", + source="lb-1", + target="srv-2", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="srv1-client", + source="srv-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="srv2-client", + source="srv-2", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + ] + + settings = SimulationSettings( + total_simulation_time=600, + sample_period_s=0.05, + enabled_sample_metrics=[ + "ready_queue_len", + "event_loop_io_sleep", + "ram_in_use", + "edge_concurrent_connection", + ], + enabled_event_metrics=["rqs_clock"], + ) + + flow = ( + AsyncFlow() + .add_generator(gen) + .add_client(client) + .add_load_balancer(lb) + .add_servers(srv1, srv2) + .add_edges(*edges) + .add_simulation_settings(settings) + ) + return flow.build_payload() + + +def _rel_diff(a: float, b: float) -> float: + denom = max(1e-9, (abs(a) + abs(b)) / 2.0) + return abs(a - b) / denom + + +def test_system_lb_two_servers_balanced_and_sane() -> None: + """End-to-end LB scenario: sanity + balance checks with seeded RNGs.""" + _seed_all() + + env = simpy.Environment() + runner = SimulationRunner(env=env, simulation_input=_build_payload()) + res: ResultsAnalyzer = runner.run() + + # Latency sanity + 
stats = res.get_latency_stats() + assert stats, "Expected non-empty stats." + assert LatencyKey.TOTAL_REQUESTS in stats + mean_lat = float(stats.get(LatencyKey.MEAN, 0.0)) + assert 0.020 <= mean_lat <= 0.060 + + # Throughput sanity vs nominal λ ≈ 40 rps + _, rps = res.get_throughput_series() + assert rps, "No throughput series produced." + rps_mean = float(np.mean(rps)) + lam = 120 * 20 / 60.0 + assert abs(rps_mean - lam) / lam <= REL_TOL + + # Load balance check: edge concurrency lb→srv1 vs lb→srv2 close + sampled = res.get_sampled_metrics() + edge_cc: dict[str, list[float]] = sampled.get( + "edge_concurrent_connection", + {}, + ) + assert "lb-srv1" in edge_cc + assert "lb-srv2" in edge_cc + m1 = float(np.mean(edge_cc["lb-srv1"])) + m2 = float(np.mean(edge_cc["lb-srv2"])) + assert _rel_diff(m1, m2) <= BAL_TOL + + # Server metrics present and broadly similar (RAM means close-ish) + ram_map: dict[str, list[float]] = sampled.get("ram_in_use", {}) + assert "srv-1" in ram_map + assert "srv-2" in ram_map + ram1 = float(np.mean(ram_map["srv-1"])) + ram2 = float(np.mean(ram_map["srv-2"])) + assert _rel_diff(ram1, ram2) <= BAL_TOL + + # IDs reported by analyzer + sids = res.list_server_ids() + assert set(sids) == {"srv-1", "srv-2"} diff --git a/tests/system/test_sys_single_server.py b/tests/system/test_sys_single_server.py new file mode 100644 index 0000000..ff2cd32 --- /dev/null +++ b/tests/system/test_sys_single_server.py @@ -0,0 +1,151 @@ +"""System test: single server (seeded, reproducible). + +Topology: + generator → client → srv-1 → client + +Endpoint: + CPU(1 ms) → RAM(64 MB) → IO(10 ms) +Edges: exponential latency ~2-3 ms. + +Checks: +- latency stats present and plausible (broad bounds); +- throughput roughly consistent with nominal λ; +- basic sampled metrics present for srv-1. 
+""" + +from __future__ import annotations + +import os +import random +from typing import TYPE_CHECKING + +import numpy as np +import pytest +import simpy + +from asyncflow import AsyncFlow +from asyncflow.components import Client, Edge, Endpoint, Server +from asyncflow.config.constants import LatencyKey +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator + +if TYPE_CHECKING: + # Imported only for type checking (ruff: TC001) + from asyncflow.metrics.analyzer import ResultsAnalyzer + from asyncflow.schemas.payload import SimulationPayload + +pytestmark = [ + pytest.mark.system, + pytest.mark.skipif( + os.getenv("ASYNCFLOW_RUN_SYSTEM_TESTS") != "1", + reason="System tests disabled (set ASYNCFLOW_RUN_SYSTEM_TESTS=1 to run).", + ), +] + +SEED = 1337 +REL_TOL = 0.35 # generous bound for simple sanity + + +def _seed_all(seed: int = SEED) -> None: + random.seed(seed) + np.random.seed(seed) # noqa: NPY002 + os.environ["PYTHONHASHSEED"] = str(seed) + + +def _build_payload() -> SimulationPayload: + # Workload: ~26.7 rps (80 users * 20 rpm / 60) + gen = RqsGenerator( + id="rqs-1", + avg_active_users={"mean": 80}, + avg_request_per_minute_per_user={"mean": 20}, + user_sampling_window=60, + ) + client = Client(id="client-1") + + ep = Endpoint( + endpoint_name="/api", + steps=[ + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.001}}, + {"kind": "ram", "step_operation": {"necessary_ram": 64}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}}, + ], + ) + srv = Server( + id="srv-1", + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[ep], + ) + + edges = [ + Edge( + id="gen-client", + source="rqs-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="client-srv", + source="client-1", + target="srv-1", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + 
id="srv-client", + source="srv-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + ] + + settings = SimulationSettings( + total_simulation_time=400, + sample_period_s=0.05, + enabled_sample_metrics=[ + "ready_queue_len", + "event_loop_io_sleep", + "ram_in_use", + "edge_concurrent_connection", + ], + enabled_event_metrics=["rqs_clock"], + ) + + flow = ( + AsyncFlow() + .add_generator(gen) + .add_client(client) + .add_servers(srv) + .add_edges(*edges) + ) + flow = flow.add_simulation_settings(settings) + return flow.build_payload() + + +def test_system_single_server_sane() -> None: + """End-to-end single-server scenario: sanity checks with seeded RNGs.""" + _seed_all() + + env = simpy.Environment() + runner = SimulationRunner(env=env, simulation_input=_build_payload()) + res: ResultsAnalyzer = runner.run() + + # Latency stats present and plausible + stats = res.get_latency_stats() + assert stats, "Expected non-empty stats." + assert LatencyKey.TOTAL_REQUESTS in stats + mean_lat = float(stats.get(LatencyKey.MEAN, 0.0)) + assert 0.015 <= mean_lat <= 0.060 + + # Throughput sanity vs nominal λ + _, rps = res.get_throughput_series() + assert rps, "No throughput series produced." + rps_mean = float(np.mean(rps)) + lam = 80 * 20 / 60.0 + assert abs(rps_mean - lam) / lam <= REL_TOL + + # Sampled metrics present for srv-1 + sampled: dict[str, dict[str, list[float]]] = res.get_sampled_metrics() + for key in ("ready_queue_len", "event_loop_io_sleep", "ram_in_use"): + assert key in sampled + assert "srv-1" in sampled[key] + assert len(sampled[key]["srv-1"]) > 0 diff --git a/tests/unit/metrics/test_analyzer.py b/tests/unit/metrics/test_analyzer.py new file mode 100644 index 0000000..901b646 --- /dev/null +++ b/tests/unit/metrics/test_analyzer.py @@ -0,0 +1,290 @@ +"""Extra unit tests for ``ResultsAnalyzer`` helpers and plots. 
+ +This suite complements the basic analyzer tests by exercising: +- formatting helpers (latency stats pretty-printer), +- server-id ordering, +- throughput recomputation with a custom window, +- metric accessors tolerant to enum/string keys, +- per-metric series time bases, +- the compact "base dashboard" plotting helper, +- single-server plots (ready queue, I/O queue, RAM), +- multi-server helpers (axes allocation and error handling). +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, cast + +import pytest +from matplotlib.figure import Figure + +from asyncflow.analysis import ResultsAnalyzer +from asyncflow.enums import SampledMetricName + +if TYPE_CHECKING: + from asyncflow.runtime.actors.client import ClientRuntime + from asyncflow.runtime.actors.edge import EdgeRuntime + from asyncflow.runtime.actors.server import ServerRuntime + from asyncflow.schemas.settings.simulation import SimulationSettings + + +# ---------------------------------------------------------------------- # +# Test doubles (minimal) # +# ---------------------------------------------------------------------- # +class DummyClock: + """Clock with *start* and *finish* timestamps to emulate one request.""" + + def __init__(self, start: float, finish: float) -> None: + """Initialize a synthetic request completion interval.""" + self.start = start + self.finish = finish + + +class DummyClient: + """Emulates ``ClientRuntime`` by exposing ``rqs_clock``.""" + + def __init__(self, clocks: list[DummyClock]) -> None: + """Attach a list of dummy clocks to the stub client.""" + self.rqs_clock = clocks + + +class DummyName: + """Mimic an Enum member that carries a ``.value`` attribute.""" + + def __init__(self, value: str) -> None: + """Store the underlying string *value* used as a metric key.""" + self.value = value + + +class DummyServerConfig: + """Minimal server config with only the ``id`` attribute.""" + + def __init__(self, identifier: str) -> None: + """Set the server 
identifier used by the analyzer.""" + self.id = identifier + + +class DummyServer: + """Stub for ``ServerRuntime`` exposing ``enabled_metrics`` and config.""" + + def __init__(self, identifier: str, metrics: dict[str, list[float]]) -> None: + """Create a fake server with the given per-metric time series.""" + self.server_config = DummyServerConfig(identifier) + self.enabled_metrics = { + DummyName(name): values for name, values in metrics.items() + } + + +class DummyEdgeConfig: + """Minimal edge config with only the ``id`` attribute.""" + + def __init__(self, identifier: str) -> None: + """Set the edge identifier used by the analyzer.""" + self.id = identifier + + +class DummyEdge: + """Stub for ``EdgeRuntime`` exposing ``enabled_metrics`` and config.""" + + def __init__(self, identifier: str, metrics: dict[str, list[float]]) -> None: + """Create a fake edge with the given per-metric time series.""" + self.edge_config = DummyEdgeConfig(identifier) + self.enabled_metrics = { + DummyName(name): values for name, values in metrics.items() + } + + +# ---------------------------------------------------------------------- # +# Fixtures # +# ---------------------------------------------------------------------- # +@pytest.fixture +def analyzer_with_metrics(sim_settings: SimulationSettings) -> ResultsAnalyzer: + """Provide an analyzer with one server and ready/io/ram signals. + + The fixture sets: + - total_simulation_time = 3 s, + - sample_period_s = 1 s, + - two completed requests at t=1s and t=2s. 
+ """ + sim_settings.total_simulation_time = 3 + sim_settings.sample_period_s = 1.0 + client = DummyClient([DummyClock(0.0, 1.0), DummyClock(0.0, 2.0)]) + server = DummyServer( + "srvX", + { + "ready_queue_len": [0, 1, 2], + "event_loop_io_sleep": [0, 0, 1], + "ram_in_use": [10.0, 20.0, 30.0], + }, + ) + edge = DummyEdge("edgeX", {}) + return ResultsAnalyzer( + client=cast("ClientRuntime", client), + servers=[cast("ServerRuntime", server)], + edges=[cast("EdgeRuntime", edge)], + settings=sim_settings, + ) + + +# ---------------------------------------------------------------------- # +# Accessors / formatting # +# ---------------------------------------------------------------------- # +def test_format_latency_stats_contains_header_and_lines( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Ensure the formatted stats contain a header and canonical keys.""" + text = analyzer_with_metrics.format_latency_stats() + assert "LATENCY STATS" in text + assert "MEAN" in text + assert "MEDIAN" in text + + +def test_list_server_ids_preserves_topology_order( + sim_settings: SimulationSettings, +) -> None: + """Verify that server IDs are returned in topology order.""" + sim_settings.total_simulation_time = 1 + client = DummyClient([]) + s1 = DummyServer("s1", {}) + s2 = DummyServer("s2", {}) + s3 = DummyServer("s3", {}) + an = ResultsAnalyzer( + client=cast("ClientRuntime", client), + servers=[ + cast("ServerRuntime", s1), + cast("ServerRuntime", s2), + cast("ServerRuntime", s3), + ], + edges=[], + settings=sim_settings, + ) + assert an.list_server_ids() == ["s1", "s2", "s3"] + + +# ---------------------------------------------------------------------- # +# Throughput with custom window # +# ---------------------------------------------------------------------- # +def test_get_throughput_series_custom_window_half_second( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Check recomputation of throughput with a 0.5 s window.""" + # Completions at 1s and 2s; 
with 0.5s buckets counts are [0,1,0,1,0,0]. + # Rates are counts / 0.5 => [0, 2, 0, 2, 0, 0]. + ts, rps = analyzer_with_metrics.get_throughput_series(window_s=0.5) + assert ts[:4] == [0.5, 1.0, 1.5, 2.0] + assert rps[:4] == [0.0, 2.0, 0.0, 2.0] + + +# ---------------------------------------------------------------------- # +# Metric map / series helpers # +# ---------------------------------------------------------------------- # +def test_get_metric_map_accepts_enum_and_string( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Ensure metric retrieval works for enum and raw-string keys.""" + m_enum = analyzer_with_metrics.get_metric_map( + SampledMetricName.READY_QUEUE_LEN, + ) + m_str = analyzer_with_metrics.get_metric_map("ready_queue_len") + + # PT018: split assertions into multiple parts. + assert "srvX" in m_enum + assert "srvX" in m_str + assert m_enum["srvX"] == [0, 1, 2] + assert m_str["srvX"] == [0, 1, 2] + +def test_get_series_respects_sample_period( + sim_settings: SimulationSettings, +) -> None: + """Confirm that series time base honors ``sample_period_s``.""" + sim_settings.total_simulation_time = 5 + sim_settings.sample_period_s = 1.5 + client = DummyClient([]) + server = DummyServer("srv1", {"ready_queue_len": [3, 4, 5]}) + an = ResultsAnalyzer( + client=cast("ClientRuntime", client), + servers=[cast("ServerRuntime", server)], + edges=[], + settings=sim_settings, + ) + times, vals = an.get_series(SampledMetricName.READY_QUEUE_LEN, "srv1") + assert vals == [3, 4, 5] + assert times == [0.0, 1.5, 3.0] + + +# ---------------------------------------------------------------------- # +# Plotting: base dashboard # +# ---------------------------------------------------------------------- # +def test_plot_base_dashboard_sets_titles( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Validate that the base dashboard sets expected axis titles.""" + fig = Figure() + ax_lat, ax_thr = fig.subplots(1, 2) + 
analyzer_with_metrics.plot_base_dashboard(ax_lat, ax_thr) + assert ax_lat.get_title() == "Request Latency Distribution" + assert ax_thr.get_title() == "Throughput (RPS)" + + +# ---------------------------------------------------------------------- # +# Plotting: single-server dedicated plots # +# ---------------------------------------------------------------------- # +def test_plot_single_server_ready_queue( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Ready-queue plot should have a title and a legend with mean/min/max.""" + fig = Figure() + ax = fig.subplots() + analyzer_with_metrics.plot_single_server_ready_queue(ax, "srvX") + + assert "Ready Queue" in ax.get_title() + + legend = ax.get_legend() + assert legend is not None + + labels = [t.get_text() for t in legend.get_texts()] + assert any(lbl.lower().startswith("mean") for lbl in labels) + assert any(lbl.lower().startswith("min") for lbl in labels) + assert any(lbl.lower().startswith("max") for lbl in labels) + assert len(labels) == 3 + + +def test_plot_single_server_io_queue( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """I/O-queue plot should have a title and a legend with mean/min/max.""" + fig = Figure() + ax = fig.subplots() + analyzer_with_metrics.plot_single_server_io_queue(ax, "srvX") + + assert "I/O Queue" in ax.get_title() + + legend = ax.get_legend() + assert legend is not None + + labels = [t.get_text() for t in legend.get_texts()] + assert any(lbl.lower().startswith("mean") for lbl in labels) + assert any(lbl.lower().startswith("min") for lbl in labels) + assert any(lbl.lower().startswith("max") for lbl in labels) + assert len(labels) == 3 + + +def test_plot_single_server_ram( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """RAM plot should have a title and a legend with mean/min/max.""" + fig = Figure() + ax = fig.subplots() + analyzer_with_metrics.plot_single_server_ram(ax, "srvX") + + assert "RAM" in ax.get_title() + + legend = ax.get_legend() + assert legend 
is not None + + labels = [t.get_text() for t in legend.get_texts()] + assert any(lbl.lower().startswith("mean") for lbl in labels) + assert any(lbl.lower().startswith("min") for lbl in labels) + assert any(lbl.lower().startswith("max") for lbl in labels) + assert len(labels) == 3 + diff --git a/tests/unit/public_api/test_import.py b/tests/unit/public_api/test_import.py new file mode 100644 index 0000000..cd708bc --- /dev/null +++ b/tests/unit/public_api/test_import.py @@ -0,0 +1,86 @@ +"""Unit tests for the public components import surface. + +Verifies that: +- `asyncflow.components` exposes the expected `__all__`. +- All symbols in `__all__` are importable and are classes. +""" + +from __future__ import annotations + +import importlib +from typing import TYPE_CHECKING + +from asyncflow.components import ( + Client, + Edge, + Endpoint, + LoadBalancer, + Server, + ServerResources, +) +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator, RVConfig + +if TYPE_CHECKING: + from collections.abc import Iterable + + + +def _assert_all_equals(module_name: str, expected: Iterable[str]) -> None: + """Assert that a module's __all__ exactly matches `expected`.""" + mod = importlib.import_module(module_name) + assert hasattr(mod, "__all__"), f"{module_name} is missing __all__" + assert set(mod.__all__) == set(expected), ( + f"{module_name}.__all__ mismatch:\n" + f" expected: {set(expected)}\n" + f" actual: {set(mod.__all__)}" + ) + + +def test_components_public_symbols() -> None: + """`asyncflow.components` exposes the expected names.""" + expected = [ + "Client", + "Edge", + "Endpoint", + "LoadBalancer", + "Server", + "ServerResources", + ] + _assert_all_equals("asyncflow.components", expected) + + +def test_components_symbols_are_importable_classes() -> None: + """All public symbols are importable and are classes.""" + # Basic type sanity (avoid heavy imports/instantiation) + for cls, name in [ + (Client, "Client"), + (Edge, "Edge"), + 
(Endpoint, "Endpoint"), + (LoadBalancer, "LoadBalancer"), + (Server, "Server"), + (ServerResources, "ServerResources"), + ]: + assert isinstance(cls, type), f"{name} should be a class type" + assert cls.__name__ == name + +def test_workload_public_symbols() -> None: + """`asyncflow.workload` exposes RVConfig and RqsGenerator.""" + _assert_all_equals("asyncflow.workload", ["RVConfig", "RqsGenerator"]) + + +def test_workload_symbols_are_importable_classes() -> None: + """Public symbols are importable and are classes.""" + for cls, name in [(RVConfig, "RVConfig"), (RqsGenerator, "RqsGenerator")]: + assert isinstance(cls, type), f"{name} should be a class" + assert cls.__name__ == name + +def test_settings_public_symbols() -> None: + """`asyncflow.settings` exposes SimulationSettings.""" + _assert_all_equals("asyncflow.settings", ["SimulationSettings"]) + + +def test_settings_symbol_is_importable_class() -> None: + """Public symbol is importable and is a class.""" + assert isinstance(SimulationSettings, type), "SimulationSettings should be a class" + assert SimulationSettings.__name__ == "SimulationSettings" diff --git a/tests/unit/pybuilder/test_input_builder.py b/tests/unit/pybuilder/test_input_builder.py new file mode 100644 index 0000000..fa49fda --- /dev/null +++ b/tests/unit/pybuilder/test_input_builder.py @@ -0,0 +1,281 @@ +""" +Unit tests for the AsyncFlow builder. + +The goal is to verify that: +- The builder enforces types on each `add_*` method. +- Missing components produce clear ValueError exceptions on `build_payload()`. +- A valid, minimal scenario builds a `SimulationPayload` successfully. +- Methods return `self` to support fluent chaining. +- Servers and edges can be added in multiples and preserve order. 
+""" + +from __future__ import annotations + +import pytest + +from asyncflow.builder.asyncflow_builder import AsyncFlow +from asyncflow.schemas.payload import SimulationPayload +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.topology.edges import Edge +from asyncflow.schemas.topology.endpoint import Endpoint +from asyncflow.schemas.topology.nodes import Client, Server +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + + +# --------------------------------------------------------------------------- # +# Helpers: build minimal, valid components # +# --------------------------------------------------------------------------- # +def make_generator() -> RqsGenerator: + """Return a minimal valid request generator.""" + return RqsGenerator( + id="rqs-1", + avg_active_users={"mean": 10}, + avg_request_per_minute_per_user={"mean": 30}, + user_sampling_window=60, + ) + + +def make_client() -> Client: + """Return a minimal valid client.""" + return Client(id="client-1") + + +def make_endpoint() -> Endpoint: + """Return a minimal endpoint with CPU and IO steps.""" + return Endpoint( + endpoint_name="ep-1", + probability=1.0, + steps=[ + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.001}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.001}}, + ], + ) + + +def make_server(server_id: str = "srv-1") -> Server: + """Return a minimal valid server with 1 core, 2GB RAM, and one endpoint.""" + return Server( + id=server_id, + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[make_endpoint()], + ) + + +def make_edges() -> list[Edge]: + """Return a valid edge triplet for the minimal single-server scenario.""" + e1 = Edge( + id="gen-to-client", + source="rqs-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ) + e2 = Edge( + id="client-to-server", + source="client-1", + target="srv-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ) 
+ e3 = Edge( + id="server-to-client", + source="srv-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ) + return [e1, e2, e3] + + +def make_settings() -> SimulationSettings: + """Return minimal simulation settings within validation bounds.""" + return SimulationSettings( + total_simulation_time=5.0, # lower bound is 5 seconds + sample_period_s=0.1, + enabled_sample_metrics=[ + "ready_queue_len", + "event_loop_io_sleep", + "ram_in_use", + "edge_concurrent_connection", + ], + enabled_event_metrics=["rqs_clock"], + ) + + +# --------------------------------------------------------------------------- # +# Positive / “happy path” # +# --------------------------------------------------------------------------- # +def test_builder_happy_path_returns_payload() -> None: + """Building a minimal scenario returns a validated SimulationPayload.""" + flow = AsyncFlow() + generator = make_generator() + client = make_client() + server = make_server() + e1, e2, e3 = make_edges() + settings = make_settings() + + payload = ( + flow.add_generator(generator) + .add_client(client) + .add_servers(server) + .add_edges(e1, e2, e3) + .add_simulation_settings(settings) + .build_payload() + ) + + assert isinstance(payload, SimulationPayload) + assert payload.topology_graph.nodes.client.id == client.id + assert len(payload.topology_graph.nodes.servers) == 1 + assert {e.id for e in payload.topology_graph.edges} == { + "gen-to-client", + "client-to-server", + "server-to-client", + } + + +def test_add_methods_return_self_for_chaining() -> None: + """Every add_* method returns `self` to support fluent chaining.""" + flow = AsyncFlow() + ret = ( + flow.add_generator(make_generator()) + .add_client(make_client()) + .add_servers(make_server()) + .add_edges(*make_edges()) + .add_simulation_settings(make_settings()) + ) + assert ret is flow + + +def test_add_servers_accepts_multiple_and_keeps_order() -> None: + """Adding multiple servers keeps insertion order.""" + 
flow = AsyncFlow().add_generator(make_generator()).add_client(make_client()) + s1 = make_server("srv-1") + s2 = make_server("srv-2") + s3 = make_server("srv-3") + + flow.add_servers(s1, s2).add_servers(s3) + e1, e2, e3 = make_edges() + settings = make_settings() + payload = ( + flow.add_edges(e1, e2, e3) + .add_simulation_settings(settings) + .build_payload() + ) + + ids = [srv.id for srv in payload.topology_graph.nodes.servers] + assert ids == ["srv-1", "srv-2", "srv-3"] + + +# --------------------------------------------------------------------------- # +# Negative cases: missing components # +# --------------------------------------------------------------------------- # +def test_build_without_generator_raises() -> None: + """Building without a generator fails with a clear error.""" + flow = AsyncFlow() + flow.add_client(make_client()) + flow.add_servers(make_server()) + flow.add_edges(*make_edges()) + flow.add_simulation_settings(make_settings()) + + with pytest.raises( + ValueError, + match="The generator input must be instantiated before the simulation", + ): + flow.build_payload() + + +def test_build_without_client_raises() -> None: + """Building without a client fails with a clear error.""" + flow = AsyncFlow() + flow.add_generator(make_generator()) + flow.add_servers(make_server()) + flow.add_edges(*make_edges()) + flow.add_simulation_settings(make_settings()) + + with pytest.raises( + ValueError, + match="The client input must be instantiated before the simulation", + ): + flow.build_payload() + + +def test_build_without_servers_raises() -> None: + """Building without servers fails with a clear error.""" + flow = AsyncFlow() + flow.add_generator(make_generator()) + flow.add_client(make_client()) + flow.add_edges(*make_edges()) + flow.add_simulation_settings(make_settings()) + + with pytest.raises( + ValueError, + match="You must instantiate at least one server before the simulation", + ): + flow.build_payload() + + +def test_build_without_edges_raises() 
-> None: + """Building without edges fails with a clear error.""" + flow = AsyncFlow() + flow.add_generator(make_generator()) + flow.add_client(make_client()) + flow.add_servers(make_server()) + flow.add_simulation_settings(make_settings()) + + with pytest.raises( + ValueError, + match="You must instantiate edges before the simulation", + ): + flow.build_payload() + + +def test_build_without_settings_raises() -> None: + """Building without settings fails with a clear error.""" + flow = AsyncFlow() + flow.add_generator(make_generator()) + flow.add_client(make_client()) + flow.add_servers(make_server()) + flow.add_edges(*make_edges()) + + with pytest.raises( + ValueError, + match="The simulation settings must be instantiated before the simulation", + ): + flow.build_payload() + + +# --------------------------------------------------------------------------- # +# Negative cases: type enforcement in add_* methods # +# --------------------------------------------------------------------------- # +def test_add_generator_rejects_wrong_type() -> None: + """`add_generator` rejects non-RqsGenerator instances.""" + flow = AsyncFlow() + with pytest.raises(TypeError): + flow.add_generator("not-a-generator") # type: ignore[arg-type] + + +def test_add_client_rejects_wrong_type() -> None: + """`add_client` rejects non-Client instances.""" + flow = AsyncFlow() + with pytest.raises(TypeError): + flow.add_client(1234) # type: ignore[arg-type] + + +def test_add_servers_rejects_wrong_type() -> None: + """`add_servers` rejects any non-Server in the varargs.""" + flow = AsyncFlow() + good = make_server() + with pytest.raises(TypeError): + flow.add_servers(good, "not-a-server") # type: ignore[arg-type] + + +def test_add_edges_rejects_wrong_type() -> None: + """`add_edges` rejects any non-Edge in the varargs.""" + flow = AsyncFlow() + good = make_edges()[0] + with pytest.raises(TypeError): + flow.add_edges(good, 3.14) # type: ignore[arg-type] + + +def test_add_settings_rejects_wrong_type() 
-> None: + """`add_simulation_settings` rejects non-SimulationSettings instances.""" + flow = AsyncFlow() + with pytest.raises(TypeError): + flow.add_simulation_settings({"total_simulation_time": 1.0}) # type: ignore[arg-type] diff --git a/tests/unit/resources/test_registry.py b/tests/unit/resources/test_registry.py new file mode 100644 index 0000000..6581ae0 --- /dev/null +++ b/tests/unit/resources/test_registry.py @@ -0,0 +1,60 @@ +"""Unit tests for ResourcesRuntime (resource registry).""" + +from __future__ import annotations + +import pytest +import simpy + +from asyncflow.config.constants import ServerResourceName +from asyncflow.resources.registry import ResourcesRuntime +from asyncflow.schemas.topology.endpoint import Endpoint +from asyncflow.schemas.topology.graph import TopologyGraph +from asyncflow.schemas.topology.nodes import ( + Client, + Server, + ServerResources, + TopologyNodes, +) + + +def _minimal_server(server_id: str, cores: int, ram: int) -> Server: + """Create a Server with a dummy endpoint and resource spec.""" + res = ServerResources(cpu_cores=cores, ram_mb=ram) + dummy_ep = Endpoint(endpoint_name="/ping", steps=[]) + return Server(id=server_id, server_resources=res, endpoints=[dummy_ep]) + + +def _build_topology() -> TopologyGraph: + """Return a minimal but schema-valid topology with two servers.""" + servers = [ + _minimal_server("srv-A", 2, 1024), + _minimal_server("srv-B", 4, 2048), + ] + client = Client(id="clt-1") + nodes = TopologyNodes(servers=servers, client=client) + return TopologyGraph(nodes=nodes, edges=[]) + + +def test_registry_initialises_filled_containers() -> None: + """CPU and RAM containers must start full for every server.""" + env = simpy.Environment() + topo = _build_topology() + registry = ResourcesRuntime(env=env, data=topo) + + for srv in topo.nodes.servers: + containers = registry[srv.id] + + cpu = containers[ServerResourceName.CPU.value] + ram = containers[ServerResourceName.RAM.value] + + assert cpu.level == 
cpu.capacity == srv.server_resources.cpu_cores + assert ram.level == ram.capacity == srv.server_resources.ram_mb + + +def test_getitem_unknown_server_raises_keyerror() -> None: + """Accessing an undefined server ID should raise KeyError.""" + env = simpy.Environment() + registry = ResourcesRuntime(env=env, data=_build_topology()) + + with pytest.raises(KeyError): + _ = registry["non-existent-server"] diff --git a/tests/unit/resources/test_server_containers.py b/tests/unit/resources/test_server_containers.py new file mode 100644 index 0000000..b7a8243 --- /dev/null +++ b/tests/unit/resources/test_server_containers.py @@ -0,0 +1,19 @@ +"""Unit test: build_containers must return full containers.""" + +import simpy + +from asyncflow.config.constants import ServerResourceName +from asyncflow.resources.server_containers import build_containers +from asyncflow.schemas.topology.nodes import ServerResources + + +def test_containers_start_full() -> None: + env = simpy.Environment() + spec = ServerResources(cpu_cores=4, ram_mb=2048) + containers = build_containers(env, spec) + + cpu = containers[ServerResourceName.CPU.value] + ram = containers[ServerResourceName.RAM.value] + + assert cpu.level == cpu.capacity == 4 + assert ram.level == ram.capacity == 2048 diff --git a/tests/unit/runtime/actors/test_client.py b/tests/unit/runtime/actors/test_client.py new file mode 100644 index 0000000..d78c848 --- /dev/null +++ b/tests/unit/runtime/actors/test_client.py @@ -0,0 +1,95 @@ +"""Unit-tests for :class:`ClientRuntime` (outbound / inbound paths).""" + +from __future__ import annotations + +import simpy + +from asyncflow.config.constants import SystemEdges, SystemNodes +from asyncflow.runtime.actors.client import ClientRuntime +from asyncflow.runtime.rqs_state import RequestState +from asyncflow.schemas.topology.nodes import Client + +# --------------------------------------------------------------------------- # +# Dummy edge (no real network) # +# 
--------------------------------------------------------------------------- # + + +class DummyEdgeRuntime: + """Collect states passed through *transport* without SimPy side-effects.""" + + def __init__(self, env: simpy.Environment) -> None: + """Init attributes""" + self.env = env + self.forwarded: list[RequestState] = [] + + # Signature compatible with EdgeRuntime.transport but returns *None* + def transport(self, state: RequestState) -> None: + """Transport state""" + self.forwarded.append(state) + + +# --------------------------------------------------------------------------- # +# Helper # +# --------------------------------------------------------------------------- # + + +def _setup( + env: simpy.Environment, +) -> tuple[simpy.Store, simpy.Store, DummyEdgeRuntime]: + inbox: simpy.Store = simpy.Store(env) + completed: simpy.Store = simpy.Store(env) + edge_rt = DummyEdgeRuntime(env) + cli_cfg = Client(id="cli-1") + + client = ClientRuntime( + env=env, + out_edge=edge_rt, # type: ignore[arg-type] + client_box=inbox, + completed_box=completed, + client_config=cli_cfg, + ) + client.start() # start the forwarder + return inbox, completed, edge_rt + + +# --------------------------------------------------------------------------- # +# Tests # +# --------------------------------------------------------------------------- # + + +def test_outbound_is_forwarded() -> None: + """First visit ⇒ forwarded; completed store remains empty.""" + env = simpy.Environment() + inbox, completed, edge_rt = _setup(env) + + req = RequestState(id=1, initial_time=0.0) + req.record_hop(SystemNodes.GENERATOR, "gen-1", env.now) + + inbox.put(req) + env.run() + + assert len(edge_rt.forwarded) == 1 + assert len(completed.items) == 0 + assert req.history[-1].component_type is SystemNodes.CLIENT + assert req.finish_time is None + + +def test_inbound_is_completed() -> None: + """Second visit ⇒ request stored in *completed_box* and not re-forwarded.""" + env = simpy.Environment() + inbox, 
completed, edge_rt = _setup(env) + + req = RequestState(id=2, initial_time=0.0) + req.record_hop(SystemNodes.GENERATOR, "gen-1", env.now) + req.record_hop(SystemEdges.NETWORK_CONNECTION, "edge-X", env.now) + req.record_hop(SystemNodes.CLIENT, "cli-1", env.now) # simulate return + + inbox.put(req) + env.run() + + assert len(edge_rt.forwarded) == 0 + assert len(completed.items) == 1 + + done = completed.items[0] + assert done.finish_time is not None + assert done.history[-1].component_type is SystemNodes.CLIENT diff --git a/tests/unit/runtime/actors/test_edge.py b/tests/unit/runtime/actors/test_edge.py new file mode 100644 index 0000000..1800a12 --- /dev/null +++ b/tests/unit/runtime/actors/test_edge.py @@ -0,0 +1,185 @@ +""" +Unit tests for :class:`EdgeRuntime`: +* delivery vs. drop paths +* connection-counter bookkeeping +* public properties (`enabled_metrics`, `concurrent_connections`) +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, cast + +import simpy + +from asyncflow.config.constants import SampledMetricName, SystemEdges, SystemNodes +from asyncflow.runtime.actors.edge import EdgeRuntime +from asyncflow.runtime.rqs_state import RequestState +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.topology.edges import Edge + +if TYPE_CHECKING: + import numpy as np + + from asyncflow.schemas.settings.simulation import SimulationSettings + + +# --------------------------------------------------------------------------- # +# Dummy RNG # +# --------------------------------------------------------------------------- # + + +class DummyRNG: + """Return preset values for ``uniform`` and ``normal``.""" + + def __init__(self, *, uniform_value: float, normal_value: float = 0.0) -> None: + """To complete""" + self.uniform_value = uniform_value + self.normal_value = normal_value + self.uniform_called = False + self.normal_called = False + + def uniform(self) -> float: # called by EdgeRuntime + """To complete""" 
+ self.uniform_called = True + return self.uniform_value + + def normal(self, _mean: float, _sigma: float) -> float: # called by sampler + """To complete""" + self.normal_called = True + return self.normal_value + + +# --------------------------------------------------------------------------- # +# Minimal stub for SimulationSettings # +# --------------------------------------------------------------------------- # + + +class _SettingsStub: + """Only the attributes required by EdgeRuntime/build_edge_metrics.""" + + def __init__(self, enabled_sample_metrics: set[SampledMetricName]) -> None: + self.enabled_sample_metrics = enabled_sample_metrics + self.sample_period_s = 0.001 # not used in these unit tests + + +# --------------------------------------------------------------------------- # +# Helper factory # +# --------------------------------------------------------------------------- # + + +def _make_edge( + env: simpy.Environment, + *, + uniform_value: float, + normal_value: float = 0.0, + dropout_rate: float = 0.0, +) -> tuple[EdgeRuntime, DummyRNG, simpy.Store]: + """Create a fully wired :class:`EdgeRuntime` + associated objects.""" + rng = DummyRNG(uniform_value=uniform_value, normal_value=normal_value) + store: simpy.Store = simpy.Store(env) + + edge_cfg = Edge( + id="edge-1", + source="src", + target="dst", + latency=RVConfig(mean=1.0, variance=1.0, distribution="normal"), + dropout_rate=dropout_rate, + ) + + settings_stub = _SettingsStub( + enabled_sample_metrics={SampledMetricName.EDGE_CONCURRENT_CONNECTION}, + ) + + edge_rt = EdgeRuntime( + env=env, + edge_config=edge_cfg, + rng=cast("np.random.Generator", rng), + target_box=store, + settings=cast("SimulationSettings", settings_stub), + ) + return edge_rt, rng, store + + +# --------------------------------------------------------------------------- # +# Tests # +# --------------------------------------------------------------------------- # + + +def test_edge_delivers_message() -> None: + """A request 
traverses the edge when `uniform >= dropout_rate`.""" + env = simpy.Environment() + edge_rt, rng, store = _make_edge( + env, + uniform_value=0.9, + normal_value=0.5, + dropout_rate=0.2, + ) + + state = RequestState(id=1, initial_time=0.0) + state.record_hop(SystemNodes.GENERATOR, "gen-1", env.now) + + assert edge_rt.concurrent_connections == 0 + + edge_rt.transport(state) + env.run() + + # message delivered + assert len(store.items) == 1 + delivered: RequestState = store.items[0] + last = delivered.history[-1] + assert last.component_type is SystemEdges.NETWORK_CONNECTION + assert last.component_id == "edge-1" + + # RNG calls + assert rng.uniform_called is True + assert rng.normal_called is True + + # counter restored + assert edge_rt.concurrent_connections == 0 + + +def test_edge_drops_message() -> None: + """A request is dropped when `uniform < dropout_rate`.""" + env = simpy.Environment() + edge_rt, rng, store = _make_edge( + env, + uniform_value=0.1, # < dropout_rate → drop + dropout_rate=0.5, + ) + + state = RequestState(id=1, initial_time=0.0) + state.record_hop(SystemNodes.GENERATOR, "gen-1", env.now) + + edge_rt.transport(state) + env.run() + + # no delivery + assert len(store.items) == 0 + last = state.history[-1] + assert last.component_id.endswith("dropped") + + # RNG calls + assert rng.uniform_called is True + assert rng.normal_called is False + + # counter unchanged + assert edge_rt.concurrent_connections == 0 + + +def test_metric_dict_initialised_and_mutable() -> None: + """`enabled_metrics` exposes the default key and supports list append.""" + env = simpy.Environment() + edge_rt, _rng, _store = _make_edge( + env, + uniform_value=0.9, + dropout_rate=0.0, + ) + + key = SampledMetricName.EDGE_CONCURRENT_CONNECTION + assert key in edge_rt.enabled_metrics + assert edge_rt.enabled_metrics[key] == [] + + # Simulate a collector append + edge_rt.enabled_metrics[key].append(5) + assert edge_rt.enabled_metrics[key] == [5] + diff --git 
a/tests/unit/runtime/actors/test_load_balancer.py b/tests/unit/runtime/actors/test_load_balancer.py new file mode 100644 index 0000000..1905543 --- /dev/null +++ b/tests/unit/runtime/actors/test_load_balancer.py @@ -0,0 +1,131 @@ +"""Unit tests for ``LoadBalancerRuntime`` (round-robin & least-connections).""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, cast + +import pytest +import simpy + +from asyncflow.config.constants import LbAlgorithmsName, SystemNodes +from asyncflow.runtime.actors.load_balancer import LoadBalancerRuntime +from asyncflow.schemas.topology.nodes import LoadBalancer + +if TYPE_CHECKING: + from asyncflow.runtime.actors.edge import EdgeRuntime + + + +# --------------------------------------------------------------------------- # +# Dummy objects (lightweight test doubles) # +# --------------------------------------------------------------------------- # +class DummyState: + """Tiny substitute for ``RequestState`` - only ``history`` is needed.""" + + def __init__(self) -> None: + """Instance of the state history""" + self.history: list[str] = [] + + def record_hop(self, comp_type: SystemNodes, comp_id: str, _: float) -> None: + """Append the hop as ``":"``.""" + self.history.append(f"{comp_type.value}:{comp_id}") + + +class DummyEdge: + """Stub that mimics just the pieces `LoadBalancerRuntime` relies on.""" + + def __init__(self, edge_id: str, concurrent: int = 0) -> None: + """Instance for the dummy edge""" + self.edge_config = type("Cfg", (), {"id": edge_id}) + self.concurrent_connections = concurrent + self.received: list[DummyState] = [] + + # Signature compatible with EdgeRuntime.transport + def transport(self, state: DummyState) -> None: + """Collect the state for later assertions.""" + self.received.append(state) + + +# --------------------------------------------------------------------------- # +# Fixtures # +# --------------------------------------------------------------------------- # +@pytest.fixture 
+def env() -> simpy.Environment: + """Return a fresh SimPy environment per test.""" + return simpy.Environment() + + +def _make_lb_runtime( + env: simpy.Environment, + algorithm: LbAlgorithmsName, + edges: list[DummyEdge], +) -> LoadBalancerRuntime: + """Wire LB, its inbox store and the supplied dummy edges.""" + lb_cfg = LoadBalancer( + id="lb-1", + algorithms=algorithm, + server_covered={e.edge_config.id for e in edges}, # type: ignore[attr-defined] + ) + inbox: simpy.Store = simpy.Store(env) + lb = LoadBalancerRuntime( + env=env, + lb_config=lb_cfg, + # ② cast DummyEdge list to the expected interface type + out_edges=cast("list[EdgeRuntime]", edges), + lb_box=inbox, + ) + lb.start() + return lb + +# --------------------------------------------------------------------------- # +# Tests # +# --------------------------------------------------------------------------- # +def test_round_robin_rotation(env: simpy.Environment) -> None: + """Three requests, two edges ⇒ order must be edge-0, edge-1, edge-0.""" + edge0, edge1 = DummyEdge("srv-A"), DummyEdge("srv-B") + lb = _make_lb_runtime(env, LbAlgorithmsName.ROUND_ROBIN, [edge0, edge1]) + + for _ in range(3): + lb.lb_box.put(DummyState()) + + env.run() + + assert len(edge0.received) == 2 + assert len(edge1.received) == 1 + + tag = SystemNodes.LOAD_BALANCER.value + assert edge0.received[0].history[0].startswith(f"{tag}:") + assert edge0.received[1].history[0].startswith(f"{tag}:") + + +def test_least_connections_picks_lowest(env: simpy.Environment) -> None: + """Edge with fewer concurrent connections must be selected.""" + busy = DummyEdge("busy", concurrent=10) + idle = DummyEdge("idle", concurrent=1) + + lb = _make_lb_runtime(env, LbAlgorithmsName.LEAST_CONNECTIONS, [busy, idle]) + lb.lb_box.put(DummyState()) + + env.run() + + assert idle.received + assert not busy.received + + +def test_start_raises_if_no_edges(env: simpy.Environment) -> None: + """`start()` followed by `env.run()` with `out_edges=None` must 
assert.""" + lb_cfg = LoadBalancer( + id="lb-bad", + algorithms=LbAlgorithmsName.ROUND_ROBIN, + server_covered=set(), + ) + lb = LoadBalancerRuntime( + env=env, + lb_config=lb_cfg, + out_edges=None, + lb_box=simpy.Store(env), + ) + + lb.start() + with pytest.raises(AssertionError): + env.run() diff --git a/tests/unit/runtime/actors/test_rqs_generator.py b/tests/unit/runtime/actors/test_rqs_generator.py new file mode 100644 index 0000000..fef5987 --- /dev/null +++ b/tests/unit/runtime/actors/test_rqs_generator.py @@ -0,0 +1,151 @@ +"""Unit-tests for the :class:`RqsGeneratorRuntime` dispatcher and event flow.""" +from __future__ import annotations + +from collections.abc import Iterator +from typing import TYPE_CHECKING, cast + +import numpy as np +import simpy + +from asyncflow.config.constants import Distribution +from asyncflow.runtime.actors.rqs_generator import RqsGeneratorRuntime + +if TYPE_CHECKING: + + import pytest + from numpy.random import Generator + + from asyncflow.runtime.actors.edge import EdgeRuntime + from asyncflow.runtime.rqs_state import RequestState + from asyncflow.schemas.settings.simulation import SimulationSettings + from asyncflow.schemas.workload.rqs_generator import RqsGenerator + +import importlib + +# --------------------------------------------------------------------------- # +# Helpers # +# --------------------------------------------------------------------------- # + + +class DummyEdgeRuntime: + """Minimal stub capturing transported :class:`RequestState`.""" + + def __init__(self) -> None: + """Definition of the attributes""" + self.received: list[RequestState] = [] + + def transport(self, state: RequestState) -> None: + """Collect every state passed through the edge.""" + self.received.append(state) + + +def _make_runtime( + env: simpy.Environment, + edge: DummyEdgeRuntime, + rqs_input: RqsGenerator, + sim_settings: SimulationSettings, + *, + seed: int = 0, +) -> RqsGeneratorRuntime: + """Factory returning a fully wired 
:class:`RqsGeneratorRuntime`.""" + rng: Generator = np.random.default_rng(seed) + return RqsGeneratorRuntime( + env=env, + out_edge=cast("EdgeRuntime", edge), + rqs_generator_data=rqs_input, + sim_settings=sim_settings, + rng=rng, + ) + + +# --------------------------------------------------------------------------- # +# Dispatcher behaviour # +# --------------------------------------------------------------------------- # + + +RGR_MODULE = importlib.import_module("asyncflow.runtime.actors.rqs_generator") + +def test_dispatcher_selects_poisson_poisson( + monkeypatch: pytest.MonkeyPatch, + rqs_input: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """Default (Poisson) distribution must invoke *poisson_poisson_sampling*.""" + called = {"pp": False} + + def _fake_pp(*args: object, **kwargs: object) -> Iterator[float]: + called["pp"] = True + return iter(()) # iterator already exhausted + + monkeypatch.setattr(RGR_MODULE, "poisson_poisson_sampling", _fake_pp) + + env = simpy.Environment() + edge = DummyEdgeRuntime() + runtime = _make_runtime(env, edge, rqs_input, sim_settings) + + gen = runtime._requests_generator() # noqa: SLF001 + for _ in gen: + pass + + assert called["pp"] is True + assert isinstance(gen, Iterator) + + +def test_dispatcher_selects_gaussian_poisson( + monkeypatch: pytest.MonkeyPatch, + rqs_input: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """Normal distribution must invoke *gaussian_poisson_sampling*.""" + rqs_input.avg_active_users.distribution = Distribution.NORMAL + called = {"gp": False} + + def _fake_gp(*args: object, **kwargs: object) -> Iterator[float]: + called["gp"] = True + return iter(()) + + monkeypatch.setattr(RGR_MODULE, "gaussian_poisson_sampling", _fake_gp) + + env = simpy.Environment() + edge = DummyEdgeRuntime() + runtime = _make_runtime(env, edge, rqs_input, sim_settings) + + gen = runtime._requests_generator() # noqa: SLF001 + for _ in gen: + pass + + assert called["gp"] is True + assert 
isinstance(gen, Iterator) + +# --------------------------------------------------------------------------- # +# Event-arrival flow # +# --------------------------------------------------------------------------- # + + +def test_event_arrival_generates_expected_number_of_requests( + monkeypatch: pytest.MonkeyPatch, + rqs_input: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """Given a deterministic gap list, exactly that many requests are sent.""" + gaps = [1.0, 2.0, 3.0] + + def _fake_gen(self: object) -> Iterator[float]: + yield from gaps + + monkeypatch.setattr( + RqsGeneratorRuntime, + "_requests_generator", + _fake_gen, + ) + + env = simpy.Environment() + edge = DummyEdgeRuntime() + runtime = _make_runtime(env, edge, rqs_input, sim_settings) + + env.process(runtime._event_arrival()) # noqa: SLF001 + env.run(until=sum(gaps) + 0.1) # run slightly past the last gap + + assert len(edge.received) == len(gaps) + ids = [s.id for s in edge.received] + assert ids == [1, 2, 3] diff --git a/tests/unit/runtime/actors/test_server.py b/tests/unit/runtime/actors/test_server.py new file mode 100644 index 0000000..4c915ac --- /dev/null +++ b/tests/unit/runtime/actors/test_server.py @@ -0,0 +1,186 @@ +"""Unit-tests for ServerRuntime concurrency, resource handling and metrics. + +Each test spins up an isolated SimPy environment containing: + +* one ServerRuntime +* one mock edge with zero-latency delivery (InstantEdge) +* an inbox (simpy.Store) for incoming requests +* a sink (simpy.Store) that receives the request after the server + +The server exposes: + RAM = 1024 MB, CPU cores = 2 +and a single endpoint with the step sequence: + RAM(128 MB) ➜ CPU(5 ms) ➜ I/O(20 ms). + +All timings are in **seconds** because SimPy's clock is unit-agnostic. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import simpy +from numpy.random import default_rng + +from asyncflow.config.constants import ( + EndpointStepCPU, + EndpointStepIO, + EndpointStepRAM, + SampledMetricName, + StepOperation, +) +from asyncflow.resources.server_containers import build_containers +from asyncflow.runtime.actors.server import ServerRuntime +from asyncflow.runtime.rqs_state import RequestState +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.topology.endpoint import Endpoint, Step +from asyncflow.schemas.topology.nodes import ( + Server, + ServerResources, +) + +if TYPE_CHECKING: + + from collections.abc import Generator + + + +# ---------------------------------------------------------------------------# +# Helpers # +# ---------------------------------------------------------------------------# +class InstantEdge: + """Stub EdgeRuntime with zero latency and no drops.""" + + def __init__(self, env: simpy.Environment, sink: simpy.Store) -> None: + """Attribute""" + self._env = env + self._sink = sink + + def transport(self, state: RequestState) -> simpy.Process: + """Transport function""" + return self._env.process(self._deliver(state)) + + def _deliver(self, state: RequestState) -> Generator[simpy.Event, None, None]: + """Deliver function""" + yield self._sink.put(state) + + +def _make_server_runtime( + env: simpy.Environment, +) -> tuple[ServerRuntime, simpy.Store]: + """Return a (ServerRuntime, sink) ready for injection tests.""" + # Resources + res_spec = ServerResources(cpu_cores=2, ram_mb=1024) + containers = build_containers(env, res_spec) + + # Endpoint: RAM → CPU → I/O + endpoint = Endpoint( + endpoint_name="/predict", + steps=[ + Step( + kind=EndpointStepRAM.RAM, + step_operation={StepOperation.NECESSARY_RAM: 128}, + ), + Step( + kind=EndpointStepCPU.CPU_BOUND_OPERATION, + step_operation={StepOperation.CPU_TIME: 0.005}, + ), + Step( + 
kind=EndpointStepIO.DB, + step_operation={StepOperation.IO_WAITING_TIME: 0.020}, + ), + ], + ) + + server_cfg = Server(id="api_srv", endpoints=[endpoint], server_resources=res_spec) + + inbox: simpy.Store = simpy.Store(env) + sink: simpy.Store = simpy.Store(env) + edge = InstantEdge(env, sink) + + settings = SimulationSettings(total_simulation_time=1900, sample_period_s=0.1) + + runtime = ServerRuntime( + env=env, + server_resources=containers, + server_config=server_cfg, + out_edge=edge, # type: ignore[arg-type] + server_box=inbox, + settings=settings, + rng=default_rng(seed=0), + ) + return runtime, sink + + +# ---------------------------------------------------------------------------# +# Tests # +# ---------------------------------------------------------------------------# +def test_ram_is_released_at_end() -> None: + """RAM tokens must return to capacity once the request finishes.""" + env = simpy.Environment() + server, sink = _make_server_runtime(env) + + server.server_box.put(RequestState(id=1, initial_time=0.0)) + server.start() + env.run() + + ram = server.server_resources["RAM"] + assert ram.level == ram.capacity + assert len(sink.items) == 1 + + +def test_cpu_core_held_only_during_cpu_step() -> None: + """Exactly one core is busy during the CPU-bound window (0 5ms).""" + env = simpy.Environment() + server, _ = _make_server_runtime(env) + cpu = server.server_resources["CPU"] + + server.server_box.put(RequestState(id=2, initial_time=0.0)) + server.start() + + env.run(until=0.004) # mid-CPU step + assert cpu.level == 1 # 2-1 + + env.run(until=0.006) # after CPU step + assert cpu.level == 2 # released + + +def test_ready_and_io_queue_counters() -> None: + """ready_queue_len and io_queue_len should toggle as CPU⇄I/O phases alternate.""" + env = simpy.Environment() + server, _ = _make_server_runtime(env) + + server.server_box.put(RequestState(id=3, initial_time=0.0)) + server.start() + + # 1) before start queues are empty + assert server.ready_queue_len == 0 
+ assert server.io_queue_len == 0 + + # 2) during CPU (0 5ms) ready queue+1 + env.run(until=0.003) + assert server.ready_queue_len == 1 + assert server.io_queue_len == 0 + + # 3) during I/O (5 25ms) ready 0, io+1 + env.run(until=0.010) + assert server.ready_queue_len == 0 + assert server.io_queue_len == 1 + + # 4) completed both back to 0 + env.run() + assert server.ready_queue_len == 0 + assert server.io_queue_len == 0 + + +def test_enabled_metrics_dict_populated() -> None: + """ServerRuntime must create lists for every mandatory sampled metric.""" + env = simpy.Environment() + server, _ = _make_server_runtime(env) + + mandatory = { + SampledMetricName.RAM_IN_USE, + SampledMetricName.READY_QUEUE_LEN, + SampledMetricName.EVENT_LOOP_IO_SLEEP, + } + assert mandatory.issubset(server.enabled_metrics.keys()) diff --git a/tests/unit/runtime/test_rqs_state.py b/tests/unit/runtime/test_rqs_state.py new file mode 100644 index 0000000..eaf752b --- /dev/null +++ b/tests/unit/runtime/test_rqs_state.py @@ -0,0 +1,66 @@ +"""Unit-tests for :class:`RequestState` and :class:`Hop`.""" +from __future__ import annotations + +from asyncflow.config.constants import SystemEdges, SystemNodes +from asyncflow.runtime.rqs_state import Hop, RequestState + +# --------------------------------------------------------------------------- # +# Helpers # +# --------------------------------------------------------------------------- # + + +def _state() -> RequestState: + """Return a fresh RequestState with id='42' and t0=0.0.""" + return RequestState(id=42, initial_time=0.0) + + +def _hop( + c_type: SystemNodes | SystemEdges, + c_id: str, + ts: float, +) -> Hop: + """Shorthand to build an Hop literal in tests.""" + return Hop(c_type, c_id, ts) + + +# --------------------------------------------------------------------------- # +# Tests # +# --------------------------------------------------------------------------- # + + +def test_record_hop_appends_tuple() -> None: + """record_hop stores a 
:class:`Hop` instance with all three fields.""" + st = _state() + st.record_hop(SystemNodes.GENERATOR, "gen-1", now=1.23456) + + expected = [_hop(SystemNodes.GENERATOR, "gen-1", 1.23456)] + assert st.history == expected + assert isinstance(st.history[0], Hop) + + +def test_multiple_hops_preserve_global_order() -> None: + """History keeps exact insertion order for successive hops.""" + st = _state() + st.record_hop(SystemNodes.GENERATOR, "gen-1", 0.1) + st.record_hop(SystemEdges.NETWORK_CONNECTION, "edge-7", 0.2) + st.record_hop(SystemNodes.SERVER, "api-A", 0.3) + + expected: list[Hop] = [ + _hop(SystemNodes.GENERATOR, "gen-1", 0.1), + _hop(SystemEdges.NETWORK_CONNECTION, "edge-7", 0.2), + _hop(SystemNodes.SERVER, "api-A", 0.3), + ] + assert st.history == expected + + +def test_latency_none_until_finish_time_set() -> None: + """Latency is ``None`` if *finish_time* has not been assigned.""" + st = _state() + assert st.latency is None + + +def test_latency_returns_difference() -> None: + """Latency equals ``finish_time - initial_time`` once closed.""" + st = _state() + st.finish_time = 5.5 + assert st.latency == 5.5 # 5.5 - 0.0 diff --git a/tests/unit/runtime/test_simulation_runner.py b/tests/unit/runtime/test_simulation_runner.py new file mode 100644 index 0000000..3a352f5 --- /dev/null +++ b/tests/unit/runtime/test_simulation_runner.py @@ -0,0 +1,126 @@ +"""Unit-tests for :pyclass:`app.runtime.simulation_runner.SimulationRunner`. + +Purpose +------- +Validate each private builder in isolation and run a minimal end-to-end +execution without relying on the full integration scenarios. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest +import simpy +import yaml + +from asyncflow.runtime.simulation_runner import SimulationRunner + +if TYPE_CHECKING: + from pathlib import Path + + from asyncflow.runtime.actors.client import ClientRuntime + from asyncflow.runtime.actors.rqs_generator import RqsGeneratorRuntime + from asyncflow.schemas.payload import SimulationPayload + + +# --------------------------------------------------------------------------- # +# Fixtures # +# --------------------------------------------------------------------------- # +@pytest.fixture +def env() -> simpy.Environment: + """Return a fresh SimPy environment for every unit test.""" + return simpy.Environment() + + +@pytest.fixture +def runner( + env: simpy.Environment, + payload_base: SimulationPayload, +) -> SimulationRunner: + """Factory producing an **un-started** SimulationRunner.""" + return SimulationRunner(env=env, simulation_input=payload_base) + + +# --------------------------------------------------------------------------- # +# Builder-level tests # +# --------------------------------------------------------------------------- # +def test_build_rqs_generator_populates_dict(runner: SimulationRunner) -> None: + """_build_rqs_generator() must register one generator runtime.""" + runner._build_rqs_generator() # noqa: SLF001 + assert len(runner._rqs_runtime) == 1 # noqa: SLF001 + gen_rt: RqsGeneratorRuntime = next( + iter(runner._rqs_runtime.values()), # noqa: SLF001 + ) + assert gen_rt.rqs_generator_data.id == runner.rqs_generator.id + + +def test_build_client_populates_dict(runner: SimulationRunner) -> None: + """_build_client() must register exactly one client runtime.""" + runner._build_client() # noqa: SLF001 + assert len(runner._client_runtime) == 1 # noqa: SLF001 + cli_rt: ClientRuntime = next( + iter(runner._client_runtime.values()), # noqa: SLF001 + ) + assert cli_rt.client_config.id == runner.client.id + assert 
cli_rt.out_edge is None + + +def test_build_servers_keeps_empty_with_minimal_topology( + runner: SimulationRunner, +) -> None: + """Zero servers in the payload → dict stays empty.""" + runner._build_servers() # noqa: SLF001 + assert runner._servers_runtime == {} # noqa: SLF001 + + +def test_build_load_balancer_noop_when_absent( + runner: SimulationRunner, +) -> None: + """No LB in the payload → builder leaves dict empty.""" + runner._build_load_balancer() # noqa: SLF001 + assert runner._lb_runtime == {} # noqa: SLF001 + + +# --------------------------------------------------------------------------- # +# Edges builder # +# --------------------------------------------------------------------------- # +def test_build_edges_with_stub_edge(runner: SimulationRunner) -> None: + """ + `_build_edges()` must register exactly one `EdgeRuntime`, corresponding + to the single stub edge (generator → client) present in the minimal + topology fixture. + """ + runner._build_rqs_generator() # noqa: SLF001 + runner._build_client() # noqa: SLF001 + runner._build_edges() # noqa: SLF001 + assert len(runner._edges_runtime) == 1 # noqa: SLF001 + + +# --------------------------------------------------------------------------- # +# from_yaml utility # +# --------------------------------------------------------------------------- # +def test_from_yaml_minimal(tmp_path: Path, env: simpy.Environment) -> None: + """from_yaml() parses YAML, validates via Pydantic and returns a runner.""" + yml_payload = { + "rqs_input": { + "id": "gen-yaml", + "avg_active_users": {"mean": 1}, + "avg_request_per_minute_per_user": {"mean": 2}, + "user_sampling_window": 10, + }, + "topology_graph": { + "nodes": {"client": {"id": "cli-yaml"}, "servers": []}, + "edges": [], + }, + "sim_settings": {"total_simulation_time": 5}, + } + + yml_path: Path = tmp_path / "scenario.yml" + yml_path.write_text(yaml.safe_dump(yml_payload)) + + runner = SimulationRunner.from_yaml(env=env, yaml_path=yml_path) + + assert 
isinstance(runner, SimulationRunner) + assert runner.rqs_generator.id == "gen-yaml" + assert runner.client.id == "cli-yaml" diff --git a/tests/unit/samplers/test_gaussian_poisson.py b/tests/unit/samplers/test_gaussian_poisson.py new file mode 100644 index 0000000..657fae9 --- /dev/null +++ b/tests/unit/samplers/test_gaussian_poisson.py @@ -0,0 +1,110 @@ +"""Unit-tests for `gaussian_poisson_sampling`.""" + +from __future__ import annotations + +import itertools +from types import GeneratorType +from typing import TYPE_CHECKING + +import pytest +from numpy.random import Generator, default_rng + +from asyncflow.config.constants import TimeDefaults +from asyncflow.samplers.gaussian_poisson import ( + gaussian_poisson_sampling, +) +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + +if TYPE_CHECKING: + + from asyncflow.schemas.settings.simulation import SimulationSettings + +# --------------------------------------------------------------------------- +# FIXTURES +# --------------------------------------------------------------------------- + + +@pytest.fixture +def rqs_cfg() -> RqsGenerator: + """Minimal, valid RqsGenerator for Gaussian-Poisson tests.""" + return RqsGenerator( + id= "gen-1", + avg_active_users=RVConfig( + mean=10.0, + variance=4.0, + distribution="normal", + ), + avg_request_per_minute_per_user=RVConfig(mean=30.0), + user_sampling_window=TimeDefaults.USER_SAMPLING_WINDOW, + ) + + + +# --------------------------------------------------------------------------- +# BASIC BEHAVIOUR +# --------------------------------------------------------------------------- + + +def test_returns_generator_type( + rqs_cfg: RqsGenerator, + sim_settings: SimulationSettings, + rng: Generator, +) -> None: + """The function must return a generator object.""" + gen = gaussian_poisson_sampling(rqs_cfg, sim_settings, rng=rng) + assert isinstance(gen, GeneratorType) + + +def 
test_generates_positive_gaps( + rqs_cfg: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """ + With nominal parameters the sampler should emit at least a few positive + gaps, and the cumulative time must stay below the horizon. + """ + gaps: list[float] = list( + itertools.islice( + gaussian_poisson_sampling(rqs_cfg, sim_settings, rng=default_rng(42)), + 1000, + ), + ) + + assert gaps, "Expected at least one event" + assert all(g > 0.0 for g in gaps), "No gap may be ≤ 0" + assert sum(gaps) < sim_settings.total_simulation_time + + +# --------------------------------------------------------------------------- +# EDGE CASE: ZERO USERS +# --------------------------------------------------------------------------- + + +def test_zero_users_produces_no_events( + monkeypatch: pytest.MonkeyPatch, + rqs_cfg: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """ + If every Gaussian draw returns 0 users, Λ == 0 and the generator must + yield no events at all. + """ + + def fake_truncated_gaussian( + mean: float, + var: float, + rng: Generator, + ) -> float: + return 0.0 # force U = 0 + + monkeypatch.setattr( + "asyncflow.samplers.gaussian_poisson.truncated_gaussian_generator", + fake_truncated_gaussian, + ) + + gaps: list[float] = list( + gaussian_poisson_sampling(rqs_cfg, sim_settings, rng=default_rng(123)), + ) + + assert gaps == [] # no events should be generated diff --git a/tests/unit/samplers/test_poisson_poisson.py b/tests/unit/samplers/test_poisson_poisson.py new file mode 100644 index 0000000..c5d4a18 --- /dev/null +++ b/tests/unit/samplers/test_poisson_poisson.py @@ -0,0 +1,126 @@ +"""Unit tests for `poisson_poisson_sampling`.""" + +from __future__ import annotations + +import itertools +import math +from types import GeneratorType +from typing import TYPE_CHECKING + +import pytest +from numpy.random import Generator, default_rng + +from asyncflow.config.constants import TimeDefaults +from asyncflow.samplers.poisson_poisson import 
poisson_poisson_sampling +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + +if TYPE_CHECKING: + + from asyncflow.schemas.settings.simulation import SimulationSettings + + +@pytest.fixture +def rqs_cfg() -> RqsGenerator: + """Return a minimal, valid RqsGenerator for the sampler tests.""" + return RqsGenerator( + id="gen-1", + avg_active_users={"mean": 1.0, "distribution": "poisson"}, + avg_request_per_minute_per_user={"mean": 60.0, "distribution": "poisson"}, + user_sampling_window=TimeDefaults.USER_SAMPLING_WINDOW, + ) + +# -------------------------------------------------------- +# BASIC SHAPE AND TYPE TESTS +# -------------------------------------------------------- + + +def test_sampler_returns_generator( + rqs_cfg: RqsGenerator, + sim_settings: SimulationSettings, + rng: Generator, +) -> None: + """Function must return a generator object.""" + gen = poisson_poisson_sampling(rqs_cfg, sim_settings, rng=rng) + assert isinstance(gen, GeneratorType) + + +def test_all_gaps_are_positive( + rqs_cfg: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """Every yielded gap must be strictly positive.""" + gaps = list( + itertools.islice( + poisson_poisson_sampling(rqs_cfg, sim_settings, rng=default_rng(1)), + 1_000, + ), + ) + assert all(g > 0.0 for g in gaps) + + +# --------------------------------------------------------------------------- +# REPRODUCIBILITY WITH FIXED SEED +# --------------------------------------------------------------------------- + + +def test_sampler_is_reproducible_with_fixed_seed( + rqs_cfg: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """Same RNG seed must produce identical first N gaps.""" + seed = 42 + n_samples = 15 + + gaps_1 = list( + itertools.islice( + poisson_poisson_sampling(rqs_cfg, sim_settings, rng=default_rng(seed)), + n_samples, + ), + ) + gaps_2 = list( + itertools.islice( + poisson_poisson_sampling(rqs_cfg, 
sim_settings, rng=default_rng(seed)), + n_samples, + ), + ) + assert gaps_1 == gaps_2 + + +# --------------------------------------------------------------------------- +# EDGE CASE: ZERO USERS +# --------------------------------------------------------------------------- + + +def test_zero_users_produces_no_events( + sim_settings: SimulationSettings, +) -> None: + """If the mean user count is zero the generator must yield no events.""" + cfg_zero = RqsGenerator( + id="gen-1", + avg_active_users=RVConfig(mean=0.0, distribution="poisson"), + avg_request_per_minute_per_user=RVConfig(mean=60.0, distribution="poisson"), + user_sampling_window=TimeDefaults.USER_SAMPLING_WINDOW, + ) + + gaps: list[float] = list( + poisson_poisson_sampling(cfg_zero, sim_settings, rng=default_rng(123)), + ) + assert gaps == [] + + +# --------------------------------------------------------------------------- +# CUMULATIVE TIME NEVER EXCEEDS THE HORIZON +# --------------------------------------------------------------------------- + + +def test_cumulative_time_never_exceeds_horizon( + rqs_cfg: RqsGenerator, + sim_settings: SimulationSettings, +) -> None: + """Sum of gaps must stay below the simulation horizon.""" + gaps: list[float] = list( + poisson_poisson_sampling(rqs_cfg, sim_settings, rng=default_rng(7)), + ) + cum_time = math.fsum(gaps) + assert cum_time < sim_settings.total_simulation_time diff --git a/tests/unit/samplers/test_sampler_helper.py b/tests/unit/samplers/test_sampler_helper.py new file mode 100644 index 0000000..349a5fd --- /dev/null +++ b/tests/unit/samplers/test_sampler_helper.py @@ -0,0 +1,196 @@ +"""Unit-tests for helper-functions in +`app.core.event_samplers.common_helpers`. 
+""" +from __future__ import annotations + +from typing import cast + +import numpy as np +import pytest + +from asyncflow.config.constants import Distribution +from asyncflow.samplers.common_helpers import ( + exponential_variable_generator, + general_sampler, + lognormal_variable_generator, + poisson_variable_generator, + truncated_gaussian_generator, + uniform_variable_generator, +) +from asyncflow.schemas.common.random_variables import RVConfig + +# --------------------------------------------------------------------------- # +# Dummy RNG # +# --------------------------------------------------------------------------- # + + +class DummyRNG: + """Minimal stub mimicking the subset of the NumPy RNG API used in tests.""" + + def __init__( # noqa: D107 + self, + *, + uniform_value: float | None = None, + poisson_value: int | None = None, + normal_value: float | None = None, + lognormal_value: float | None = None, + exponential_value: float | None = None, + ) -> None: + self.uniform_value = uniform_value + self.poisson_value = poisson_value + self.normal_value = normal_value + self.lognormal_value = lognormal_value + self.exponential_value = exponential_value + + # --- uniform ----------------------------------------------------------- # + + def random(self) -> float: + """Return the preset ``uniform_value`` or fall back to a real RNG.""" + if self.uniform_value is not None: + return self.uniform_value + return float(np.random.default_rng().random()) + + # --- Poisson ----------------------------------------------------------- # + + def poisson(self, lam: float) -> int: + """Return the preset ``poisson_value`` or draw from a real Poisson.""" + if self.poisson_value is not None: + return self.poisson_value + return int(np.random.default_rng().poisson(lam)) + + # --- Normal ------------------------------------------------------------ # + + def normal(self, mean: float, sigma: float) -> float: + """Return the preset ``normal_value`` or draw from a real Normal.""" + if 
self.normal_value is not None: + return self.normal_value + return float(np.random.default_rng().normal(mean, sigma)) + + # --- Log-normal -------------------------------------------------------- # + + def lognormal(self, mean: float, sigma: float) -> float: + """Return the preset ``lognormal_value`` or draw from a real LogNormal.""" + if self.lognormal_value is not None: + return self.lognormal_value + return float(np.random.default_rng().lognormal(mean, sigma)) + + # --- Exponential ------------------------------------------------------- # + + def exponential(self, scale: float) -> float: + """Return the preset ``exponential_value`` or draw from a real Exponential.""" + if self.exponential_value is not None: + return self.exponential_value + return float(np.random.default_rng().exponential(scale)) + + +# --------------------------------------------------------------------------- # +# Tests for low-level generators # +# --------------------------------------------------------------------------- # + + +def test_uniform_variable_generator_with_dummy_rng() -> None: + """`uniform_variable_generator` returns the dummy's ``uniform_value``.""" + dummy = cast("np.random.Generator", DummyRNG(uniform_value=0.75)) + assert uniform_variable_generator(dummy) == 0.75 + + +def test_uniform_variable_generator_bounds() -> None: + """Calling with a real RNG yields a value in the half-open interval [0, 1).""" + rng = np.random.default_rng(1_234) + val = uniform_variable_generator(rng) + assert 0.0 <= val < 1.0 + + +def test_poisson_variable_generator_with_dummy_rng() -> None: + """`poisson_variable_generator` returns the dummy's ``poisson_value``.""" + dummy = cast("np.random.Generator", DummyRNG(poisson_value=3)) + assert poisson_variable_generator(mean=5.0, rng=dummy) == 3 + + +def test_poisson_variable_generator_reproducible() -> None: + """Two RNGs with the same seed produce identical Poisson draws.""" + rng1 = np.random.default_rng(42) + rng2 = np.random.default_rng(42) + v1 = 
poisson_variable_generator(7.0, rng1) + v2 = poisson_variable_generator(7.0, rng2) + assert v1 == v2 + + +def test_truncated_gaussian_generator_negative_clamped() -> None: + """Negative Normal draws are clamped to zero.""" + dummy = cast("np.random.Generator", DummyRNG(normal_value=-2.7)) + assert truncated_gaussian_generator(10.0, 5.0, dummy) == 0.0 + + +def test_truncated_gaussian_generator_positive_passthrough() -> None: + """Positive Normal draws pass through unchanged.""" + dummy = cast("np.random.Generator", DummyRNG(normal_value=3.9)) + val = truncated_gaussian_generator(10.0, 5.0, dummy) + assert isinstance(val, float) + assert val == 3.9 + + +def test_truncated_gaussian_generator_default_rng_non_negative() -> None: + """Real RNG always yields a non-negative float after truncation.""" + rng = np.random.default_rng(321) + assert truncated_gaussian_generator(10.0, 2.0, rng) >= 0.0 + + +def test_lognormal_variable_generator_reproducible() -> None: + """`lognormal_variable_generator` is reproducible with a fixed seed.""" + rng1 = np.random.default_rng(99) + rng2 = np.random.default_rng(99) + v1 = lognormal_variable_generator(1.0, 0.5, rng1) + v2 = lognormal_variable_generator(1.0, 0.5, rng2) + assert v1 == pytest.approx(v2) + + +def test_exponential_variable_generator_reproducible() -> None: + """`exponential_variable_generator` is reproducible with a fixed seed.""" + rng1 = np.random.default_rng(54_321) + rng2 = np.random.default_rng(54_321) + v1 = exponential_variable_generator(2.0, rng1) + v2 = exponential_variable_generator(2.0, rng2) + assert v1 == pytest.approx(v2) + + +# --------------------------------------------------------------------------- # +# Tests for `general_sampler` # +# --------------------------------------------------------------------------- # + + +def test_general_sampler_uniform_path() -> None: + """Uniform branch returns the dummy's preset value.""" + dummy = cast("np.random.Generator", DummyRNG(uniform_value=0.42)) + cfg = 
RVConfig(mean=1.0, distribution=Distribution.UNIFORM) + assert general_sampler(cfg, dummy) == 0.42 + + +def test_general_sampler_normal_path() -> None: + """Normal branch applies truncation logic (negative → 0).""" + dummy = cast("np.random.Generator", DummyRNG(normal_value=-1.2)) + cfg = RVConfig(mean=0.0, variance=1.0, distribution=Distribution.NORMAL) + assert general_sampler(cfg, dummy) == 0.0 + + +def test_general_sampler_poisson_path() -> None: + """Poisson branch returns the dummy's preset integer as *float*.""" + dummy = cast("np.random.Generator", DummyRNG(poisson_value=4)) + cfg = RVConfig(mean=5.0, distribution=Distribution.POISSON) + result = general_sampler(cfg, dummy) + assert isinstance(result, float) + assert result == 4.0 + + +def test_general_sampler_lognormal_path() -> None: + """Log-normal branch produces a strictly positive float.""" + rng = np.random.default_rng(2_025) + cfg = RVConfig(mean=0.0, variance=0.5, distribution=Distribution.LOG_NORMAL) + assert general_sampler(cfg, rng) > 0.0 + + +def test_general_sampler_exponential_path() -> None: + """Exponential branch produces a strictly positive float.""" + rng = np.random.default_rng(7) + cfg = RVConfig(mean=1.5, distribution=Distribution.EXPONENTIAL) + assert general_sampler(cfg, rng) > 0.0 diff --git a/tests/unit/schemas/test_endpoint.py b/tests/unit/schemas/test_endpoint.py new file mode 100644 index 0000000..080f55a --- /dev/null +++ b/tests/unit/schemas/test_endpoint.py @@ -0,0 +1,131 @@ +"""Unit tests for the Endpoint and Step Pydantic schemas.""" + +from __future__ import annotations + +import pytest +from pydantic import ValidationError + +from asyncflow.config.constants import ( + EndpointStepCPU, + EndpointStepIO, + EndpointStepRAM, + StepOperation, +) +from asyncflow.schemas.topology.endpoint import Endpoint, Step + + +# --------------------------------------------------------------------------- # +# Helper functions to build minimal valid Step objects +# 
--------------------------------------------------------------------------- # +def cpu_step(value: float = 0.1) -> Step: + """Return a minimal valid CPU-bound Step.""" + return Step( + kind=EndpointStepCPU.CPU_BOUND_OPERATION, + step_operation={StepOperation.CPU_TIME: value}, + ) + + +def ram_step(value: int = 128) -> Step: + """Return a minimal valid RAM Step.""" + return Step( + kind=EndpointStepRAM.RAM, + step_operation={StepOperation.NECESSARY_RAM: value}, + ) + + +def io_step(value: float = 0.05) -> Step: + """Return a minimal valid I/O Step.""" + return Step( + kind=EndpointStepIO.WAIT, + step_operation={StepOperation.IO_WAITING_TIME: value}, + ) + + +# --------------------------------------------------------------------------- # +# Positive test cases +# --------------------------------------------------------------------------- # +def test_valid_cpu_step() -> None: + """Test that a CPU step with correct 'cpu_time' operation passes validation.""" + step = cpu_step() + # The operation value must match the input + assert step.step_operation[StepOperation.CPU_TIME] == 0.1 + + +def test_valid_ram_step() -> None: + """Test that a RAM step with correct 'necessary_ram' operation passes validation.""" + step = ram_step() + assert step.step_operation[StepOperation.NECESSARY_RAM] == 128 + + +def test_valid_io_step() -> None: + """ + Test that an I/O step with correct 'io_waiting_time' + operation passes validation. 
+ """ + step = io_step() + assert step.step_operation[StepOperation.IO_WAITING_TIME] == 0.05 + + +def test_endpoint_with_mixed_steps() -> None: + """Test that an Endpoint with multiple valid Step instances normalizes the name.""" + ep = Endpoint( + endpoint_name="/Predict", + steps=[cpu_step(), ram_step(), io_step()], + ) + # endpoint_name should be lowercased by the validator + assert ep.endpoint_name == "/predict" + # All steps should be present in the list + assert len(ep.steps) == 3 + + +# --------------------------------------------------------------------------- # +# Negative test cases +# --------------------------------------------------------------------------- # +@pytest.mark.parametrize( + ("kind", "bad_operation"), + [ + # CPU step with RAM operation + (EndpointStepCPU.CPU_BOUND_OPERATION, {StepOperation.NECESSARY_RAM: 64}), + # RAM step with CPU operation + (EndpointStepRAM.RAM, {StepOperation.CPU_TIME: 0.2}), + # I/O step with CPU operation + (EndpointStepIO.DB, {StepOperation.CPU_TIME: 0.05}), + ], +) +def test_incoherent_kind_operation_pair( + kind: EndpointStepCPU | EndpointStepRAM | EndpointStepIO, + bad_operation: dict[StepOperation, float | int], +) -> None: + """Test that mismatched kind and operation combinations raise ValidationError.""" + with pytest.raises(ValidationError): + Step(kind=kind, step_operation=bad_operation) + + +def test_multiple_operation_not_allowed() -> None: + """ + Test that providing multiple operation in a single Step + raises ValidationError. 
+ """ + with pytest.raises(ValidationError): + Step( + kind=EndpointStepCPU.CPU_BOUND_OPERATION, + step_operation={ + StepOperation.CPU_TIME: 0.1, + StepOperation.NECESSARY_RAM: 64, + }, + ) + + +def test_empty_operation_rejected() -> None: + """Test that an empty operation dict is rejected by the validator.""" + with pytest.raises(ValidationError): + Step(kind=EndpointStepCPU.CPU_BOUND_OPERATION, step_operation={}) + + +def test_wrong_operation_name_for_io() -> None: + """Test that an I/O step with a non-I/O operation key is rejected.""" + with pytest.raises(ValidationError): + Step( + kind=EndpointStepIO.CACHE, + step_operation={StepOperation.NECESSARY_RAM: 64}, + ) diff --git a/tests/unit/schemas/test_generator.py b/tests/unit/schemas/test_generator.py new file mode 100644 index 0000000..608adc4 --- /dev/null +++ b/tests/unit/schemas/test_generator.py @@ -0,0 +1,219 @@ +"""Validation tests for RVConfig, RqsGenerator and SimulationSettings.""" +from __future__ import annotations + +import pytest +from pydantic import ValidationError + +from asyncflow.config.constants import Distribution, TimeDefaults +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.settings.simulation import SimulationSettings +from asyncflow.schemas.workload.rqs_generator import RqsGenerator + +# --------------------------------------------------------------------------- # +# RVCONFIG # +# --------------------------------------------------------------------------- # + + +def test_normal_sets_variance_to_mean() -> None: + """If variance is omitted for 'normal', it defaults to mean.""" + cfg = RVConfig(mean=10, distribution=Distribution.NORMAL) + assert cfg.variance == 10.0 + + +def test_log_normal_sets_variance_to_mean() -> None: + """If variance is omitted for 'log_normal', it defaults to mean.""" + cfg = RVConfig(mean=5, distribution=Distribution.LOG_NORMAL) + assert cfg.variance == 5.0 + + +def test_poisson_keeps_variance_none() -> None: + """If variance 
is omitted for 'poisson', it remains None.""" + cfg = RVConfig(mean=5, distribution=Distribution.POISSON) + assert cfg.variance is None + + +def test_uniform_keeps_variance_none() -> None: + """If variance is omitted for 'uniform', it remains None.""" + cfg = RVConfig(mean=1, distribution=Distribution.UNIFORM) + assert cfg.variance is None + + +def test_exponential_keeps_variance_none() -> None: + """If variance is omitted for 'exponential', it remains None.""" + cfg = RVConfig(mean=2.5, distribution=Distribution.EXPONENTIAL) + assert cfg.variance is None + + +def test_explicit_variance_is_preserved() -> None: + """An explicit variance value is not modified.""" + cfg = RVConfig(mean=8, distribution=Distribution.NORMAL, variance=4) + assert cfg.variance == 4.0 + + +def test_mean_must_be_numeric() -> None: + """A non-numeric mean triggers a ValidationError.""" + with pytest.raises(ValidationError): + RVConfig(mean="not a number", distribution=Distribution.POISSON) + + +def test_missing_mean_field() -> None: + """Omitting mean raises a 'field required' ValidationError.""" + with pytest.raises(ValidationError): + RVConfig.model_validate({"distribution": Distribution.NORMAL}) + + +def test_default_distribution_is_poisson() -> None: + """If distribution is missing, it defaults to 'poisson'.""" + cfg = RVConfig(mean=3.3) + assert cfg.distribution == Distribution.POISSON + assert cfg.variance is None + + +def test_explicit_variance_kept_for_poisson() -> None: + """Variance is kept even when distribution is poisson.""" + cfg = RVConfig(mean=4.0, distribution=Distribution.POISSON, variance=2.2) + assert cfg.variance == pytest.approx(2.2) + + +def test_invalid_distribution_literal_raises() -> None: + """An unsupported distribution literal raises ValidationError.""" + with pytest.raises(ValidationError): + RVConfig(mean=5.0, distribution="not_a_dist") + + +# --------------------------------------------------------------------------- # +# RqsGenerator - USER_SAMPLING_WINDOW & 
DISTRIBUTION CONSTRAINTS # +# --------------------------------------------------------------------------- # + + +def _valid_poisson_cfg(mean: float = 1.0) -> dict[str, float | str]: + """Helper: minimal Poisson config for JSON-style input.""" + return {"mean": mean, "distribution": Distribution.POISSON} + + +def _valid_normal_cfg(mean: float = 1.0) -> dict[str, float | str]: + """Helper: minimal Normal config for JSON-style input.""" + return {"mean": mean, "distribution": Distribution.NORMAL} + + +def test_default_user_sampling_window() -> None: + """If user_sampling_window is missing it defaults to the constant.""" + inp = RqsGenerator( + id="rqs-1", + avg_active_users=_valid_poisson_cfg(), + avg_request_per_minute_per_user=_valid_poisson_cfg(), + ) + assert inp.user_sampling_window == TimeDefaults.USER_SAMPLING_WINDOW + + +def test_explicit_user_sampling_window_kept() -> None: + """An explicit user_sampling_window is preserved.""" + inp = RqsGenerator( + id="rqs-1", + avg_active_users=_valid_poisson_cfg(), + avg_request_per_minute_per_user=_valid_poisson_cfg(), + user_sampling_window=30, + ) + assert inp.user_sampling_window == 30 + + +def test_user_sampling_window_not_int_raises() -> None: + """A non-integer user_sampling_window raises ValidationError.""" + with pytest.raises(ValidationError): + RqsGenerator( + id="rqs-1", + avg_active_users=_valid_poisson_cfg(), + avg_request_per_minute_per_user=_valid_poisson_cfg(), + user_sampling_window="not-int", + ) + + +def test_user_sampling_window_above_max_raises() -> None: + """user_sampling_window above the max constant raises ValidationError.""" + too_large = TimeDefaults.MAX_USER_SAMPLING_WINDOW + 1 + with pytest.raises(ValidationError): + RqsGenerator( + id="rqs-1", + avg_active_users=_valid_poisson_cfg(), + avg_request_per_minute_per_user=_valid_poisson_cfg(), + user_sampling_window=too_large, + ) + + +def test_avg_request_must_be_poisson() -> None: + """avg_request_per_minute_per_user must be Poisson; Normal 
raises.""" + with pytest.raises(ValidationError): + RqsGenerator( + id="rqs-1", + avg_active_users=_valid_poisson_cfg(), + avg_request_per_minute_per_user=_valid_normal_cfg(), + ) + + +def test_avg_active_users_invalid_distribution_raises() -> None: + """avg_active_users cannot be Exponential; only Poisson or Normal allowed.""" + bad_cfg = {"mean": 1.0, "distribution": Distribution.EXPONENTIAL} + with pytest.raises(ValidationError): + RqsGenerator( + id="rqs-1", + avg_active_users=bad_cfg, + avg_request_per_minute_per_user=_valid_poisson_cfg(), + ) + + +def test_valid_poisson_poisson_configuration() -> None: + """Poisson-Poisson combo is accepted.""" + cfg = RqsGenerator( + id="rqs-1", + avg_active_users=_valid_poisson_cfg(), + avg_request_per_minute_per_user=_valid_poisson_cfg(), + ) + assert cfg.avg_active_users.distribution is Distribution.POISSON + assert ( + cfg.avg_request_per_minute_per_user.distribution + is Distribution.POISSON + ) + + +def test_valid_normal_poisson_configuration() -> None: + """Normal-Poisson combo is accepted.""" + cfg = RqsGenerator( + id="rqs-1", + avg_active_users=_valid_normal_cfg(), + avg_request_per_minute_per_user=_valid_poisson_cfg(), + ) + assert cfg.avg_active_users.distribution is Distribution.NORMAL + assert ( + cfg.avg_request_per_minute_per_user.distribution + is Distribution.POISSON + ) + + +# --------------------------------------------------------------------------- # +# SIMULATIONSETTINGS - TOTAL_SIMULATION_TIME # +# --------------------------------------------------------------------------- # + + +def test_default_total_simulation_time() -> None: + """If total_simulation_time is missing it defaults to the constant.""" + settings = SimulationSettings() + assert settings.total_simulation_time == TimeDefaults.SIMULATION_TIME + + +def test_explicit_total_simulation_time_kept() -> None: + """An explicit total_simulation_time is preserved.""" + settings = SimulationSettings(total_simulation_time=3_000) + assert 
settings.total_simulation_time == 3_000 + + +def test_total_simulation_time_not_int_raises() -> None: + """A non-integer total_simulation_time raises ValidationError.""" + with pytest.raises(ValidationError): + SimulationSettings(total_simulation_time="three thousand") + + +def test_total_simulation_time_below_minimum_raises() -> None: + """A total_simulation_time below the minimum constant raises ValidationError.""" + too_small = TimeDefaults.MIN_SIMULATION_TIME - 1 + with pytest.raises(ValidationError): + SimulationSettings(total_simulation_time=too_small) diff --git a/tests/unit/schemas/test_topology.py b/tests/unit/schemas/test_topology.py new file mode 100644 index 0000000..0ef53e0 --- /dev/null +++ b/tests/unit/schemas/test_topology.py @@ -0,0 +1,305 @@ +"""Unit-tests for topology schemas (Client, ServerResources, Edge, …)""" + +from __future__ import annotations + +import pytest +from pydantic import ValidationError + +from asyncflow.config.constants import ( + EndpointStepCPU, + NetworkParameters, + ServerResourcesDefaults, + StepOperation, + SystemEdges, + SystemNodes, +) +from asyncflow.schemas.common.random_variables import RVConfig +from asyncflow.schemas.topology.edges import Edge +from asyncflow.schemas.topology.endpoint import Endpoint, Step +from asyncflow.schemas.topology.graph import TopologyGraph +from asyncflow.schemas.topology.nodes import ( + Client, + LoadBalancer, + Server, + ServerResources, + TopologyNodes, +) + +# --------------------------------------------------------------------------- # +# Client # +# --------------------------------------------------------------------------- # + + +def test_valid_client() -> None: + """A client with correct ``type`` validates.""" + cli = Client(id="frontend", type=SystemNodes.CLIENT) + assert cli.type is SystemNodes.CLIENT + + +def test_invalid_client_type() -> None: + """Wrong ``type`` enumeration on Client raises ValidationError.""" + with pytest.raises(ValidationError): + Client(id="oops", 
type=SystemNodes.SERVER) + + +# --------------------------------------------------------------------------- # +# ServerResources # +# --------------------------------------------------------------------------- # + + +def test_server_resources_defaults() -> None: + """All defaults match constant table.""" + res = ServerResources() + assert res.cpu_cores == ServerResourcesDefaults.CPU_CORES + assert res.ram_mb == ServerResourcesDefaults.RAM_MB + assert res.db_connection_pool is ServerResourcesDefaults.DB_CONNECTION_POOL + + +def test_server_resources_min_constraints() -> None: + """Values below minimum trigger validation failure.""" + with pytest.raises(ValidationError): + ServerResources(cpu_cores=0, ram_mb=128) # too small + + +# --------------------------------------------------------------------------- # +# Server # +# --------------------------------------------------------------------------- # + + +def _dummy_endpoint() -> Endpoint: + """Return a minimal valid Endpoint for Server construction.""" + step = Step( + kind=EndpointStepCPU.CPU_BOUND_OPERATION, + step_operation={StepOperation.CPU_TIME: 0.1}, + ) + return Endpoint(endpoint_name="/ping", steps=[step]) + + +def test_valid_server() -> None: + """Server with correct ``type`` and resources passes validation.""" + srv = Server( + id="api-1", + type=SystemNodes.SERVER, + server_resources=ServerResources(cpu_cores=2, ram_mb=1024), + endpoints=[_dummy_endpoint()], + ) + assert srv.id == "api-1" + + +def test_invalid_server_type() -> None: + """Server with wrong ``type`` raises ValidationError.""" + with pytest.raises(ValidationError): + Server( + id="bad-srv", + type=SystemNodes.CLIENT, + server_resources=ServerResources(), + endpoints=[_dummy_endpoint()], + ) + +# --------------------------------------------------------------------------- # +# Load Balancer # +# --------------------------------------------------------------------------- # + +def test_valid_lb() -> None: + """A LB with correct ``type`` 
validates.""" + cli = LoadBalancer( + id="LB", + type=SystemNodes.LOAD_BALANCER, + server_covered=["s1", "s2"], + ) + assert cli.type is SystemNodes.LOAD_BALANCER + +# --------------------------------------------------------------------------- # +# TopologyNodes # +# --------------------------------------------------------------------------- # + + +def _single_node_topology() -> TopologyNodes: + """Helper returning one server + one client topology.""" + srv = Server( + id="svc-A", + server_resources=ServerResources(), + endpoints=[_dummy_endpoint()], + ) + cli = Client(id="browser") + return TopologyNodes(servers=[srv], client=cli) + + +def test_unique_ids_validator() -> None: + """Duplicate node IDs trigger the ``unique_ids`` validator.""" + nodes = _single_node_topology() + dup_srv = nodes.servers[0].model_copy(update={"id": "browser"}) + with pytest.raises(ValidationError): + TopologyNodes(servers=[dup_srv], client=nodes.client) + + +# --------------------------------------------------------------------------- # +# Edge # +# --------------------------------------------------------------------------- # + + +def test_edge_source_equals_target_fails() -> None: + """Edge with identical source/target raises ValidationError.""" + latency_cfg = RVConfig(mean=0.05) + with pytest.raises(ValidationError): + Edge( + id="edge-dup", + source="same", + target="same", + latency=latency_cfg, + edge_type=SystemEdges.NETWORK_CONNECTION, + ) + + +def test_edge_missing_id_raises() -> None: + """Omitting mandatory ``id`` field raises ValidationError.""" + latency_cfg = RVConfig(mean=0.01) + with pytest.raises(ValidationError): + Edge( # type: ignore[call-arg] + source="a", + target="b", + latency=latency_cfg, + ) + + +@pytest.mark.parametrize( + "bad_rate", + [-0.1, NetworkParameters.MAX_DROPOUT_RATE + 0.1], +) +def test_edge_dropout_rate_bounds(bad_rate: float) -> None: + """Drop-out rate outside valid range triggers ValidationError.""" + with pytest.raises(ValidationError): + 
Edge( + id="edge-bad-drop", + source="n1", + target="n2", + latency=RVConfig(mean=0.01), + dropout_rate=bad_rate, + ) + + +# --------------------------------------------------------------------------- # +# TopologyGraph # +# --------------------------------------------------------------------------- # + + +def _latency() -> RVConfig: + """Tiny helper for latency objects.""" + return RVConfig(mean=0.02) + +def _topology_with_lb( + cover: set[str], + extra_edges: list[Edge] | None = None, +) -> TopologyGraph: + """Build a minimal graph with 1 client, 1 server and a load balancer.""" + nodes = _single_node_topology() + lb = LoadBalancer(id="lb-1", server_covered=cover) + nodes = TopologyNodes( + servers=nodes.servers, + client=nodes.client, + load_balancer=lb, + ) + + edges: list[Edge] = [ + Edge( # client -> LB + id="cli-lb", + source="browser", + target="lb-1", + latency=_latency(), + ), + Edge( # LB -> server (may be removed in invalid tests) + id="lb-srv", + source="lb-1", + target="svc-A", + latency=_latency(), + ), + ] + if extra_edges: + edges.extend(extra_edges) + return TopologyGraph(nodes=nodes, edges=edges) + + +def test_valid_topology_graph() -> None: + """Happy-path graph passes validation.""" + nodes = _single_node_topology() + edge = Edge( + id="edge-1", + source="browser", + target="svc-A", + latency=_latency(), + probability=1.0, + ) + graph = TopologyGraph(nodes=nodes, edges=[edge]) + assert len(graph.edges) == 1 + +def test_topology_graph_without_lb_still_valid() -> None: + """Graph without load balancer validates just like before.""" + nodes = _single_node_topology() + edge = Edge( + id="edge-1", + source="browser", + target="svc-A", + latency=_latency(), + ) + graph = TopologyGraph(nodes=nodes, edges=[edge]) + assert graph.nodes.load_balancer is None + + + +def test_edge_refers_unknown_node() -> None: + """Edge pointing to a non-existent node fails validation.""" + nodes = _single_node_topology() + bad_edge = Edge( + id="edge-ghost", + 
source="browser", + target="ghost-srv", + latency=_latency(), + ) + with pytest.raises(ValidationError): + TopologyGraph(nodes=nodes, edges=[bad_edge]) + + +# --------------------------------------------------------------------------- # +# 2) LB is valid # +# --------------------------------------------------------------------------- # +def test_load_balancer_valid_graph() -> None: + """LB covering a server with proper edges passes validation.""" + graph = _topology_with_lb({"svc-A"}) + assert graph.nodes.load_balancer is not None + assert graph.nodes.load_balancer.server_covered == {"svc-A"} + + +# --------------------------------------------------------------------------- # +# 3) LB con server inesistente # +# --------------------------------------------------------------------------- # +def test_lb_references_unknown_server() -> None: + """LB that lists a non-existent server triggers ValidationError.""" + with pytest.raises(ValidationError): + _topology_with_lb({"ghost-srv"}) + + +# --------------------------------------------------------------------------- # +# 4) LB no edge with a server covered # +# --------------------------------------------------------------------------- # +def test_lb_missing_edge_to_covered_server() -> None: + """LB covers svc-A but edge LB→svc-A is missing → ValidationError.""" + # costruiamo il grafo senza l'edge lb-srv + nodes = _single_node_topology() + lb = LoadBalancer(id="lb-1", server_covered={"svc-A"}) + nodes = TopologyNodes( + servers=nodes.servers, + client=nodes.client, + load_balancer=lb, + ) + edges = [ + Edge( + id="cli-lb", + source="browser", + target="lb-1", + latency=_latency(), + ), + ] + with pytest.raises(ValidationError): + TopologyGraph(nodes=nodes, edges=edges) + + diff --git a/tests/unit/test_health_route.py b/tests/unit/test_health_route.py deleted file mode 100644 index 57cfe1f..0000000 --- a/tests/unit/test_health_route.py +++ /dev/null @@ -1,8 +0,0 @@ -from fastapi.testclient import TestClient - - -def 
test_health_endpoint_returns_ok(client: TestClient) -> None: - """Ensure the /health endpoint returns HTTP 200 and the expected JSON payload.""" - response = client.get("/health") - assert response.status_code == 200 - assert response.json() == {"status": "ok"} diff --git a/tests/unit/test_settings.py b/tests/unit/test_settings.py deleted file mode 100644 index 9a277bc..0000000 --- a/tests/unit/test_settings.py +++ /dev/null @@ -1,18 +0,0 @@ -from app.config.settings import Settings - - -def test_settings_defaults() -> None: - """Ensure that default settings are applied correctly. - - This test verifies that the Settings class properly handles explicit configuration - values and applies the correct defaults for unspecified fields. - """ - s = Settings( - db_host="localhost", - db_user="x", - db_password="y", - db_name="z", - db_url="postgresql+asyncpg://x:y@localhost/z", - ) - assert s.environment == "test" - assert "postgresql" in s.db_url