diff --git a/.github/workflows/ci-develop.yml b/.github/workflows/ci-develop.yml index 9727868..0f82cfd 100644 --- a/.github/workflows/ci-develop.yml +++ b/.github/workflows/ci-develop.yml @@ -1,100 +1,51 @@ -name: CI – Develop Branch - - -# Triggers -# -------- -# • pull_request → quick job (lint + type-check + unit tests) -# • push → full job (quick job + DB migrations + integration tests + -# Docker build & smoke test) +name: CI – Main Branch (Single Job, System Required) on: pull_request: - branches: [develop] - push: - branches: [develop] + branches: [main] + workflow_dispatch: +concurrency: + group: ci-main-${{ github.ref }} + cancel-in-progress: true -# Job 1 ─ Quick validation (executed only on pull_request events) -# --------------------------------------------------------------------------- # -# Runs fast checks that give reviewers immediate feedback. No external -# services or Docker are used to keep runtime under one minute. +env: + PYTHON_VERSION: "3.12" + MPLBACKEND: Agg + ASYNCFLOW_RUN_SYSTEM_TESTS: "1" jobs: - quick: - if: github.event_name == 'pull_request' + all-checks: runs-on: ubuntu-latest + timeout-minutes: 25 steps: - # Checkout repository - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - # Install Python 3.12 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: - python-version: "3.12" + python-version: ${{ env.PYTHON_VERSION }} + cache: 'pip' - # Restore Poetry cache for faster installs - uses: actions/cache@v3 with: path: ~/.cache/pypoetry key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }} - # Install project + development dependencies - - name: Install dependencies + - name: Install Poetry & deps run: | curl -sSL https://install.python-poetry.org | python3 - + export PATH="$HOME/.local/bin:$PATH" poetry config virtualenvs.create false poetry install --with dev --no-interaction - # Code quality gates - - name: Run Ruff (lint & formatting check) + - name: Ruff (lint) run: poetry run ruff check src tests - - name: Run MyPy (type-check) + - name: MyPy (type-check) run: poetry run mypy src tests - - # Unit-tests only (exclude integration markers) - - name: Run unit tests - env: - ENVIRONMENT: test - run: poetry run pytest -m "not integration" --disable-warnings - - - -# Job 2 ─ Full validation (executed only on push events) - full: - if: | - github.event_name == 'push' && - github.ref == 'refs/heads/develop' - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: { python-version: '3.12' } - - uses: actions/cache@v3 - with: - path: ~/.cache/pypoetry - key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }} - - name: Install dependencies - run: | - curl -sSL https://install.python-poetry.org | python3 - - poetry config virtualenvs.create false - poetry install --with dev --no-interaction - - - name: Run Ruff - run: poetry run ruff check src tests - - - name: Run mypy - run: poetry run mypy src - - - name: Run all tests + - name: All tests (unit + integration + system) run: | poetry run pytest \ - --cov=src --cov-report=term \ --disable-warnings - - - - \ No newline at end of file diff --git a/.github/workflows/ci-main.yml b/.github/workflows/ci-main.yml new file mode 100644 index 0000000..e69de29 diff --git a/README.md b/README.md index 2005eea..fca1fce 100644 --- a/README.md +++ b/README.md @@ -1,149 +1,98 @@ -# **AsyncFlow – Event-Loop Aware Simulation for Backend Systems** -## **1. 
Overview**
+# AsyncFlow — Event-Loop Aware Simulator for Async Distributed Systems
 
-Modern asynchronous Python stacks such as **FastAPI + Uvicorn** deliver impressive performance, yet capacity planning for production workloads often relies on guesswork, costly cloud-based load tests, or late-stage troubleshooting.
+Created and maintained by @GioeleB00.
 
-**AsyncFlow** addresses this challenge by providing a **digital twin** of your service that can be run entirely offline. It models event-loop behaviour, resource constraints, and request lifecycles, enabling you to forecast performance under different workloads and architectural choices **before deployment**.
+[![PyPI](https://img.shields.io/pypi/v/asyncflow-sim)](https://pypi.org/project/asyncflow-sim/)
+[![Python](https://img.shields.io/pypi/pyversions/asyncflow-sim)](https://pypi.org/project/asyncflow-sim/)
+[![License](https://img.shields.io/github/license/AsyncFlow-Sim/AsyncFlow)](LICENSE)
+[![Status](https://img.shields.io/badge/status-v0.1.0alpha-orange)](#)
+[![Ruff](https://img.shields.io/badge/lint-ruff-informational)](https://github.com/astral-sh/ruff)
+[![Typing](https://img.shields.io/badge/typing-mypy-blueviolet)](https://mypy-lang.org/)
+[![Tests](https://img.shields.io/badge/tests-pytest-6DA55F)](https://docs.pytest.org/)
+[![SimPy](https://img.shields.io/badge/built%20with-SimPy-1f425f)](https://simpy.readthedocs.io/)
 
-AsyncFlow allows you to answer questions such as:
+-----
 
-* *What happens to p95 latency if traffic doubles during a peak event?*
-* *How many cores are required to maintain SLAs at scale?*
-* *What is the cost-per-request of adding a large language model (LLM) inference step?*
+AsyncFlow is a SimPy-based discrete-event simulator for modeling and analyzing the performance of asynchronous, distributed backend systems. You describe your system's topology—its servers, network links, and load balancers—and AsyncFlow simulates the entire lifecycle of requests as they move through it.
 
-The simulation outputs metrics identical in form to those collected in production—such as p50/p95/p99 latency, concurrency, resource utilisation, and throughput—making results directly actionable.
+It provides a **digital twin** of your service, modeling not just the high-level architecture but also the low-level behavior of each server's **event loop**, including explicit **CPU work**, **RAM residency**, and **I/O waits**. This allows you to run realistic "what-if" scenarios that behave like production systems rather than toy benchmarks.
 
-**Outcome:** Data-driven capacity planning, early performance tuning, and reduced operational surprises.
+### What Problem Does It Solve?
 
----
-
-## **2. Key Features**
-
-* **Event-loop accuracy** – Models a single-threaded asynchronous runtime, including CPU-bound work, I/O waits, and parsing.
-* **Resource modelling** – Simulates CPU cores, memory, connection pools, and rate limiters as discrete resources.
-* **Network simulation** – Models transport latency per edge in the system topology.
-* **Workload flexibility** – Supports stochastic arrival processes (e.g., Poisson, Normal) for user traffic generation.
-* **Metrics parity with production** – Produces time-series and event-level metrics aligned with observability tools.
-* **Offline and repeatable** – No need for costly cloud infrastructure to conduct performance tests.
-
----
-
-## **3. Installation**
+Modern async stacks like FastAPI are incredibly performant, but predicting their behavior under real-world load is difficult. 
Capacity planning often relies on guesswork, expensive cloud-based load tests, or discovering bottlenecks only after a production failure. AsyncFlow is designed to replace that uncertainty with **data-driven forecasting**, allowing you to understand how your system will perform before you deploy a single line of code. -Until published, clone the repository and install in editable mode: - -### Requirements -- Python 3.11+ (recommended 3.12) -- Poetry ≥ 1.6 +### How Does It Work? An Example Topology -AsyncFlow uses [Poetry](https://python-poetry.org/) for dependency management. -If you do not have Poetry installed, follow these steps. +AsyncFlow models your system as a directed graph of interconnected components. A typical setup might look like this: -### 3.1 Install Poetry (official method) +![Topology at a glance](readme_img/topology.png) -**Linux / macOS** +### What Questions Can It Answer? -```bash -curl -sSL https://install.python-poetry.org | python3 - -``` - -**Windows (PowerShell)** - -```powershell -(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python - -``` - -> **Note:** Ensure that Poetry’s binary directory is in your `PATH`. -> On Linux/macOS this is typically `~/.local/bin`; -> on Windows it is `%APPDATA%\Python\Scripts` or the path printed at the end of installation. +By running simulations on your defined topology, you can get quantitative answers to critical engineering questions, such as: + * How does **p95 latency** change if active users increase from 100 to 200? + * What is the impact on the system if the **client-to-server network latency** increases by 3ms? + * Will a specific API endpoint—with a pipeline of parsing, RAM allocation, and database I/O—hold its **SLA at a load of 40 requests per second**? --- -### 3.2 Clone the repository and set up a local virtual environment - -```bash -# Clone the repository -git clone https://github.com/GioeleB00/AsyncFlow-Backend.git -cd AsyncFlow-Backend - -# Configure Poetry to always create a local `.venv` inside the project -poetry config virtualenvs.in-project true - -# Install all dependencies (main + dev) inside the local venv -poetry install --with dev -``` - -After this step, you will see a `.venv/` directory inside the project root. -To activate the environment manually: +## Installation -```bash -source .venv/bin/activate # Linux / macOS -.venv\Scripts\activate # Windows -``` +Install from PyPI: `pip install asyncflow-sim` -Or simply run commands via Poetry without manual activation, for example: -```bash -poetry run pytest -poetry run python examples/single_server.py -``` +## Requirements +* **Python 3.12+** (tested on 3.12, 3.13) +* **OS:** Linux, macOS, or Windows +* **Installed automatically (runtime deps):** + **SimPy** (DES engine), **NumPy**, **Matplotlib**, **Pydantic** + **pydantic-settings**, **PyYAML**. --- -## **4. Quick Start** +## Quick Start -### 1. Define your simulation payload +### 1) Define a realistic YAML -Go to the folder `/examples` open the file `single_server.py` -and run it from the terminal, you will see the output of the system -described in `/examples/data/single_server.yml` and you will see a -`.png` file with different plots. +Save as `my_service.yml`. -If you want to build your own configuration, read the guide in the `/docs` folder on how to craft a `.yml` input correctly. +The full YAML schema is explained in `docs/guides/yaml-input-builder.md` and validated by Pydantic models (see `docs/internals/simulation-input.md`). 
```yaml rqs_input: id: generator-1 - avg_active_users: - mean: 100 - distribution: poisson - avg_request_per_minute_per_user: - mean: 20 - distribution: poisson + avg_active_users: { mean: 100, distribution: poisson } + avg_request_per_minute_per_user: { mean: 20, distribution: poisson } user_sampling_window: 60 topology_graph: nodes: - client: - id: client-1 - type: client + client: { id: client-1 } + servers: - - id: app-server-1 - type: server - server_resources: - cpu_cores: 2 - ram_mb: 2048 + - id: app-1 + server_resources: { cpu_cores: 1, ram_mb: 2048 } endpoints: - - endpoint_name: /predict + - endpoint_name: /api + # Realistic pipeline on one async server: + # - 2 ms CPU parsing (blocks the event loop) + # - 120 MB RAM working set (held until the request leaves the server) + # - 12 ms DB-like I/O (non-blocking wait) steps: + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } - kind: ram - step_operation: { necessary_ram: 100 } - - kind: cpu - step_operation: { cpu_time: 0.005 } + step_operation: { necessary_ram: 120 } + - kind: io_db + step_operation: { io_waiting_time: 0.012 } + edges: - - id: gen-to-client - source: generator-1 - target: client-1 - latency: { mean: 0.003, distribution: exponential } - - id: client-to-server - source: client-1 - target: app-server-1 - latency: { mean: 0.003, distribution: exponential } - - id: server-to-client - source: app-server-1 - target: client-1 - latency: { mean: 0.003, distribution: exponential } + - { id: gen-client, source: generator-1, target: client-1, + latency: { mean: 0.003, distribution: exponential } } + - { id: client-app, source: client-1, target: app-1, + latency: { mean: 0.003, distribution: exponential } } + - { id: app-client, source: app-1, target: client-1, + latency: { mean: 0.003, distribution: exponential } } sim_settings: total_simulation_time: 300 @@ -151,183 +100,275 @@ sim_settings: enabled_sample_metrics: - ready_queue_len - ram_in_use + - edge_concurrent_connection enabled_event_metrics: - rqs_clock ``` -and add it to the `/examples/data` folder - -### 2. Run the simulation +Prefer building scenarios in Python? There’s a Python builder with the same semantics (create nodes, edges, endpoints programmatically). See **`docs/guides/python-builder.md`**. -build a python file in the `/examples` folder and copy the -following script replacing `` with the -real name +### 2) Run and export charts +Save as `run_my_service.py`. 
```python
+from __future__ import annotations
 from pathlib import Path
-
 import simpy
 import matplotlib.pyplot as plt
-from asyncflow.config.constants import LatencyKey
 from asyncflow.runtime.simulation_runner import SimulationRunner
 from asyncflow.metrics.analyzer import ResultsAnalyzer
 
-def print_latency_stats(res: ResultsAnalyzer) -> None:
-    """Print latency statistics returned by the analyzer."""
-    stats = res.get_latency_stats()
-    print("\n=== LATENCY STATS ===")
-    if not stats:
-        print("(empty)")
-        return
-
-    order: list[LatencyKey] = [
-        LatencyKey.TOTAL_REQUESTS,
-        LatencyKey.MEAN,
-        LatencyKey.MEDIAN,
-        LatencyKey.STD_DEV,
-        LatencyKey.P95,
-        LatencyKey.P99,
-        LatencyKey.MIN,
-        LatencyKey.MAX,
-    ]
-    for key in order:
-        if key in stats:
-            print(f"{key.name:<20} = {stats[key]:.6f}")
-
-def save_all_plots(res: ResultsAnalyzer, out_path: Path) -> None:
-    """Generate the 2x2 plot figure and save it to `out_path`."""
-    fig, axes = plt.subplots(2, 2, figsize=(12, 8))
+
+def main() -> None:
+    script_dir = Path(__file__).parent
+    yaml_path = script_dir / "my_service.yml"
+    out_path = script_dir / "my_service_plots.png"
+
+    env = simpy.Environment()
+    runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path)
+    res: ResultsAnalyzer = runner.run()
+
+    # Print a concise latency summary
+    print(res.format_latency_stats())
+
+    # 2x2: Latency | Throughput | Ready (first server) | RAM (first server)
+    fig, axes = plt.subplots(2, 2, figsize=(12, 8), dpi=160)
+
     res.plot_latency_distribution(axes[0, 0])
     res.plot_throughput(axes[0, 1])
-    res.plot_server_queues(axes[1, 0])
-    res.plot_ram_usage(axes[1, 1])
+
+    sids = res.list_server_ids()
+    if sids:
+        sid = sids[0]
+        res.plot_single_server_ready_queue(axes[1, 0], sid)
+        res.plot_single_server_ram(axes[1, 1], sid)
+    else:
+        for ax in (axes[1, 0], axes[1, 1]):
+            ax.text(0.5, 0.5, "No servers", ha="center", va="center")
+            ax.axis("off")
+
     fig.tight_layout()
     fig.savefig(out_path)
     print(f"Plots saved to: {out_path}")
 
-# Paths
-yaml_path = Path(__file__).parent / "data" / "<file-name>.yml"
-out_path = Path(__file__).parent / "<file-name>_plots.png"
 
-# Simulation
-env = simpy.Environment()
-runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path)
-results: ResultsAnalyzer = runner.run()
-
-# Output
-print_latency_stats(results)
-save_all_plots(results, out_path)
+if __name__ == "__main__":
+    main()
 ```
 
-run the script and you will see the different plots and on your terminal
-you will see the latency stats
----
+
+Run the script with `python run_my_service.py`.
+
+You'll get latency stats in the terminal and a PNG with four charts (latency distribution, throughput, and the first server's ready queue and RAM usage).
+
+**Want more?**
 
-## **5. Target Users and Use Cases**
+For ready-to-run scenarios—including examples using the Pythonic builder and multi-server topologies—check out the `examples/` directory in the repository.
 
-| Audience | Challenge | AsyncFlow Value |
-| ------------------------ | ------------------------------------------------- | -------------------------------------------------------------------------------- |
-| Backend Engineers | Sizing services for variable workloads | Model endpoint workflows and resource bottlenecks before deployment |
-| DevOps / SRE | Balancing cost and SLA | Simulate scaling scenarios to choose optimal capacity |
-| ML / LLM Teams | Unclear latency/cost impact of inference steps | Integrate stochastic inference times and cost models into the service simulation |
-| Educators | Explaining async runtime internals | Demonstrate queueing, blocking effects, and CPU vs. 
I/O trade-offs | -| System Design Candidates | Quantifying architecture trade-offs in interviews | Prototype a simulated design to visualise scalability and bottlenecks | +## Development + +If you want to contribute or run the full test suite locally, follow these steps. + +### Requirements + +* **Python 3.12+** (tested on 3.12, 3.13) +* **OS:** Linux, macOS, or Windows +* **Runtime deps installed by the package:** SimPy, NumPy, Matplotlib, Pydantic, PyYAML, pydantic-settings + +**Prerequisites:** Git, Python 3.12+ in `PATH`, `curl` (Linux/macOS/WSL), PowerShell 7+ (Windows) --- -## **6. Project Structure** +## Project setup -The project follows a standard Python package layout, managed with Poetry. +```bash +git clone https://github.com/AsyncFlow-Sim/AsyncFlow.git +cd AsyncFlow +``` +From the repo root, run the **one-shot post-clone setup**: + +**Linux / macOS / WSL** + +```bash +bash scripts/dev_setup.sh ``` -AsyncFlow-Backend/ -├── examples/ # Examples payloads and datasets -├── scripts/ # Utility scripts (linting, startup) -├── docs/ # Project vision and technical documentation -├── tests/ # Unit and integration tests -├── src/ -│ └── app/ -│ ├── config/ # Settings and constants -│ ├── metrics/ # KPI computation and aggregation -│ ├── resources/ # SimPy resource registry -│ ├── runtime/ # Simulation core and actors -│ ├── samplers/ # Random variable generators -│ └── schemas/ # Pydantic models for validation -├── pyproject.toml -└── README.md + +**Windows (PowerShell)** + +```powershell +# If scripts are blocked by policy, run this in the same PowerShell session: +# Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass +.\scripts\dev_setup.ps1 ``` ---- +**What this does (concise):** -## **7. Development Workflow** +* Ensures **Poetry** is available (installs if missing). +* Uses a **project-local `.venv`**. +* Removes `poetry.lock` for a **clean dependency resolve** (dev policy). +* Installs the project **with dev extras**. +* Runs **ruff**, **mypy**, and **pytest (with coverage)**. -AsyncFlow uses **Poetry** for dependency management and enforces quality via **Ruff** and **MyPy**. +**Quick sanity check after setup:** -| Task | Command | Description | -| ------------- | --------------------------------- | -------------------------------------- | -| Install deps | `poetry install --with dev` | Main and development dependencies | -| Lint & format | `poetry run ruff check src tests` | Style and best-practice checks | -| Type checking | `poetry run mypy src tests` | Static type enforcement | -| Run tests | `poetry run pytest` | Execute all unit and integration tests | +```bash +poetry --version +poetry run python -V +``` + +> **Note (lock policy):** `dev_setup` intentionally removes `poetry.lock` to avoid cross-platform conflicts during development. + +**Scripts (for quick access):** + +* [`scripts/dev_setup.sh`](scripts/dev_setup.sh) / [`scripts/dev_setup.ps1`](scripts/dev_setup.ps1) +* [`scripts/quality_check.sh`](scripts/quality_check.sh) / [`scripts/quality_check.ps1`](scripts/quality_check.ps1) +* [`scripts/run_tests.sh`](scripts/run_tests.sh) / [`scripts/run_tests.ps1`](scripts/run_tests.ps1) --- -## **8. Continuous Integration** +### Handy scripts (after setup) -The GitHub Actions pipeline enforces: +#### 1) Lint + type check -* **Pull Requests:** Ruff, MyPy, and unit tests for rapid feedback. -* **Develop branch:** Full suite including integration tests and coverage reporting. 
+**Linux / macOS / WSL** -No code is merged without passing all checks, ensuring maintainability and reliability. +```bash +bash scripts/quality_check.sh +``` ---- +**Windows (PowerShell)** -## **9. Current Limitations (v0.1)** +```powershell +.\scripts\quality_check.ps1 +``` -1. **Network delay model** – Bandwidth effects and payload size are not yet considered. -2. **Concurrency model** – Single-threaded async event-loop; no native multi-thread or multi-process simulation. -3. **CPU allocation** – One process per server instance; multi-core within a process is not yet modelled. +Runs **ruff** (lint/format check) and **mypy** on `src` and `tests`. -In addition to the items already listed (simplified network delay, single-threaded async model, and one process per server), keep in mind: +#### 2) Run tests with coverage (unit + integration) -* **Stationary, independent workload.** Traffic is sampled from stationary distributions; there is no diurnal seasonality, burst shaping, or feedback coupling (e.g., servers slowing down arrivals). Average users and per-user RPM are sampled independently. -* **Simplified request flow.** Endpoints execute a linear sequence of steps; there is no conditional branching/fan-out within an endpoint (e.g., cache hit/miss paths, error paths) and no per-request control flow. -* **Network realism is limited.** Beyond base latency and optional drops, the model does not account for payload size, bandwidth constraints, TCP behavior (slow start, congestion), retries/timeouts, or jitter. -* **No backpressure or autoscaling.** The generator does not adapt to server state (queues, errors), and there is no policy loop for rate limiting or scaling during the run. -* **Telemetry granularity.** Sampled metrics are collected at a fixed `sample_period_s` and may miss very short-lived spikes unless you lower the period (at a runtime cost). Event resolution itself is not affected by the sampling period. -* **Reproducibility.** Unless you fix a random seed (not yet exposed in all entry points), repeated runs will vary within the chosen distributions. +**Linux / macOS / WSL** ---- +```bash +bash scripts/run_tests.sh +``` -## Mini Roadmap +**Windows (PowerShell)** -Short, high-impact items we plan to add next: +```powershell +.\scripts\run_tests.ps1 +``` -1. **Cache modeling.** First-class cache layers (per-endpoint hit/miss with TTL and warm-up), configurable hit-ratio profiles, and their effect on latency, CPU, and RAM. -2. **LLM inference as a step + cost accounting.** Treat inference as a dedicated endpoint step with its own latency distribution, concurrency limits/batching, and per-request cost model (tokens, provider pricing). -3. **Fault and event injection.** Time-based events (node down/up, degraded edge, error-rate spikes) with deterministic timelines to test resilience and recovery. -4. **Network bandwidth & payload size.** Throughput-aware links, request/response sizes, retries/timeouts, and simple congestion effects. -5. **Branching/control-flow within endpoints.** Conditional steps (e.g., cache hit vs. miss), probabilistic routing, and fan-out/fan-in to external services. -6. **Backpressure and autoscaling loops.** Rate limiting tied to queue depth/latency SLOs and simple scale-up/down policies during a run. +#### 3) Run system tests +**Linux / macOS / WSL** -Future milestones will extend these capabilities. +```bash +bash scripts/run_sys_tests.sh +``` ---- +**Windows (PowerShell)** -## **10. 
Documentation**
-
-Comprehensive documentation is available in the `/docs` directory, covering:
-
-* Simulation model and architecture
-* Schema definitions
-* Example scenarios
-* Extension guidelines
-* Guide to build valid .yaml as valid simulation input
----
+
+
+## What AsyncFlow Models (v0.1)
+
+AsyncFlow provides a detailed simulation of your backend system. Here is a high-level overview of the core components it models. For a deeper technical dive into the implementation and design rationale, follow the links to the internal documentation.
+
+* **Async Event Loop:** Simulates a single-threaded, non-blocking event loop per server. **CPU steps** block the loop, while **I/O steps** are non-blocking, accurately modeling `asyncio` behavior.
+  * *(Deep Dive: `docs/internals/runtime-and-resources.md`)*
+
+* **System Resources:** Models finite server resources, including **CPU cores** and **RAM (MB)**. Requests must acquire these resources, creating natural back-pressure and contention when the system is under load.
+  * *(Deep Dive: `docs/internals/runtime-and-resources.md`)*
+
+* **Endpoints & Request Lifecycles:** Models server endpoints as a linear sequence of **steps**. Each step is a distinct operation, such as `cpu_bound_operation`, `io_wait`, or `ram` allocation.
+  * *(Schema Definition: `docs/internals/simulation-input.md`)*
+
+* **Network Edges:** Simulates the connections between system components. Each edge has a configurable **latency** (drawn from a probability distribution) and an optional **dropout rate** to model packet loss.
+  * *(Schema Definition: `docs/internals/simulation-input.md` | Runtime Behavior: `docs/internals/runtime-and-resources.md`)*
+
+* **Stochastic Workload:** Generates user traffic based on a two-stage sampling model, combining the number of active users and their request rate per minute to produce a realistic, fluctuating load (RPS) on the system.
+  * *(Modeling details, mathematical derivation, and assumptions: `docs/internals/requests-generator.md`)*
+
+* **Metrics & Outputs:** Collects two types of data: **time-series metrics** (e.g., `ready_queue_len`, `ram_in_use`) and **event-based data** (`RqsClock`). This raw data is used to calculate final KPIs like **p95/p99 latency** and **throughput**.
+  * *(Metric Reference: `docs/internals/metrics`)*
+
+## Current Limitations (v0.1)
+
+* Network realism: base latency + optional drops (no bandwidth/payload/TCP modeling yet).
+* Single event loop per server: no multi-process or multi-node servers yet.
+* Linear endpoint flows: no branching/fan-out within an endpoint.
+* No thread-level concurrency; modeling OS threads and scheduler/context switching is out of scope.
+* Stationary workload: no diurnal patterns or feedback/backpressure.
+* Sampling cadence: very short spikes can be missed if `sample_period_s` is large.
+
+## Roadmap (order does not indicate priority)
+
+This roadmap outlines the key development areas that will turn AsyncFlow into a comprehensive framework for statistical analysis and resilience modeling of distributed systems.
+
+### 1. Monte Carlo Simulation Engine
+
+**Why:** To overcome the limitations of a single simulation run and obtain statistically robust results. This transforms the simulator from an "intuition" tool into an engineering tool for data-driven decisions with confidence intervals. 
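+
+The bullets that follow detail the planned engine. As a rough stopgap, independent replications can already be scripted against the current public API. A minimal sketch, assuming that run-to-run variability comes from the stochastic samplers (explicit per-run seed control is precisely what this roadmap item adds):
+
+```python
+import statistics
+
+import simpy
+
+from asyncflow.config.constants import LatencyKey
+from asyncflow.runtime.simulation_runner import SimulationRunner
+
+p95_samples: list[float] = []
+for _ in range(20):
+    env = simpy.Environment()  # fresh SimPy environment per replication
+    runner = SimulationRunner.from_yaml(env=env, yaml_path="my_service.yml")
+    stats = runner.run().get_latency_stats()
+    p95_samples.append(stats[LatencyKey.P95])
+
+# Ensemble summary across replications
+print(f"p95 latency: {statistics.mean(p95_samples):.4f}s "
+      f"± {statistics.stdev(p95_samples):.4f}s over {len(p95_samples)} runs")
+```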
+ +* **Independent Replications:** Run the same simulation N times with different random seeds to sample the space of possible outcomes. +* **Warm-up Period Management:** Introduce a "warm-up" period to be discarded from the analysis, ensuring that metrics are calculated only on the steady-state portion of the simulation. +* **Ensemble Aggregation:** Calculate means, standard deviations, and confidence intervals for aggregated metrics (latency, throughput) across all replications. +* **Confidence Bands:** Visualize time-series data (e.g., queue lengths) with confidence bands to show variability over time. + +### 2. Realistic Service Times (Stochastic Service Times) + +**Why:** Constant service times underestimate tail latencies (p95/p99), which are almost always driven by "slow" requests. Modeling this variability is crucial for a realistic analysis of bottlenecks. + +* **Distributions for Steps:** Allow parameters like `cpu_time` and `io_waiting_time` in an `EndpointStep` to be sampled from statistical distributions (e.g., Lognormal, Gamma, Weibull) instead of being fixed values. +* **Per-Request Sampling:** Each request will sample its own service times independently, simulating the natural variability of a real-world system. + +### 3. Component Library Expansion + +**Why:** To increase the variety and realism of the architectures that can be modeled. + +* **New System Nodes:** + * `CacheRuntime`: To model caching layers (e.g., Redis) with hit/miss logic, TTL, and warm-up behavior. + * `APIGatewayRuntime`: To simulate API Gateways with features like rate-limiting and authentication caching. + * `DBRuntime`: A more advanced model for databases featuring connection pool contention and row-level locking. +* **New Load Balancer Algorithms:** Add more advanced routing strategies (e.g., Weighted Round Robin, Least Response Time). + +### 4. Fault and Event Injection + +**Why:** To test the resilience and behavior of the system under non-ideal conditions, a fundamental use case for Site Reliability Engineering (SRE). + +* **API for Scheduled Events:** Introduce a system to schedule events at specific simulation times, such as: + * **Node Down/Up:** Turn a server off and on to test the load balancer's failover logic. + * **Degraded Edge:** Drastically increase the latency or drop rate of a network link. + * **Error Bursts:** Simulate a temporary increase in the rate of application errors. + +### 5. Advanced Network Modeling + +**Why:** To more faithfully model network-related bottlenecks that are not solely dependent on latency. + +* **Bandwidth and Payload Size:** Introduce the concepts of link bandwidth and request/response size to simulate delays caused by data transfer. +* **Retries and Timeouts:** Model retry and timeout logic at the client or internal service level. + +### 6. Complex Endpoint Flows + +**Why:** To model more realistic business logic that does not follow a linear path. + +* **Conditional Branching:** Introduce the ability to have conditional steps within an endpoint (e.g., a different path for a cache hit vs. a cache miss). +* **Fan-out / Fan-in:** Model scenarios where a service calls multiple downstream services in parallel and waits for their responses. + +### 7. Backpressure and Autoscaling + +**Why:** To simulate the behavior of modern, adaptive systems that react to load. + +* **Dynamic Rate Limiting:** Introduce backpressure mechanisms where services slow down the acceptance of new requests if their internal queues exceed a certain threshold. 
+* **Autoscaling Policies:** Model simple Horizontal Pod Autoscaler (HPA) policies where the number of server replicas increases or decreases based on metrics like CPU utilization or queue length. + diff --git a/docs/api/analyzer.md b/docs/api/analyzer.md new file mode 100644 index 0000000..eb86e6b --- /dev/null +++ b/docs/api/analyzer.md @@ -0,0 +1,208 @@ +# ResultsAnalyzer — Public API Documentation + +Analyze and visualize the outcome of an AsyncFlow simulation. +`ResultsAnalyzer` consumes raw runtime objects (client, servers, edges, settings), +computes latency and throughput aggregates, exposes sampled series, and offers +compact plotting helpers built on Matplotlib. + +--- + +## Quick start + +```python +import simpy +from matplotlib import pyplot as plt +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.metrics.analyzer import ResultsAnalyzer, SampledMetricName + +# 1) Run a simulation and get an analyzer +env = simpy.Environment() +runner = SimulationRunner.from_yaml(env=env, yaml_path="data/single_server.yml") +res: ResultsAnalyzer = runner.run() + +# 2) Text summary +print(res.format_latency_stats()) + +# 3) Plot the dashboard (latency histogram + throughput) +fig, (ax_lat, ax_rps) = plt.subplots(1, 2, figsize=(12, 4), dpi=160) +res.plot_base_dashboard(ax_lat, ax_rps) +fig.tight_layout() +fig.savefig("dashboard.png") + +# 4) Single-server plots +server_id = res.list_server_ids()[0] +fig_rdy, ax_rdy = plt.subplots(figsize=(8, 4), dpi=160) +res.plot_single_server_ready_queue(ax_rdy, server_id) +fig_rdy.tight_layout() +fig_rdy.savefig(f"ready_{server_id}.png") +``` + +--- + +## Data model & units + +* **Latency**: seconds (s). +* **Throughput**: requests per second (RPS). +* **Sampled metrics** (per server/edge): series captured at a fixed sampling + period `settings.sample_period_s` (e.g., queue length, RAM usage). + Units depend on the metric (RAM is typically MB). + +--- + +## Computed metrics + +* **Latency statistics** (global): + `TOTAL_REQUESTS, MEAN, MEDIAN, STD_DEV, P95, P99, MIN, MAX`. +* **Throughput time series**: per-window RPS (default cached at 1 s buckets). +* **Sampled metrics**: raw, per-entity series keyed by + `SampledMetricName` (or its string value). + +--- + +## Class reference + +### Constructor + +```python +ResultsAnalyzer( + *, + client: ClientRuntime, + servers: list[ServerRuntime], + edges: list[EdgeRuntime], + settings: SimulationSettings, +) +``` + +The analyzer is **lazy**: metrics are computed on first access. + +### Core methods + +* `process_all_metrics() -> None` + Forces computation of latency stats, throughput cache (1 s), and sampled metrics. + +* `get_latency_stats() -> dict[LatencyKey, float]` + Returns the global latency stats. Computes them if needed. + +* `format_latency_stats() -> str` + Returns a ready-to-print block with latency statistics. + +* `get_throughput_series(window_s: float | None = None) -> tuple[list[float], list[float]]` + Returns `(timestamps, rps)`. If `window_s` is `None` or `1.0`, the cached + 1-second series is returned; otherwise a fresh series is computed. + +* `get_sampled_metrics() -> dict[str, dict[str, list[float]]]` + Returns sampled metrics as `{metric_key: {entity_id: [values...]}}`. + +* `get_metric_map(key: SampledMetricName | str) -> dict[str, list[float]]` + Gets the per-entity series map for a metric. Accepts either the enum value or + the raw string key. 
+ +* `get_series(key: SampledMetricName | str, entity_id: str) -> tuple[list[float], list[float]]` + Returns time/value series for a given metric and entity. + Time coordinates are `i * settings.sample_period_s`. + +* `list_server_ids() -> list[str]` + Returns server IDs in a stable, topology order. + +--- + +## Plotting helpers + +All plotting methods draw on a **Matplotlib `Axes`** provided by the caller and +do **not** manage figure lifecycles. + +> When there is no data for the requested plot, the axis is annotated with the +> corresponding `no_data` message from `plot_constants`. + +### Dashboard + +* `plot_base_dashboard(ax_latency: Axes, ax_throughput: Axes) -> None` + Convenience: calls the two methods below. + +* `plot_latency_distribution(ax: Axes) -> None` + Latency histogram with **vertical overlays** (mean, P50, P95, P99) and a + **single legend box** (top-right) that shows each statistic with its matching + colored handle. + +* `plot_throughput(ax: Axes, *, window_s: float | None = None) -> None` + Throughput line with **horizontal overlays** (mean, P95, max) and a + **single legend box** (top-right) that shows values and colors for each line. + +### Single-server plots + +Each single-server plot: + +* draws the main series, + +* overlays **mean / min / max** as horizontal lines (distinct styles/colors), + +* shows a **single legend box** with values for mean/min/max, + +* **does not** include a legend entry for the main series (title suffices). + +* `plot_single_server_ready_queue(ax: Axes, server_id: str) -> None` + Ready queue length over time (per server). + +* `plot_single_server_io_queue(ax: Axes, server_id: str) -> None` + I/O queue/sleep metric over time (per server). + +* `plot_single_server_ram(ax: Axes, server_id: str) -> None` + RAM usage over time (per server). + +## Behavior & design notes + +* **Laziness & caching** + + * Latency stats and the 1 s throughput series are cached on first use. + * Calling `get_throughput_series(window_s=...)` with a custom window computes + a fresh series (not cached). + +* **Stability** + + * `list_server_ids()` follows the topology order for readability across runs. + +* **Error handling** + + * Multi-server plotting methods validate the number of axes and raise + `ValueError` with a descriptive message. + +* **Matplotlib integration** + + * The analyzer **does not** close figures or call `plt.show()`. + * Titles, axes labels, and “no data” messages are taken from + `asyncflow.config.plot_constants`. + +* **Thread-safety** + + * The analyzer is not designed for concurrent mutation. Use from a single + thread after the simulation completes. + +--- + +## Examples + +### Custom throughput window + +```python +fig, ax = plt.subplots(figsize=(8, 3), dpi=160) +res.plot_throughput(ax, window_s=2.0) # 2-second buckets +fig.tight_layout() +fig.savefig("throughput_2s.png") +``` + +### Access a sampled metric series + +```python +from asyncflow.metrics.analyzer import SampledMetricName + +server_id = res.list_server_ids()[0] +t, qlen = res.get_series(SampledMetricName.READY_QUEUE_LEN, server_id) +# t: [0.0, 0.1, 0.2, ...] (scaled by sample_period_s) +# qlen: [.. values ..] +``` + +--- + +If you need additional KPIs (e.g., tail latency over time, backlog, or +utilization), the current structure makes it straightforward to add new helpers +alongside the existing plotting methods. 
diff --git a/docs/guides/builder.md b/docs/guides/python-builder.md
similarity index 100%
rename from docs/guides/builder.md
rename to docs/guides/python-builder.md
diff --git a/docs/internals/workload-samplers.md b/docs/internals/requests-generator.md
similarity index 100%
rename from docs/internals/workload-samplers.md
rename to docs/internals/requests-generator.md
diff --git a/examples/builder_input/load_balancer/lb_dashboard.png b/examples/builder_input/load_balancer/lb_dashboard.png
new file mode 100644
index 0000000..4d94cfe
Binary files /dev/null and b/examples/builder_input/load_balancer/lb_dashboard.png differ
diff --git a/examples/builder_input/load_balancer/lb_server_srv-1_metrics.png b/examples/builder_input/load_balancer/lb_server_srv-1_metrics.png
new file mode 100644
index 0000000..1665766
Binary files /dev/null and b/examples/builder_input/load_balancer/lb_server_srv-1_metrics.png differ
diff --git a/examples/builder_input/load_balancer/lb_server_srv-2_metrics.png b/examples/builder_input/load_balancer/lb_server_srv-2_metrics.png
new file mode 100644
index 0000000..cdda50f
Binary files /dev/null and b/examples/builder_input/load_balancer/lb_server_srv-2_metrics.png differ
diff --git a/examples/builder_input/load_balancer/two_servers.png b/examples/builder_input/load_balancer/two_servers.png
deleted file mode 100644
index 11ceaf3..0000000
Binary files a/examples/builder_input/load_balancer/two_servers.png and /dev/null differ
diff --git a/examples/builder_input/load_balancer/two_servers.py b/examples/builder_input/load_balancer/two_servers.py
index d179959..fb2eb35 100644
--- a/examples/builder_input/load_balancer/two_servers.py
+++ b/examples/builder_input/load_balancer/two_servers.py
@@ -1,213 +1,104 @@
 #!/usr/bin/env python3
 """
-Didactic example: build and run an AsyncFlow scenario **with a Load Balancer**
-and two backend servers, using the builder (AsyncFlow) — no YAML.
-
-Topology:
-    generator ──> client ──> LB ──> srv-1
-                              └─> srv-2
-    srv-1 ──> client
-    srv-2 ──> client
-
-Load:
-    ~120 active users, 20 req/min each (Poisson by default).
-
-Servers:
-    srv-1: 1 CPU core, 1GB RAM, endpoint with CPU→RAM→IO
-    srv-2: 2 CPU cores, 2GB RAM, endpoint with RAM→IO(DB-like)
-
-Network:
-    2–3ms mean (exponential) latency on each edge.
-
-What this script does:
-    1) Build Pydantic models (generator, client, LB, servers, edges, settings).
-    2) Compose the SimulationPayload via AsyncFlow (builder pattern).
-    3) Run the simulation with SimulationRunner.
-    4) Print latency stats, throughput timeline, and a sampled-metrics preview.
-    5) Save a 2×2 plot figure (latency, throughput, server queues, RAM).
+Didactic example: AsyncFlow with a Load Balancer and two **identical** servers.
+
+Goal
+----
+Show a realistic, symmetric backend behind a load balancer, and export plots
+that match the public `ResultsAnalyzer` API (no YAML needed).
+
+Topology
+--------
+    generator ──edge──> client ──edge──> LB ──edge──> srv-1
+                                          └──edge──> srv-2
+    srv-1 ──edge──> client
+    srv-2 ──edge──> client
+
+Load model
+----------
+~120 active users, 20 requests/min each (Poisson-like aggregate by default).
+
+Server model (both srv-1 and srv-2)
+-----------------------------------
+• 1 CPU core, 2 GB RAM
+• Endpoint pipeline: CPU(2 ms) → RAM(128 MB) → I/O wait (15 ms)
+  - CPU step blocks the event loop
+  - RAM step holds a working set until the request completes
+  - I/O step is non-blocking (event-loop friendly)
+
+Network model
+-------------
+Every edge uses an exponential latency with mean 3 ms. 
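+
+Expected load (rough arithmetic)
+--------------------------------
+120 users × 20 req/min ÷ 60 s/min ≈ 40 rps in aggregate, i.e. about 20 rps
+per server under round-robin.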
+
+Outputs
+-------
+• Prints latency statistics to stdout
+• Saves, in the same folder as this script:
+  - `lb_dashboard.png` (Latency histogram + Throughput)
+  - `lb_server_<sid>_metrics.png` for each server (Ready / I/O / RAM)
+"""
 
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Iterable, List, Mapping, TYPE_CHECKING
 
-import numpy as np
 import simpy
+import matplotlib.pyplot as plt
+
+# Public AsyncFlow API (builder-style)
+from asyncflow import AsyncFlow
+from asyncflow.components import Client, Server, Edge, Endpoint, LoadBalancer
+from asyncflow.settings import SimulationSettings
+from asyncflow.workload import RqsGenerator
 
-# ── AsyncFlow domain imports (match your working paths) ────────────────────────
-from asyncflow.builder.asyncflow_builder import AsyncFlow
+# Runner + Analyzer
 from asyncflow.runtime.simulation_runner import SimulationRunner
 from asyncflow.metrics.analyzer import ResultsAnalyzer
-from asyncflow.schemas.payload import SimulationPayload
-from asyncflow.schemas.workload.rqs_generator import RqsGenerator
-from asyncflow.schemas.settings.simulation import SimulationSettings
-from asyncflow.schemas.topology.endpoint import Endpoint
-from asyncflow.schemas.topology.nodes import Client, Server, LoadBalancer
-from asyncflow.schemas.topology.edges import Edge
-from asyncflow.config.constants import LatencyKey, SampledMetricName
-
-
-
-
-# ─────────────────────────────────────────────────────────────
-# Pretty printers (compact, readable output)
-# ─────────────────────────────────────────────────────────────
-def print_latency_stats(res: ResultsAnalyzer) -> None:
-    stats: Mapping[LatencyKey, float] = res.get_latency_stats()
-    print("\n════════ LATENCY STATS ════════")
-    if not stats:
-        print("(empty)")
-        return
-
-    order: List[LatencyKey] = [
-        LatencyKey.TOTAL_REQUESTS,
-        LatencyKey.MEAN,
-        LatencyKey.MEDIAN,
-        LatencyKey.STD_DEV,
-        LatencyKey.P95,
-        LatencyKey.P99,
-        LatencyKey.MIN,
-        LatencyKey.MAX,
-    ]
-    for key in order:
-        if key in stats:
-            print(f"{key.name:<20} = {stats[key]:.6f}")
-
-
-def print_throughput(res: ResultsAnalyzer) -> None:
-    timestamps, rps = res.get_throughput_series()
-    print("\n════════ THROUGHPUT (req/sec) ════════")
-    if not timestamps:
-        print("(empty)")
-        return
-    for t, rate in zip(timestamps, rps):
-        print(f"t={t:4.1f}s → {rate:6.2f} rps")
-
-
-def print_sampled_preview(res: ResultsAnalyzer) -> None:
-    sampled = res.get_sampled_metrics()
-    print("\n════════ SAMPLED METRICS (preview) ════════")
-    if not sampled:
-        print("(empty)")
-        return
-
-    # Keys may be enums or strings depending on your analyzer; handle both. 
- def _name(m): # pragma: no cover - return m.name if hasattr(m, "name") else str(m) - - for metric, series in sampled.items(): - print(f"\n📈 {_name(metric)}:") - for entity, vals in series.items(): - head = list(vals[:5]) if vals else [] - print(f" - {entity}: len={len(vals)}, first={head}") - - -# ───────────────────────────────────────────────────────────── -# Tiny helpers for sanity checks (optional) -# ───────────────────────────────────────────────────────────── -def _mean(series: Iterable[float]) -> float: - arr = np.asarray(list(series), dtype=float) - return float(np.mean(arr)) if arr.size else 0.0 - - -def run_sanity_checks( - runner: SimulationRunner, - res: ResultsAnalyzer, -) -> None: - print("\n════════ SANITY CHECKS (rough) ════════") - w = runner.simulation_input.rqs_input - lam_rps = ( - float(w.avg_active_users.mean) - * float(w.avg_request_per_minute_per_user.mean) - / 60.0 - ) - # Observed throughput - _, rps_series = res.get_throughput_series() - rps_observed = _mean(rps_series) - print( - f"• Mean throughput (rps) expected≈{lam_rps:.3f} " - f"observed={rps_observed:.3f}" - ) - - sampled = res.get_sampled_metrics() - ram_series = sampled.get(SampledMetricName.RAM_IN_USE, {}) - ioq_series = sampled.get(SampledMetricName.EVENT_LOOP_IO_SLEEP, {}) - ready_series = sampled.get(SampledMetricName.READY_QUEUE_LEN, {}) - - ram_mean = _mean([_mean(v) for v in ram_series.values()]) if ram_series else 0.0 - ioq_mean = _mean([_mean(v) for v in ioq_series.values()]) if ioq_series else 0.0 - ready_mean = _mean([_mean(v) for v in ready_series.values()]) if ready_series else 0.0 - print(f"• Mean RAM in use (MB) observed={ram_mean:.3f}") - print(f"• Mean I/O queue length observed={ioq_mean:.3f}") - print(f"• Mean ready queue length observed={ready_mean:.3f}") - - -# ───────────────────────────────────────────────────────────── -# Build the LB + 2 servers scenario via AsyncFlow (builder) -# ───────────────────────────────────────────────────────────── -def build_payload_with_lb() -> SimulationPayload: - """ - Construct the SimulationPayload programmatically using the builder: - - Generator (120 users, 20 rpm each) - - Client - - Load balancer (round_robin) covering two servers - - Two servers with distinct endpoints - - Edges for all hops (gen→client, client→lb, lb→srv1/2, srv1/2→client) - - Simulation settings: 600s total, sample period 20ms - """ - # 1) Request generator +def main() -> None: + # ── 1) Build the scenario programmatically (no YAML) ──────────────────── + # Workload (traffic generator) generator = RqsGenerator( id="rqs-1", - avg_active_users={"mean": 120}, # Poisson default - avg_request_per_minute_per_user={"mean": 20}, # MUST be Poisson + avg_active_users={"mean": 120}, + avg_request_per_minute_per_user={"mean": 20}, user_sampling_window=60, ) - # 2) Client + # Client client = Client(id="client-1") - # 3) Servers with distinct endpoints - ep_srv1 = Endpoint( + # Two identical servers: CPU(2ms) → RAM(128MB) → IO(15ms) + endpoint = Endpoint( endpoint_name="/api", - # include 'probability' if your Endpoint schema supports it - probability=1.0, # remove if your Endpoint doesn't have this field + probability=1.0, steps=[ {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, - {"kind": "ram", "step_operation": {"necessary_ram": 64}}, - {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.012}}, + {"kind": "ram", "step_operation": {"necessary_ram": 128}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.015}}, ], ) + srv1 = Server( id="srv-1", 
- server_resources={"cpu_cores": 1, "ram_mb": 1024}, - endpoints=[ep_srv1], - ) - - ep_srv2 = Endpoint( - endpoint_name="/api", - probability=1.0, # remove if not supported in your schema - steps=[ - {"kind": "ram", "step_operation": {"necessary_ram": 96}}, - {"kind": "io_db", "step_operation": {"io_waiting_time": 0.020}}, - ], + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[endpoint], ) srv2 = Server( id="srv-2", - server_resources={"cpu_cores": 2, "ram_mb": 2048}, - endpoints=[ep_srv2], + server_resources={"cpu_cores": 1, "ram_mb": 2048}, + endpoints=[endpoint], ) - # 4) Load balancer (round_robin) + # Load balancer (round-robin) lb = LoadBalancer( id="lb-1", algorithms="round_robin", server_covered={"srv-1", "srv-2"}, ) - # 5) Edges with exponential latency (2–3 ms) + # Network edges (3 ms mean, exponential) edges = [ Edge( id="gen-client", @@ -219,19 +110,19 @@ def build_payload_with_lb() -> SimulationPayload: id="client-lb", source="client-1", target="lb-1", - latency={"mean": 0.002, "distribution": "exponential"}, + latency={"mean": 0.003, "distribution": "exponential"}, ), Edge( id="lb-srv1", source="lb-1", target="srv-1", - latency={"mean": 0.002, "distribution": "exponential"}, + latency={"mean": 0.003, "distribution": "exponential"}, ), Edge( id="lb-srv2", source="lb-1", target="srv-2", - latency={"mean": 0.002, "distribution": "exponential"}, + latency={"mean": 0.003, "distribution": "exponential"}, ), Edge( id="srv1-client", @@ -247,10 +138,10 @@ def build_payload_with_lb() -> SimulationPayload: ), ] - # 6) Simulation settings + # Simulation settings settings = SimulationSettings( total_simulation_time=600, - sample_period_s=0.02, + sample_period_s=0.05, enabled_sample_metrics=[ "ready_queue_len", "event_loop_io_sleep", @@ -260,8 +151,8 @@ def build_payload_with_lb() -> SimulationPayload: enabled_event_metrics=["rqs_clock"], ) - # 7) Assemble the payload via the builder - flow = ( + # Assemble the payload with the builder + payload = ( AsyncFlow() .add_generator(generator) .add_client(client) @@ -269,48 +160,41 @@ def build_payload_with_lb() -> SimulationPayload: .add_load_balancer(lb) .add_edges(*edges) .add_simulation_settings(settings) - ) - - return flow.build_payload() - + ).build_payload() -# ───────────────────────────────────────────────────────────── -# Main entry-point -# ───────────────────────────────────────────────────────────── -def main() -> None: - """ - Build → wire → run the simulation, then print diagnostics and save plots. 
- """ + # ── 2) Run the simulation ─────────────────────────────────────────────── env = simpy.Environment() - payload = build_payload_with_lb() - runner = SimulationRunner(env=env, simulation_input=payload) results: ResultsAnalyzer = runner.run() - # Human-friendly diagnostics - print_latency_stats(results) - print_throughput(results) - print_sampled_preview(results) - - # Optional sanity checks (very rough) - run_sanity_checks(runner, results) + # ── 3) Print a concise latency summary ────────────────────────────────── + print(results.format_latency_stats()) - # Save plots (2×2 figure) - try: - from matplotlib import pyplot as plt # noqa: PLC0415 + # ── 4) Save plots (same directory as this script) ─────────────────────── + out_dir = Path(__file__).parent - fig, axes = plt.subplots(2, 2, figsize=(12, 8)) - results.plot_latency_distribution(axes[0, 0]) - results.plot_throughput(axes[0, 1]) - results.plot_server_queues(axes[1, 0]) - results.plot_ram_usage(axes[1, 1]) - fig.tight_layout() - - out_path = Path(__file__).parent / "two_servers.png" - fig.savefig(out_path) - print(f"\n🖼️ Plots saved to: {out_path}") - except Exception as exc: # Matplotlib not installed or plotting failed - print(f"\n[plotting skipped] {exc!r}") + # 4a) Dashboard: latency + throughput (single figure) + fig_dash, axes = plt.subplots( + 1, 2, figsize=(14, 5), dpi=160, constrained_layout=True + ) + results.plot_latency_distribution(axes[0]) + results.plot_throughput(axes[1]) + dash_path = out_dir / "lb_dashboard.png" + fig_dash.savefig(dash_path, bbox_inches="tight") + print(f"🖼️ Dashboard saved to: {dash_path}") + + # 4b) Per-server figures: Ready | I/O | RAM (one row per server) + for sid in results.list_server_ids(): + fig_srv, axs = plt.subplots( + 1, 3, figsize=(18, 4.2), dpi=160, constrained_layout=True + ) + results.plot_single_server_ready_queue(axs[0], sid) + results.plot_single_server_io_queue(axs[1], sid) + results.plot_single_server_ram(axs[2], sid) + fig_srv.suptitle(f"Server metrics — {sid}", fontsize=16) + srv_path = out_dir / f"lb_server_{sid}_metrics.png" + fig_srv.savefig(srv_path, bbox_inches="tight") + print(f"🖼️ Per-server plots saved to: {srv_path}") if __name__ == "__main__": diff --git a/examples/builder_input/single_server/builder_service_plots.png b/examples/builder_input/single_server/builder_service_plots.png new file mode 100644 index 0000000..22fc27d Binary files /dev/null and b/examples/builder_input/single_server/builder_service_plots.png differ diff --git a/examples/builder_input/single_server/single_server.png b/examples/builder_input/single_server/single_server.png deleted file mode 100644 index f723f7c..0000000 Binary files a/examples/builder_input/single_server/single_server.png and /dev/null differ diff --git a/examples/builder_input/single_server/single_server.py b/examples/builder_input/single_server/single_server.py index 6088a12..bb54344 100644 --- a/examples/builder_input/single_server/single_server.py +++ b/examples/builder_input/single_server/single_server.py @@ -1,179 +1,53 @@ #!/usr/bin/env python3 """ -Didactic example: build and run a AsyncFlow scenario **without** YAML, -using the 'builder' (AsyncFlow) to assemble the SimulationPayload. +AsyncFlow builder example — build, run, and visualize a single-server async system. -Scenario reproduced (same as the previous YAML): +Topology (single server) generator ──edge──> client ──edge──> server ──edge──> client -Load: - ~100 active users, 20 req/min each. 
- -Server: - 1 CPU core, 2GB RAM, endpoint with steps: - CPU(1ms) → RAM(100MB) → IO(100ms) - -Network: - 3ms mean (exponential) latency on each edge. - -What this script does: - 1) Build Pydantic models (generator, client, server, edges, settings). - 2) Compose the final SimulationPayload via AsyncFlow (builder pattern). - 3) Run the simulation with SimulationRunner. - 4) Print latency stats, throughput timeline, and a sampled-metrics preview. - 5) (Optional) Visualize the topology with Matplotlib. - -Run: - python run_with_builder.py +Load model + ~100 active users, 20 requests/min each (Poisson-like aggregate). + +Server model + 1 CPU core, 2 GB RAM + Endpoint pipeline: CPU(1 ms) → RAM(100 MB) → I/O wait (100 ms) + Semantics: + - CPU step blocks the event loop + - RAM step holds a working set until request completion + - I/O step is non-blocking (event-loop friendly) + +Network model + Each edge has exponential latency with mean 3 ms. + +Outputs + - Prints latency statistics to stdout + - Saves a 2×2 PNG in the same directory as this script: + [0,0] Latency histogram (with mean/P50/P95/P99) + [0,1] Throughput (with mean/P95/max overlays) + [1,0] Ready queue for the first server + [1,1] RAM usage for the first server """ from __future__ import annotations from pathlib import Path -from typing import Iterable, List, Mapping - -import numpy as np import simpy +import matplotlib.pyplot as plt -# ── AsyncFlow domain imports ─────────────────────────────────────────────────── -from asyncflow.builder.asyncflow_builder import AsyncFlow +# Public AsyncFlow API (builder) +from asyncflow import AsyncFlow +from asyncflow.components import Client, Server, Edge, Endpoint +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator + +# Runner + Analyzer from asyncflow.runtime.simulation_runner import SimulationRunner from asyncflow.metrics.analyzer import ResultsAnalyzer -from asyncflow.schemas.payload import SimulationPayload -from asyncflow.schemas.workload.rqs_generator import RqsGenerator -from asyncflow.schemas.settings.simulation import SimulationSettings -from asyncflow.schemas.topology.endpoint import Endpoint -from asyncflow.schemas.topology.nodes import ( - Client, - Server, -) -from asyncflow.schemas.topology.edges import Edge - -from asyncflow.config.constants import LatencyKey, SampledMetricName - - -# ───────────────────────────────────────────────────────────── -# Pretty printers (compact, readable output) -# ───────────────────────────────────────────────────────────── -def print_latency_stats(res: ResultsAnalyzer) -> None: - """Print latency statistics calculated by the analyzer.""" - stats: Mapping[LatencyKey, float] = res.get_latency_stats() - print("\n════════ LATENCY STATS ════════") - if not stats: - print("(empty)") - return - - order: List[LatencyKey] = [ - LatencyKey.TOTAL_REQUESTS, - LatencyKey.MEAN, - LatencyKey.MEDIAN, - LatencyKey.STD_DEV, - LatencyKey.P95, - LatencyKey.P99, - LatencyKey.MIN, - LatencyKey.MAX, - ] - for key in order: - if key in stats: - print(f"{key.name:<20} = {stats[key]:.6f}") -def print_throughput(res: ResultsAnalyzer) -> None: - """Print the 1-second throughput buckets.""" - timestamps, rps = res.get_throughput_series() - print("\n════════ THROUGHPUT (req/sec) ════════") - if not timestamps: - print("(empty)") - return - - for t, rate in zip(timestamps, rps): - print(f"t={t:4.1f}s → {rate:6.2f} rps") - - -def print_sampled_preview(res: ResultsAnalyzer) -> None: - """ - Print a small preview for each sampled metric 
series (first 5 values). - This helps verify that sampler pipelines are running. - """ - sampled = res.get_sampled_metrics() - print("\n════════ SAMPLED METRICS (preview) ════════") - if not sampled: - print("(empty)") - return - - for metric, series in sampled.items(): - metric_name = ( - metric.name if isinstance(metric, SampledMetricName) else str(metric) - ) - print(f"\n📈 {metric_name}:") - for entity, vals in series.items(): - head = list(vals[:5]) if vals else [] - print(f" - {entity}: len={len(vals)}, first={head}") - - -# ───────────────────────────────────────────────────────────── -# Tiny helpers for sanity checks (optional) -# ───────────────────────────────────────────────────────────── -def _mean(series: Iterable[float]) -> float: - """Numerically stable mean for a generic float iterable.""" - arr = np.asarray(list(series), dtype=float) - return float(np.mean(arr)) if arr.size else 0.0 - - -def run_sanity_checks( - runner: SimulationRunner, - res: ResultsAnalyzer, -) -> None: - """ - Back-of-the-envelope checks to compare rough expectations vs observations. - These are intentionally simplistic approximations. - """ - print("\n════════ SANITY CHECKS (rough) ════════") - w = runner.simulation_input.rqs_input - lam_rps = ( - float(w.avg_active_users.mean) - * float(w.avg_request_per_minute_per_user.mean) - / 60.0 - ) - - # Observed throughput - _, rps_series = res.get_throughput_series() - rps_observed = _mean(rps_series) - print(f"• Mean throughput (rps) expected≈{lam_rps:.3f} " - f"observed={rps_observed:.3f}") - - # A few sampled signals (RAM, queues) just to show they are populated. - sampled = res.get_sampled_metrics() - ram_series = sampled.get(SampledMetricName.RAM_IN_USE, {}) - ioq_series = sampled.get(SampledMetricName.EVENT_LOOP_IO_SLEEP, {}) - ready_series = sampled.get(SampledMetricName.READY_QUEUE_LEN, {}) - - ram_mean = _mean([_mean(v) for v in ram_series.values()]) if ram_series else 0.0 - ioq_mean = _mean([_mean(v) for v in ioq_series.values()]) if ioq_series else 0.0 - ready_mean = ( - _mean([_mean(v) for v in ready_series.values()]) if ready_series else 0.0 - ) - - print(f"• Mean RAM in use (MB) observed={ram_mean:.3f}") - print(f"• Mean I/O queue length observed={ioq_mean:.3f}") - print(f"• Mean ready queue length observed={ready_mean:.3f}") - - -# ───────────────────────────────────────────────────────────── -# Build the same scenario via AsyncFlow (builder) -# ───────────────────────────────────────────────────────────── -def build_payload_with_builder() -> SimulationPayload: - """ - Construct the SimulationPayload programmatically using the builder. - - This mirrors the YAML: - - Generator (100 users, 20 rpm each) - - Client - - One server with a single endpoint (CPU → RAM → IO) - - Three edges with exponential latency (3ms mean) - - Simulation settings: 500s total, sample period 50ms - """ - # 1) Request generator +def build_and_run() -> ResultsAnalyzer: + """Build the scenario via the Pythonic builder and run the simulation.""" + # Workload (generator) generator = RqsGenerator( id="rqs-1", avg_active_users={"mean": 100}, @@ -181,50 +55,48 @@ def build_payload_with_builder() -> SimulationPayload: user_sampling_window=60, ) - # 2) Client + # Client client = Client(id="client-1") - # 3) Server (1 CPU core, 2GB RAM) with one endpoint and three steps - # We let Pydantic coerce nested dicts for the endpoint steps. 
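+    # Note: the plain dicts in `steps` below are validated and coerced into
+    # Pydantic step models when the payload is built.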
+ # Server + endpoint (CPU → RAM → I/O) endpoint = Endpoint( - endpoint_name="ep-1", + endpoint_name="/api", probability=1.0, steps=[ - {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.001}}, - {"kind": "ram", "step_operation": {"necessary_ram": 100}}, - {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.1}}, + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.001}}, # 1 ms + {"kind": "ram", "step_operation": {"necessary_ram": 100}}, # 100 MB + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.100}}, # 100 ms ], ) - server = Server( - id="srv-1", + id="app-1", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[endpoint], ) - # 4) Edges: exponential latency with 3ms mean (same as YAML) + # Network edges (3 ms mean, exponential) e_gen_client = Edge( - id="gen-to-client", + id="gen-client", source="rqs-1", target="client-1", latency={"mean": 0.003, "distribution": "exponential"}, ) - e_client_server = Edge( - id="client-to-server", + e_client_app = Edge( + id="client-app", source="client-1", - target="srv-1", + target="app-1", latency={"mean": 0.003, "distribution": "exponential"}, ) - e_server_client = Edge( - id="server-to-client", - source="srv-1", + e_app_client = Edge( + id="app-client", + source="app-1", target="client-1", latency={"mean": 0.003, "distribution": "exponential"}, ) - # 5) Simulation settings + # Simulation settings settings = SimulationSettings( - total_simulation_time=500, + total_simulation_time=300, sample_period_s=0.05, enabled_sample_metrics=[ "ready_queue_len", @@ -235,59 +107,56 @@ def build_payload_with_builder() -> SimulationPayload: enabled_event_metrics=["rqs_clock"], ) - # 6) Assemble the payload via the builder (AsyncFlow). - # The builder will validate the final structure on build. - flow = ( + # Assemble payload with the builder + payload = ( AsyncFlow() .add_generator(generator) .add_client(client) .add_servers(server) - .add_edges(e_gen_client, e_client_server, e_server_client) + .add_edges(e_gen_client, e_client_app, e_app_client) .add_simulation_settings(settings) - ) + ).build_payload() - return flow.build_payload() + # Run + env = simpy.Environment() + runner = SimulationRunner(env=env, simulation_input=payload) + results: ResultsAnalyzer = runner.run() + return results -# ───────────────────────────────────────────────────────────── -# Main entry-point -# ───────────────────────────────────────────────────────────── def main() -> None: - """ - Build → wire → run the simulation, then print diagnostics. - Mirrors run_from_yaml.py but uses the builder to construct the input. - Also saves a 2x2 plot figure (latency, throughput, server queues, RAM). 
- """ - env = simpy.Environment() - payload = build_payload_with_builder() + # Build & run + res = build_and_run() - runner = SimulationRunner(env=env, simulation_input=payload) - results: ResultsAnalyzer = runner.run() + # Print concise latency summary + print(res.format_latency_stats()) + + # Prepare figure in the same folder as this script + script_dir = Path(__file__).parent + out_path = script_dir / "builder_service_plots.png" + + # 2×2: Latency | Throughput | Ready (first server) | RAM (first server) + fig, axes = plt.subplots(2, 2, figsize=(12, 8), dpi=160) - # Human-friendly diagnostics - print_latency_stats(results) - print_throughput(results) - print_sampled_preview(results) + # Top row + res.plot_latency_distribution(axes[0, 0]) + res.plot_throughput(axes[0, 1]) - # Optional sanity checks (very rough) - run_sanity_checks(runner, results) + # Bottom row — first server, if present + sids = res.list_server_ids() + if sids: + sid = sids[0] + res.plot_single_server_ready_queue(axes[1, 0], sid) + res.plot_single_server_ram(axes[1, 1], sid) + else: + for ax in (axes[1, 0], axes[1, 1]): + ax.text(0.5, 0.5, "No servers", ha="center", va="center") + ax.axis("off") - # Save plots (2x2 figure), same layout as in the YAML-based example - try: - from matplotlib import pyplot as plt # noqa: PLC0415 + fig.tight_layout() + fig.savefig(out_path) + print(f"Plots saved to: {out_path}") - fig, axes = plt.subplots(2, 2, figsize=(12, 8)) - results.plot_latency_distribution(axes[0, 0]) - results.plot_throughput(axes[0, 1]) - results.plot_server_queues(axes[1, 0]) - results.plot_ram_usage(axes[1, 1]) - fig.tight_layout() - out_path = Path(__file__).parent / "single_server.png" - fig.savefig(out_path) - print(f"\n🖼️ Plots saved to: {out_path}") - except Exception as exc: # Matplotlib not installed or plotting failed - print(f"\n[plotting skipped] {exc!r}") - if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/yaml_input/data/two_servers_lb.yml b/examples/yaml_input/data/two_servers_lb.yml index 9b71943..100a46b 100644 --- a/examples/yaml_input/data/two_servers_lb.yml +++ b/examples/yaml_input/data/two_servers_lb.yml @@ -1,96 +1,71 @@ -# AsyncFlow SimulationPayload (LB + 2 servers, realistic steps) +# AsyncFlow SimulationPayload — Load Balancer + 2 identical app servers +# +# Topology: +# generator → client → LB → srv-1 +# └→ srv-2 +# srv-1 → client +# srv-2 → client +# +# Each server runs: CPU(2 ms) → RAM(128 MB) → IO wait(12 ms) +# All network links use exponential latency with small means (2–3 ms). +# +# Workload targets ~40 rps (120 users × 20 req/min ÷ 60). 
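+#
+# Back-of-the-envelope expectations (a rough sketch via Little's law;
+# it ignores queueing delay and the small network-edge latencies):
+#   round_robin splits ~40 rps evenly → ~20 rps per server
+#   per-request server residency ≈ 0.002 s CPU + 0.012 s I/O = 0.014 s
+#   in-flight requests per server ≈ λ·T ≈ 20 × 0.014 ≈ 0.28
+#   mean RAM per server ≈ 0.28 × 128 MB ≈ 36 MB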
rqs_input: - id: "rqs-1" - avg_active_users: - mean: 120 - avg_request_per_minute_per_user: - mean: 20 + id: rqs-1 + avg_active_users: { mean: 120 } + avg_request_per_minute_per_user: { mean: 20 } user_sampling_window: 60 topology_graph: nodes: - client: - id: "client-1" + client: { id: client-1 } + + load_balancer: + id: lb-1 + algorithms: round_robin + server_covered: [srv-1, srv-2] servers: - - id: "srv-1" - server_resources: - cpu_cores: 1 - ram_mb: 1024 + - id: srv-1 + server_resources: { cpu_cores: 1, ram_mb: 2048 } endpoints: - - endpoint_name: "/api" + - endpoint_name: /api steps: - - kind: "initial_parsing" - step_operation: - cpu_time: 0.002 - - kind: "ram" - step_operation: - necessary_ram: 64 - - kind: "io_wait" - step_operation: - io_waiting_time: 0.012 + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } # 2 ms CPU (blocks event loop) + - kind: ram + step_operation: { necessary_ram: 128 } # 128 MB working set + - kind: io_wait + step_operation: { io_waiting_time: 0.012 } # 12 ms non-blocking I/O - - id: "srv-2" - server_resources: - cpu_cores: 2 - ram_mb: 2048 + - id: srv-2 + server_resources: { cpu_cores: 1, ram_mb: 2048 } endpoints: - - endpoint_name: "/api" + - endpoint_name: /api steps: - - kind: "ram" - step_operation: - necessary_ram: 96 - - kind: "io_db" - step_operation: - io_waiting_time: 0.020 - - kind: "cpu_bound_operation" # <-- was 'final_processing' (invalid) - step_operation: - cpu_time: 0.001 - - load_balancer: - id: "lb-1" - algorithms: "round_robin" - server_covered: ["srv-1", "srv-2"] + - kind: initial_parsing + step_operation: { cpu_time: 0.002 } + - kind: ram + step_operation: { necessary_ram: 128 } + - kind: io_wait + step_operation: { io_waiting_time: 0.012 } edges: - - id: "gen-client" - source: "rqs-1" - target: "client-1" - latency: { mean: 0.003, distribution: "exponential" } - - - id: "client-lb" - source: "client-1" - target: "lb-1" - latency: { mean: 0.002, distribution: "exponential" } - - - id: "lb-srv1" - source: "lb-1" - target: "srv-1" - latency: { mean: 0.002, distribution: "exponential" } - - - id: "lb-srv2" - source: "lb-1" - target: "srv-2" - latency: { mean: 0.002, distribution: "exponential" } - - - id: "srv1-client" - source: "srv-1" - target: "client-1" - latency: { mean: 0.003, distribution: "exponential" } - - - id: "srv2-client" - source: "srv-2" - target: "client-1" - latency: { mean: 0.003, distribution: "exponential" } + - { id: gen-client, source: rqs-1, target: client-1, latency: { mean: 0.003, distribution: exponential } } + - { id: client-lb, source: client-1, target: lb-1, latency: { mean: 0.002, distribution: exponential } } + - { id: lb-srv1, source: lb-1, target: srv-1, latency: { mean: 0.002, distribution: exponential } } + - { id: lb-srv2, source: lb-1, target: srv-2, latency: { mean: 0.002, distribution: exponential } } + - { id: srv1-client, source: srv-1, target: client-1, latency: { mean: 0.003, distribution: exponential } } + - { id: srv2-client, source: srv-2, target: client-1, latency: { mean: 0.003, distribution: exponential } } sim_settings: total_simulation_time: 600 - sample_period_s: 0.02 + sample_period_s: 0.05 enabled_sample_metrics: - - "ready_queue_len" - - "event_loop_io_sleep" - - "ram_in_use" - - "edge_concurrent_connection" + - ready_queue_len + - event_loop_io_sleep + - ram_in_use + - edge_concurrent_connection enabled_event_metrics: - - "rqs_clock" + - rqs_clock diff --git a/examples/yaml_input/lb_dashboard.png b/examples/yaml_input/lb_dashboard.png new file mode 100644 index 
0000000..dbe7d42
Binary files /dev/null and b/examples/yaml_input/lb_dashboard.png differ
diff --git a/examples/yaml_input/lb_server_srv-1_metrics.png b/examples/yaml_input/lb_server_srv-1_metrics.png
new file mode 100644
index 0000000..6d3ac35
Binary files /dev/null and b/examples/yaml_input/lb_server_srv-1_metrics.png differ
diff --git a/examples/yaml_input/lb_server_srv-2_metrics.png b/examples/yaml_input/lb_server_srv-2_metrics.png
new file mode 100644
index 0000000..916c60f
Binary files /dev/null and b/examples/yaml_input/lb_server_srv-2_metrics.png differ
diff --git a/examples/yaml_input/load_balancer/two_servers.png b/examples/yaml_input/load_balancer/two_servers.png
deleted file mode 100644
index 9ee7796..0000000
Binary files a/examples/yaml_input/load_balancer/two_servers.png and /dev/null differ
diff --git a/examples/yaml_input/load_balancer/two_servers.py b/examples/yaml_input/load_balancer/two_servers.py
index afc4408..1a01277 100644
--- a/examples/yaml_input/load_balancer/two_servers.py
+++ b/examples/yaml_input/load_balancer/two_servers.py
@@ -1,282 +1,69 @@
 #!/usr/bin/env python3
 """
-Run an AsyncFlow scenario with a Load Balancer (2 servers) from YAML and print diagnostics.
-
-What it does:
-- Loads the simulation payload from YAML via `SimulationRunner.from_yaml`.
-- Runs the simulation.
-- Prints latency stats, 1s-bucket throughput, and a preview of sampled metrics.
-- Saves four plots (latency histogram, throughput, server queues, RAM).
-- Performs sanity checks (expected vs observed) with simple LB-aware heuristics.
-
-Usage:
-  python src/app/example/run_lb_from_yaml.py \
-      --yaml src/app/example/data/two_servers_lb.yml
+Walkthrough: run a load-balanced (2 servers) AsyncFlow scenario from YAML.
+
+What this script does
+---------------------
+1) Loads the SimulationPayload from a YAML file (round-robin LB, 2 identical servers).
+2) Runs the simulation via `SimulationRunner`.
+3) Prints a concise latency summary to stdout.
+4) Saves plots **one level above this script**, in `examples/yaml_input/`:
+   • `lb_dashboard.png` (Latency histogram + Throughput)
+   • One figure per server with 3 panels: Ready Queue, I/O Queue, RAM usage.
+
+How to use
+----------
+- Keep the repository layout: this script reads `../data/two_servers_lb.yml`
+  relative to its own location.
+- Run: `python two_servers.py`
 """

from __future__ import annotations

-from argparse import ArgumentParser
 from pathlib import Path
-from typing import Dict, Iterable, List, Mapping, Tuple
-
-import matplotlib.pyplot as plt
-import numpy as np
 import simpy
+import matplotlib.pyplot as plt

-from asyncflow.config.constants import (  # only for basic step-kind/ops inspection
-    EndpointStepCPU,
-    EndpointStepIO,
-    EndpointStepRAM,
-    LatencyKey,
-    StepOperation,
-)
-from asyncflow.metrics.analyzer import ResultsAnalyzer
 from asyncflow.runtime.simulation_runner import SimulationRunner
+from asyncflow.metrics.analyzer import ResultsAnalyzer


-# ─────────────────────────────────────────────────────────────
-# Pretty printers (same style as your single-server script)
-# ─────────────────────────────────────────────────────────────
-def print_latency_stats(res: ResultsAnalyzer) -> None:
-    """Print latency statistics returned by the analyzer."""
-    stats: Mapping[LatencyKey, float] = res.get_latency_stats()
-    print("\n════════ LATENCY STATS ════════")
-    if not stats:
-        print("(empty)")
-        return
-
-    order: List[LatencyKey] = [
-        LatencyKey.TOTAL_REQUESTS,
-        LatencyKey.MEAN,
-        LatencyKey.MEDIAN,
-        LatencyKey.STD_DEV,
-        LatencyKey.P95,
-        LatencyKey.P99,
-        LatencyKey.MIN,
-        LatencyKey.MAX,
-    ]
-    for key in order:
-        if key in stats:
-            print(f"{key.name:<20} = {stats[key]:.6f}")
-
-
-def print_throughput(res: ResultsAnalyzer) -> None:
-    """Print 1-second throughput buckets."""
-    timestamps, rps = res.get_throughput_series()
-    print("\n════════ THROUGHPUT (req/sec) ════════")
-    if not timestamps:
-        print("(empty)")
-        return
-
-    for t, rate in zip(timestamps, rps):
-        print(f"t={t:4.1f}s → {rate:6.2f} rps")
-
-
-def print_sampled_preview(res: ResultsAnalyzer) -> None:
-    """Print first 5 samples of each sampled metric series."""
-    sampled: Dict[str, Dict[str, List[float]]] = res.get_sampled_metrics()
-    print("\n════════ SAMPLED METRICS ════════")
-    if not sampled:
-        print("(empty)")
-        return
-
-    for metric, series in sampled.items():
-        print(f"\n📈 {metric}:")
-        for entity, vals in series.items():
-            head = list(vals[:5]) if vals else []
-            print(f"   - {entity}: len={len(vals)}, first={head}")
-
-
-# ─────────────────────────────────────────────────────────────
-# Plotting
-# ─────────────────────────────────────────────────────────────
-def save_all_plots(res: ResultsAnalyzer, out_path: Path) -> None:
-    """Generate the 2x2 plot figure and save it to `out_path`."""
-    fig, axes = plt.subplots(2, 2, figsize=(12, 8))
-    res.plot_latency_distribution(axes[0, 0])
-    res.plot_throughput(axes[0, 1])
-    res.plot_server_queues(axes[1, 0])
-    res.plot_ram_usage(axes[1, 1])
-    fig.tight_layout()
-    fig.savefig(out_path)
-    print(f"\n🖼️ Plots saved to: {out_path}")
-
-
-# ─────────────────────────────────────────────────────────────
-# Sanity checks (LB-aware, still rough)
-# ─────────────────────────────────────────────────────────────
-
-def run_sanity_checks(runner: SimulationRunner, res: ResultsAnalyzer) -> None:
-    """
-    Sanity checks LB-aware (round-robin): observed vs expected
-    """
-    from asyncflow.config.constants import (
-        EndpointStepCPU, EndpointStepIO, EndpointStepRAM, StepOperation, LatencyKey
-    )
-    import numpy as np
-
-    def _mean(arr):
-        a = np.asarray(list(arr), dtype=float)
-        return float(a.mean()) if a.size else 0.0
-
-    # 1) λ
-    w = runner.simulation_input.rqs_input
-    lam = float(w.avg_active_users.mean) * float(w.avg_request_per_minute_per_user.mean) / 60.0
-
-    topo = runner.simulation_input.topology_graph
-    
servers = {s.id: s for s in topo.nodes.servers} - client_id = topo.nodes.client.id - lb = topo.nodes.load_balancer - lb_id = lb.id if lb else None - gen_id = runner.simulation_input.rqs_input.id - - # 2) LB (round_robin -> 1/N) - if lb and lb.server_covered: - covered = [sid for sid in lb.server_covered if sid in servers] - N = max(1, len(covered)) - shares = {sid: 1.0 / N for sid in covered} - else: - only = next(iter(servers.keys())) - shares = {only: 1.0} - - # 3) endpoint totals per server - def endpoint_totals(server): - cpu_s = io_s = ram_mb = 0.0 - for ep in getattr(server, "endpoints", []) or []: - prob = getattr(ep, "probability", 1.0) - for step in ep.steps: - k = step.kind - op = step.step_operation - if isinstance(k, EndpointStepCPU): - cpu_s += prob * float(op[StepOperation.CPU_TIME]) - elif isinstance(k, EndpointStepIO): - io_s += prob * float(op[StepOperation.IO_WAITING_TIME]) - elif isinstance(k, EndpointStepRAM): - ram_mb += prob * float(op[StepOperation.NECESSARY_RAM]) - return cpu_s, io_s, ram_mb - - per_srv = {sid: endpoint_totals(srv) for sid, srv in servers.items()} - - # 4) mappa latencies of edges per role (source,target) - mean_gen_client = 0.0; id_gen_client = None - mean_client_lb = 0.0; id_client_lb = None - mean_lb_srv = {} # sid -> mean - mean_srv_client = {} # sid -> mean - id_lb_srv = {} # sid -> edge_id - id_srv_client = {} # sid -> edge_id - - for e in topo.edges: - s, t, mu = e.source, e.target, float(e.latency.mean) - if s == gen_id and t == client_id: - mean_gen_client = mu; id_gen_client = e.id - elif s == client_id and lb_id and t == lb_id: - mean_client_lb = mu; id_client_lb = e.id - elif lb_id and s == lb_id and t in servers: - mean_lb_srv[t] = mu; id_lb_srv[t] = e.id - elif s in servers and t == client_id: - mean_srv_client[s] = mu; id_srv_client[s] = e.id - - # 5) expected: average latencies - cpu_exp = sum(shares[sid] * per_srv[sid][0] for sid in shares) - io_exp = sum(shares[sid] * per_srv[sid][1] for sid in shares) - net_exp = ( - mean_gen_client + mean_client_lb + - sum(shares[sid] * (mean_lb_srv.get(sid, 0.0) + mean_srv_client.get(sid, 0.0)) for sid in shares) - ) - latency_expected = cpu_exp + io_exp + net_exp - - # 6) observed: throughput & latencies - stats = res.get_latency_stats() - latency_observed = float(stats.get(LatencyKey.MEAN, 0.0)) - _, rps_series = res.get_throughput_series() - rps_observed = _mean(rps_series) - - # 7) expected: RAM e I/O queue as a sum over server - ram_expected = sum((shares[sid] * lam) * (per_srv[sid][0] + per_srv[sid][1]) * per_srv[sid][2] for sid in shares) - ioq_expected = sum((shares[sid] * lam) * per_srv[sid][1] for sid in shares) - - # 8) observed: RAM (sum) and I/O queue sum - sampled = res.get_sampled_metrics() - ram_series = sampled.get("ram_in_use", {}) - ioq_series = sampled.get("event_loop_io_sleep", {}) - ram_observed = sum(_mean(vals) for vals in ram_series.values()) if ram_series else 0.0 - ioq_observed = sum(_mean(vals) for vals in ioq_series.values()) if ioq_series else 0.0 - - # 9) print - REL_TOL = 0.30 - def tick(label, exp, obs): - delta = (abs(obs - exp) / abs(exp)) if exp else 0.0 - icon = "✓" if delta <= REL_TOL else "⚠" - print(f"{icon} {label:<28} expected≈{exp:.3f} observed={obs:.3f} Δ={delta*100:.1f}%") - - print("\n════════ SANITY CHECKS (LB-aware) ════════") - tick("Mean throughput (rps)", lam, rps_observed) - tick("Mean latency (s)", latency_expected, latency_observed) - tick("Mean RAM (MB)", ram_expected, ram_observed) - tick("Mean I/O queue", ioq_expected, ioq_observed) - - # 
10) Edge concurrency estimation
-    edge_conc = sampled.get("edge_concurrent_connection", {})
-    if edge_conc:
-        print("\n— Edge concurrency (LB-aware) —")
-        means_obs = {eid: _mean(vals) for eid, vals in edge_conc.items()}
-
-        if id_gen_client:
-            tick(f"edge {id_gen_client}", lam * mean_gen_client, means_obs.get(id_gen_client, 0.0))
-        if id_client_lb:
-            tick(f"edge {id_client_lb}", lam * mean_client_lb, means_obs.get(id_client_lb, 0.0))
-
-        for sid, p in shares.items():
-            lam_i = p * lam
-            eid = id_lb_srv.get(sid)
-            if eid:
-                tick(f"edge {eid}", lam_i * mean_lb_srv.get(sid, 0.0), means_obs.get(eid, 0.0))
-            eid = id_srv_client.get(sid)
-            if eid:
-                tick(f"edge {eid}", lam_i * mean_srv_client.get(sid, 0.0), means_obs.get(eid, 0.0))
-
-    # Extra
-    print("\n— Diagnostics —")
-    print("λ={:.3f} rps | E[cpu]={:.3f}s E[io]={:.3f}s E[net]≈{:.3f}s | E[RAM/req]={:.1f} MB"
-          .format(lam, cpu_exp, io_exp, net_exp, sum(shares[sid]*per_srv[sid][2] for sid in shares)))
-
-
-
-# ─────────────────────────────────────────────────────────────
-# Main
-# ─────────────────────────────────────────────────────────────
 def main() -> None:
-    """Parse args, run simulation, print/plot, sanity-check (LB topology)."""
-    parser = ArgumentParser(description="Run AsyncFlow LB scenario from YAML and print outputs + sanity checks.")
-    parser.add_argument(
-        "--yaml",
-        type=Path,
-        default=Path(__file__).parent.parent / "data" / "two_servers_lb.yml",
-        help="Path to the simulation YAML file.",
-    )
-    parser.add_argument(
-        "--out",
-        type=Path,
-        default=Path(__file__).parent / "two_servers.png",
-        help="Path to the output image (plots).",
-    )
-    args = parser.parse_args()
-
-    yaml_path: Path = args.yaml
+    # Paths: the YAML lives in ../data, and plots are written to ../
+    # (examples/yaml_input), one level above this script
+    base_dir = Path(__file__).parent.parent
+    yaml_path = base_dir / "data" / "two_servers_lb.yml"
     if not yaml_path.exists():
-        raise FileNotFoundError(f"YAML not found: {yaml_path}")
+        raise FileNotFoundError(f"YAML configuration not found: {yaml_path}")

+    # Run the simulation
+    print(f"🚀 Loading and running simulation from: {yaml_path}")
     env = simpy.Environment()
     runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path)
     results: ResultsAnalyzer = runner.run()
-
-    print_latency_stats(results)
-    print_throughput(results)
-    print_sampled_preview(results)
-
-    run_sanity_checks(runner, results)
-    save_all_plots(results, args.out)
+    print("✅ Simulation finished!")
+
+    # Print concise latency summary
+    print(results.format_latency_stats())
+
+    # ---- Plots: dashboard (latency + throughput) ----
+    fig_dash, axes_dash = plt.subplots(1, 2, figsize=(14, 5), dpi=160)
+    results.plot_latency_distribution(axes_dash[0])
+    results.plot_throughput(axes_dash[1])
+    fig_dash.tight_layout()
+    out_dashboard = base_dir / "lb_dashboard.png"
+    fig_dash.savefig(out_dashboard)
+    print(f"🖼️ Dashboard saved to: {out_dashboard}")
+
+    # ---- Per-server metrics: one figure per server (Ready | I/O | RAM) ----
+    for sid in results.list_server_ids():
+        fig_row, axes = plt.subplots(1, 3, figsize=(16, 3.8), dpi=160)
+        results.plot_single_server_ready_queue(axes[0], sid)
+        results.plot_single_server_io_queue(axes[1], sid)
+        results.plot_single_server_ram(axes[2], sid)
+        fig_row.suptitle(f"Server metrics — {sid}", y=1.04, fontsize=14)
+        fig_row.tight_layout()
+        out_path = base_dir / f"lb_server_{sid}_metrics.png"
+        fig_row.savefig(out_path, bbox_inches="tight")
+        print(f"🖼️ Server metrics for '{sid}' saved to: {out_path}")


 if __name__ == "__main__":
diff --git 
a/examples/yaml_input/single_server/single_server.png b/examples/yaml_input/single_server/single_server.png deleted file mode 100644 index c03f9b2..0000000 Binary files a/examples/yaml_input/single_server/single_server.png and /dev/null differ diff --git a/examples/yaml_input/single_server/single_server.py b/examples/yaml_input/single_server/single_server.py index 004cd0c..ec14998 100644 --- a/examples/yaml_input/single_server/single_server.py +++ b/examples/yaml_input/single_server/single_server.py @@ -1,272 +1,108 @@ -#!/usr/bin/env python3 """ -Run a AsyncFlow scenario from a YAML file and print diagnostics. - -What it does: -- Loads the simulation payload from YAML via `SimulationRunner.from_yaml`. -- Runs the simulation. -- Prints latency stats, 1s-bucket throughput, and a preview of sampled metrics. -- Saves four plots (latency histogram, throughput, server queues, RAM). -- Performs sanity checks (expected vs observed) with simple queueing heuristics. - -Usage: - python src/app/example/run_from_yaml.py \ - --yaml src/app/example/data/single_server.yml +AsyncFlow — YAML single-server example: run and export charts. + +System (single server) + generator → client → server → client + +Load + ~100 active users, ~20 requests/min each (stochastic aggregate). + +Server + 1 CPU core, 2 GB RAM, endpoint "ep-1": + CPU(1 ms) → RAM(100 MB) → I/O wait (100 ms) + Semantics: + - CPU step blocks the event loop + - RAM step holds a working set until the request leaves the server + - I/O step is non-blocking (event-loop friendly) + +Network + Each edge has exponential latency with mean 3 ms. + +Simulation settings + Duration: 500 s + Sampling period: 50 ms + +What this script does + 1) Loads the YAML scenario and runs the simulation. + 2) Prints latency statistics to stdout. + 3) Saves charts next to this script: + - Dashboard PNG: latency histogram (mean/P50/P95/P99) + and throughput (mean/P95/max) side-by-side. + - Per-server PNGs: Ready queue, I/O queue, and RAM usage for each server. """ + from __future__ import annotations -from argparse import ArgumentParser +import logging from pathlib import Path -from typing import Dict, Iterable, List, Mapping, Tuple - -import matplotlib.pyplot as plt -import numpy as np -import simpy -from asyncflow.config.constants import ( - EndpointStepCPU, - EndpointStepIO, - EndpointStepRAM, - LatencyKey, - StepOperation, -) -from asyncflow.metrics.analyzer import ResultsAnalyzer -from asyncflow.runtime.simulation_runner import SimulationRunner - - -# ───────────────────────────────────────────────────────────── -# Pretty printers -# ───────────────────────────────────────────────────────────── -def print_latency_stats(res: ResultsAnalyzer) -> None: - """Print latency statistics returned by the analyzer.""" - stats: Mapping[LatencyKey, float] = res.get_latency_stats() - print("\n════════ LATENCY STATS ════════") - if not stats: - print("(empty)") - return - - # Keep deterministic ordering for readability. 
- order: List[LatencyKey] = [ - LatencyKey.TOTAL_REQUESTS, - LatencyKey.MEAN, - LatencyKey.MEDIAN, - LatencyKey.STD_DEV, - LatencyKey.P95, - LatencyKey.P99, - LatencyKey.MIN, - LatencyKey.MAX, - ] - for key in order: - if key in stats: - print(f"{key.name:<20} = {stats[key]:.6f}") - - -def print_throughput(res: ResultsAnalyzer) -> None: - """Print 1-second throughput buckets.""" - timestamps, rps = res.get_throughput_series() - print("\n════════ THROUGHPUT (req/sec) ════════") - if not timestamps: - print("(empty)") - return - - for t, rate in zip(timestamps, rps): - print(f"t={t:4.1f}s → {rate:6.2f} rps") - - -def print_sampled_preview(res: ResultsAnalyzer) -> None: - """Print first 5 samples of each sampled metric series.""" - sampled: Dict[str, Dict[str, List[float]]] = res.get_sampled_metrics() - print("\n════════ SAMPLED METRICS ════════") - if not sampled: - print("(empty)") - return - - for metric, series in sampled.items(): - print(f"\n📈 {metric}:") - for entity, vals in series.items(): - head = list(vals[:5]) if vals else [] - print(f" - {entity}: len={len(vals)}, first={head}") - - -# ───────────────────────────────────────────────────────────── -# Plotting -# ───────────────────────────────────────────────────────────── -def save_all_plots(res: ResultsAnalyzer, out_path: Path) -> None: - """Generate the 2x2 plot figure and save it to `out_path`.""" - fig, axes = plt.subplots(2, 2, figsize=(12, 8)) - res.plot_latency_distribution(axes[0, 0]) - res.plot_throughput(axes[0, 1]) - res.plot_server_queues(axes[1, 0]) - res.plot_ram_usage(axes[1, 1]) - fig.tight_layout() - fig.savefig(out_path) - print(f"\n🖼️ Plots saved to: {out_path}") - - -# ───────────────────────────────────────────────────────────── -# Sanity checks (expected vs observed) -# ───────────────────────────────────────────────────────────── -REL_TOL = 0.30 # 30% tolerance for rough sanity checks - - -def _tick(label: str, expected: float, observed: float) -> None: - """Print a ✓ or ⚠ depending on relative error vs `REL_TOL`.""" - if expected == 0.0: - delta_pct = 0.0 - icon = "•" - else: - delta = abs(observed - expected) / abs(expected) - delta_pct = delta * 100.0 - icon = "✓" if delta <= REL_TOL else "⚠" - print(f"{icon} {label:<28} expected≈{expected:.3f} observed={observed:.3f} Δ={delta_pct:.1f}%") - -def _endpoint_totals(runner: SimulationRunner) -> Tuple[float, float, float]: - """ - Return (CPU_seconds, IO_seconds, RAM_MB) of the first endpoint on the first server. +# SimPy environment is required by SimulationRunner.from_yaml +import simpy - This keeps the check simple. If you use multiple endpoints weighted by probability, - extend this function to compute a probability-weighted average. 
- """ - servers = runner.simulation_input.topology_graph.nodes.servers - if not servers or not servers[0].endpoints: - return (0.0, 0.0, 0.0) +# matplotlib is needed to create figures for plotting +import matplotlib.pyplot as plt - ep = servers[0].endpoints[0] - cpu_s = 0.0 - io_s = 0.0 - ram_mb = 0.0 +# The only imports a user needs to run a simulation +from asyncflow.metrics.analyzer import ResultsAnalyzer +from asyncflow.runtime.simulation_runner import SimulationRunner - for step in ep.steps: - if isinstance(step.kind, EndpointStepCPU): - cpu_s += float(step.step_operation[StepOperation.CPU_TIME]) - elif isinstance(step.kind, EndpointStepIO): - io_s += float(step.step_operation[StepOperation.IO_WAITING_TIME]) - elif isinstance(step.kind, EndpointStepRAM): - ram_mb += float(step.step_operation[StepOperation.NECESSARY_RAM]) +# --- Basic Logging Setup --- +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") - return (cpu_s, io_s, ram_mb) - -def _edges_mean_latency(runner: SimulationRunner) -> float: - """Sum of edge mean latencies across the graph (simple additive approximation).""" - return float(sum(e.latency.mean for e in runner.simulation_input.topology_graph.edges)) - - -def _mean(series: Iterable[float]) -> float: - """Numerically stable mean for a generic float iterable.""" - arr = np.asarray(list(series), dtype=float) - return float(np.mean(arr)) if arr.size else 0.0 - - -def run_sanity_checks(runner: SimulationRunner, res: ResultsAnalyzer) -> None: - """ - Compare expected vs observed metrics using back-of-the-envelope approximations. - - Approximations used: - - Throughput ≈ λ = users * RPM / 60 - - Mean latency ≈ CPU_s + IO_s + NET_s (ignores queueing inside the server) - - Mean RAM ≈ λ * T_srv * RAM_per_request (Little’s law approximation) - - Mean I/O queue length ≈ λ * IO_s - - Edge concurrency ≈ λ * edge_mean_latency - """ - print("\n════════ SANITY CHECKS (expected vs observed) ════════") - - # Arrival rate λ (requests per second) - w = runner.simulation_input.rqs_input - lam_rps = float(w.avg_active_users.mean) * float(w.avg_request_per_minute_per_user.mean) / 60.0 - - # Endpoint sums - cpu_s, io_s, ram_mb = _endpoint_totals(runner) - net_s = _edges_mean_latency(runner) - t_srv = cpu_s + io_s - latency_expected = cpu_s + io_s + net_s - - # Observed latency, throughput - stats = res.get_latency_stats() - latency_observed = float(stats.get(LatencyKey.MEAN, 0.0)) - _, rps_series = res.get_throughput_series() - rps_observed = _mean(rps_series) - - # Observed RAM and queues - sampled = res.get_sampled_metrics() - ram_series = sampled.get("ram_in_use", {}) - ram_means = [_mean(vals) for vals in ram_series.values()] - ram_observed = float(sum(ram_means)) if ram_means else 0.0 - - ready_series = sampled.get("ready_queue_len", {}) - ioq_series = sampled.get("event_loop_io_sleep", {}) - ready_observed = _mean([_mean(v) for v in ready_series.values()]) if ready_series else 0.0 - ioq_observed = _mean([_mean(v) for v in ioq_series.values()]) if ioq_series else 0.0 - - # Expected quantities (very rough) - rps_expected = lam_rps - ram_expected = lam_rps * t_srv * ram_mb - ioq_expected = lam_rps * io_s - - _tick("Mean throughput (rps)", rps_expected, rps_observed) - _tick("Mean latency (s)", latency_expected, latency_observed) - _tick("Mean RAM (MB)", ram_expected, ram_observed) - _tick("Mean I/O queue", ioq_expected, ioq_observed) - - # Edge concurrency - edge_conc = sampled.get("edge_concurrent_connection", {}) - if edge_conc: - print("\n— Edge concurrency —") 
- edge_means: Dict[str, float] = {eid: _mean(vals) for eid, vals in edge_conc.items()} - for e in runner.simulation_input.topology_graph.edges: - exp = lam_rps * float(e.latency.mean) - obs = edge_means.get(e.id, 0.0) - _tick(f"edge {e.id}", exp, obs) - - # Extra diagnostics - print("\n— Diagnostics —") - print( - "λ={:.3f} rps | CPU_s={:.3f} IO_s={:.3f} NET_s≈{:.3f} | RAM/req={:.1f} MB" - .format(lam_rps, cpu_s, io_s, net_s, ram_mb) - ) - print("T_srv={:.3f}s → RAM_expected≈λ*T_srv*RAM = {:.1f} MB".format(t_srv, ram_expected)) - - -# ───────────────────────────────────────────────────────────── -# Main -# ───────────────────────────────────────────────────────────── def main() -> None: - """Entry-point: parse args, run simulation, print/plot, sanity-check.""" - parser = ArgumentParser(description="Run AsyncFlow from YAML and print outputs + sanity checks.") - parser.add_argument( - "--yaml", - type=Path, - default=Path(__file__).parent.parent / "data" /"single_server.yml", - help="Path to the simulation YAML file.", - ) - parser.add_argument( - "--out", - type=Path, - default=Path(__file__).parent / "single_server.png", - help="Path to the output image (plots).", - ) - args = parser.parse_args() + """Defines paths, runs the simulation, and generates all outputs.""" + # --- 1. Define File Paths --- + script_dir = Path(__file__).parent # <-- same folder as this file + out_dir = script_dir # <-- save outputs here + yaml_path = script_dir.parent / "data" / "single_server.yml" + output_base_name = "single_server_results" # prefix for all output files - yaml_path: Path = args.yaml if not yaml_path.exists(): - raise FileNotFoundError(f"YAML not found: {yaml_path}") + raise FileNotFoundError(f"YAML configuration file not found: {yaml_path}") - # Build runner from YAML and execute - env = simpy.Environment() - runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) + # --- 2. 
Run the Simulation --- + print(f"🚀 Loading and running simulation from: {yaml_path}") + env = simpy.Environment() # Create the SimPy environment + runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) # pass env results: ResultsAnalyzer = runner.run() - - # Prints - print_latency_stats(results) - print_throughput(results) - print_sampled_preview(results) - - # Sanity checks - run_sanity_checks(runner, results) - - # Plots - save_all_plots(results, args.out) + print("✅ Simulation finished!") + + # Plot 1: The main dashboard (Latency Distribution + Throughput) + fig_base, axes_base = plt.subplots(1, 2, figsize=(14, 5)) + results.plot_base_dashboard(axes_base[0], axes_base[1]) + fig_base.tight_layout() + base_plot_path = out_dir / f"{output_base_name}_dashboard.png" + fig_base.savefig(base_plot_path) + print(f"🖼️ Base dashboard saved to: {base_plot_path}") + + # Plot 2: Individual plots for each server's metrics + server_ids = results.list_server_ids() + for sid in server_ids: + # Ready queue (separate) + fig_rdy, ax_rdy = plt.subplots(figsize=(10, 5)) + results.plot_single_server_ready_queue(ax_rdy, sid) + fig_rdy.tight_layout() + rdy_path = out_dir / f"{output_base_name}_ready_queue_{sid}.png" + fig_rdy.savefig(rdy_path) + print(f"🖼️ Ready queue for '{sid}' saved to: {rdy_path}") + + # I/O queue (separate) + fig_io, ax_io = plt.subplots(figsize=(10, 5)) + results.plot_single_server_io_queue(ax_io, sid) + fig_io.tight_layout() + io_path = out_dir / f"{output_base_name}_io_queue_{sid}.png" + fig_io.savefig(io_path) + print(f"🖼️ I/O queue for '{sid}' saved to: {io_path}") + + # RAM (separate) + fig_r, ax_r = plt.subplots(figsize=(10, 5)) + results.plot_single_server_ram(ax_r, sid) + fig_r.tight_layout() + r_path = out_dir / f"{output_base_name}_ram_{sid}.png" + fig_r.savefig(r_path) + print(f"🖼️ RAM plot for '{sid}' saved to: {r_path}") if __name__ == "__main__": diff --git a/examples/yaml_input/single_server/single_server_results_dashboard.png b/examples/yaml_input/single_server/single_server_results_dashboard.png new file mode 100644 index 0000000..0a6f994 Binary files /dev/null and b/examples/yaml_input/single_server/single_server_results_dashboard.png differ diff --git a/examples/yaml_input/single_server/single_server_results_io_queue_srv-1.png b/examples/yaml_input/single_server/single_server_results_io_queue_srv-1.png new file mode 100644 index 0000000..f2bb1f0 Binary files /dev/null and b/examples/yaml_input/single_server/single_server_results_io_queue_srv-1.png differ diff --git a/examples/yaml_input/single_server/single_server_results_ram_srv-1.png b/examples/yaml_input/single_server/single_server_results_ram_srv-1.png new file mode 100644 index 0000000..c7a33af Binary files /dev/null and b/examples/yaml_input/single_server/single_server_results_ram_srv-1.png differ diff --git a/examples/yaml_input/single_server/single_server_results_ready_queue_srv-1.png b/examples/yaml_input/single_server/single_server_results_ready_queue_srv-1.png new file mode 100644 index 0000000..7539852 Binary files /dev/null and b/examples/yaml_input/single_server/single_server_results_ready_queue_srv-1.png differ diff --git a/examples/yaml_input/your_example.py b/examples/yaml_input/your_example.py deleted file mode 100644 index ea2c3fa..0000000 --- a/examples/yaml_input/your_example.py +++ /dev/null @@ -1,54 +0,0 @@ -from pathlib import Path - -import simpy -import matplotlib.pyplot as plt - -from asyncflow.config.constants import LatencyKey -from asyncflow.runtime.simulation_runner import 
SimulationRunner -from asyncflow.metrics.analyzer import ResultsAnalyzer - -def print_latency_stats(res: ResultsAnalyzer) -> None: - """Print latency statistics returned by the analyzer.""" - stats = res.get_latency_stats() - print("\n=== LATENCY STATS ===") - if not stats: - print("(empty)") - return - - order: list[LatencyKey] = [ - LatencyKey.TOTAL_REQUESTS, - LatencyKey.MEAN, - LatencyKey.MEDIAN, - LatencyKey.STD_DEV, - LatencyKey.P95, - LatencyKey.P99, - LatencyKey.MIN, - LatencyKey.MAX, - ] - for key in order: - if key in stats: - print(f"{key.name:<20} = {stats[key]:.6f}") - -def save_all_plots(res: ResultsAnalyzer, out_path: Path) -> None: - """Generate the 2x2 plot figure and save it to `out_path`.""" - fig, axes = plt.subplots(2, 2, figsize=(12, 8)) - res.plot_latency_distribution(axes[0, 0]) - res.plot_throughput(axes[0, 1]) - res.plot_server_queues(axes[1, 0]) - res.plot_ram_usage(axes[1, 1]) - fig.tight_layout() - fig.savefig(out_path) - print(f"Plots saved to: {out_path}") - -# Paths -yaml_path = Path(__file__).parent / "data" / ".yml" -out_path = Path(__file__).parent / "_plots.png" - -# Simulation -env = simpy.Environment() -runner = SimulationRunner.from_yaml(env=env, yaml_path=yaml_path) -results: ResultsAnalyzer = runner.run() - -# Output -print_latency_stats(results) -save_all_plots(results, out_path) diff --git a/poetry.lock b/poetry.lock index 9147300..493ce80 100644 --- a/poetry.lock +++ b/poetry.lock @@ -115,99 +115,99 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist" [[package]] name = "coverage" -version = "7.10.3" +version = "7.10.4" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" files = [ - {file = "coverage-7.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53808194afdf948c462215e9403cca27a81cf150d2f9b386aee4dab614ae2ffe"}, - {file = "coverage-7.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f4d1b837d1abf72187a61645dbf799e0d7705aa9232924946e1f57eb09a3bf00"}, - {file = "coverage-7.10.3-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2a90dd4505d3cc68b847ab10c5ee81822a968b5191664e8a0801778fa60459fa"}, - {file = "coverage-7.10.3-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d52989685ff5bf909c430e6d7f6550937bc6d6f3e6ecb303c97a86100efd4596"}, - {file = "coverage-7.10.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdb558a1d97345bde3a9f4d3e8d11c9e5611f748646e9bb61d7d612a796671b5"}, - {file = "coverage-7.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c9e6331a8f09cb1fc8bda032752af03c366870b48cce908875ba2620d20d0ad4"}, - {file = "coverage-7.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:992f48bf35b720e174e7fae916d943599f1a66501a2710d06c5f8104e0756ee1"}, - {file = "coverage-7.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c5595fc4ad6a39312c786ec3326d7322d0cf10e3ac6a6df70809910026d67cfb"}, - {file = "coverage-7.10.3-cp310-cp310-win32.whl", hash = "sha256:9e92fa1f2bd5a57df9d00cf9ce1eb4ef6fccca4ceabec1c984837de55329db34"}, - {file = "coverage-7.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:b96524d6e4a3ce6a75c56bb15dbd08023b0ae2289c254e15b9fbdddf0c577416"}, - {file = "coverage-7.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2ff2e2afdf0d51b9b8301e542d9c21a8d084fd23d4c8ea2b3a1b3c96f5f7397"}, - {file = "coverage-7.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:18ecc5d1b9a8c570f6c9b808fa9a2b16836b3dd5414a6d467ae942208b095f85"}, - {file = "coverage-7.10.3-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1af4461b25fe92889590d438905e1fc79a95680ec2a1ff69a591bb3fdb6c7157"}, - {file = "coverage-7.10.3-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3966bc9a76b09a40dc6063c8b10375e827ea5dfcaffae402dd65953bef4cba54"}, - {file = "coverage-7.10.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:205a95b87ef4eb303b7bc5118b47b6b6604a644bcbdb33c336a41cfc0a08c06a"}, - {file = "coverage-7.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b3801b79fb2ad61e3c7e2554bab754fc5f105626056980a2b9cf3aef4f13f84"}, - {file = "coverage-7.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b0dc69c60224cda33d384572da945759756e3f06b9cdac27f302f53961e63160"}, - {file = "coverage-7.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a83d4f134bab2c7ff758e6bb1541dd72b54ba295ced6a63d93efc2e20cb9b124"}, - {file = "coverage-7.10.3-cp311-cp311-win32.whl", hash = "sha256:54e409dd64e5302b2a8fdf44ec1c26f47abd1f45a2dcf67bd161873ee05a59b8"}, - {file = "coverage-7.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:30c601610a9b23807c5e9e2e442054b795953ab85d525c3de1b1b27cebeb2117"}, - {file = "coverage-7.10.3-cp311-cp311-win_arm64.whl", hash = "sha256:dabe662312a97958e932dee056f2659051d822552c0b866823e8ba1c2fe64770"}, - {file = "coverage-7.10.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:449c1e2d3a84d18bd204258a897a87bc57380072eb2aded6a5b5226046207b42"}, - {file = "coverage-7.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d4f9ce50b9261ad196dc2b2e9f1fbbee21651b54c3097a25ad783679fd18294"}, - {file = "coverage-7.10.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4dd4564207b160d0d45c36a10bc0a3d12563028e8b48cd6459ea322302a156d7"}, - {file = "coverage-7.10.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5ca3c9530ee072b7cb6a6ea7b640bcdff0ad3b334ae9687e521e59f79b1d0437"}, - {file = "coverage-7.10.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b6df359e59fa243c9925ae6507e27f29c46698359f45e568fd51b9315dbbe587"}, - {file = "coverage-7.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a181e4c2c896c2ff64c6312db3bda38e9ade2e1aa67f86a5628ae85873786cea"}, - {file = "coverage-7.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a374d4e923814e8b72b205ef6b3d3a647bb50e66f3558582eda074c976923613"}, - {file = "coverage-7.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:daeefff05993e5e8c6e7499a8508e7bd94502b6b9a9159c84fd1fe6bce3151cb"}, - {file = "coverage-7.10.3-cp312-cp312-win32.whl", hash = "sha256:187ecdcac21f9636d570e419773df7bd2fda2e7fa040f812e7f95d0bddf5f79a"}, - {file = "coverage-7.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:4a50ad2524ee7e4c2a95e60d2b0b83283bdfc745fe82359d567e4f15d3823eb5"}, - {file = "coverage-7.10.3-cp312-cp312-win_arm64.whl", hash = "sha256:c112f04e075d3495fa3ed2200f71317da99608cbb2e9345bdb6de8819fc30571"}, - {file = "coverage-7.10.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b99e87304ffe0eb97c5308447328a584258951853807afdc58b16143a530518a"}, - {file = "coverage-7.10.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4af09c7574d09afbc1ea7da9dcea23665c01f3bc1b1feb061dac135f98ffc53a"}, - {file = 
"coverage-7.10.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:488e9b50dc5d2aa9521053cfa706209e5acf5289e81edc28291a24f4e4488f46"}, - {file = "coverage-7.10.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:913ceddb4289cbba3a310704a424e3fb7aac2bc0c3a23ea473193cb290cf17d4"}, - {file = "coverage-7.10.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b1f91cbc78c7112ab84ed2a8defbccd90f888fcae40a97ddd6466b0bec6ae8a"}, - {file = "coverage-7.10.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0bac054d45af7cd938834b43a9878b36ea92781bcb009eab040a5b09e9927e3"}, - {file = "coverage-7.10.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fe72cbdd12d9e0f4aca873fa6d755e103888a7f9085e4a62d282d9d5b9f7928c"}, - {file = "coverage-7.10.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c1e2e927ab3eadd7c244023927d646e4c15c65bb2ac7ae3c3e9537c013700d21"}, - {file = "coverage-7.10.3-cp313-cp313-win32.whl", hash = "sha256:24d0c13de473b04920ddd6e5da3c08831b1170b8f3b17461d7429b61cad59ae0"}, - {file = "coverage-7.10.3-cp313-cp313-win_amd64.whl", hash = "sha256:3564aae76bce4b96e2345cf53b4c87e938c4985424a9be6a66ee902626edec4c"}, - {file = "coverage-7.10.3-cp313-cp313-win_arm64.whl", hash = "sha256:f35580f19f297455f44afcd773c9c7a058e52eb6eb170aa31222e635f2e38b87"}, - {file = "coverage-7.10.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07009152f497a0464ffdf2634586787aea0e69ddd023eafb23fc38267db94b84"}, - {file = "coverage-7.10.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dd2ba5f0c7e7e8cc418be2f0c14c4d9e3f08b8fb8e4c0f83c2fe87d03eb655e"}, - {file = "coverage-7.10.3-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1ae22b97003c74186e034a93e4f946c75fad8c0ce8d92fbbc168b5e15ee2841f"}, - {file = "coverage-7.10.3-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:eb329f1046888a36b1dc35504d3029e1dd5afe2196d94315d18c45ee380f67d5"}, - {file = "coverage-7.10.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce01048199a91f07f96ca3074b0c14021f4fe7ffd29a3e6a188ac60a5c3a4af8"}, - {file = "coverage-7.10.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:08b989a06eb9dfacf96d42b7fb4c9a22bafa370d245dc22fa839f2168c6f9fa1"}, - {file = "coverage-7.10.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:669fe0d4e69c575c52148511029b722ba8d26e8a3129840c2ce0522e1452b256"}, - {file = "coverage-7.10.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3262d19092771c83f3413831d9904b1ccc5f98da5de4ffa4ad67f5b20c7aaf7b"}, - {file = "coverage-7.10.3-cp313-cp313t-win32.whl", hash = "sha256:cc0ee4b2ccd42cab7ee6be46d8a67d230cb33a0a7cd47a58b587a7063b6c6b0e"}, - {file = "coverage-7.10.3-cp313-cp313t-win_amd64.whl", hash = "sha256:03db599f213341e2960430984e04cf35fb179724e052a3ee627a068653cf4a7c"}, - {file = "coverage-7.10.3-cp313-cp313t-win_arm64.whl", hash = "sha256:46eae7893ba65f53c71284585a262f083ef71594f05ec5c85baf79c402369098"}, - {file = "coverage-7.10.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:bce8b8180912914032785850d8f3aacb25ec1810f5f54afc4a8b114e7a9b55de"}, - {file = "coverage-7.10.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:07790b4b37d56608536f7c1079bd1aa511567ac2966d33d5cec9cf520c50a7c8"}, - {file = "coverage-7.10.3-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = 
"sha256:e79367ef2cd9166acedcbf136a458dfe9a4a2dd4d1ee95738fb2ee581c56f667"}, - {file = "coverage-7.10.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:419d2a0f769f26cb1d05e9ccbc5eab4cb5d70231604d47150867c07822acbdf4"}, - {file = "coverage-7.10.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee221cf244757cdc2ac882e3062ab414b8464ad9c884c21e878517ea64b3fa26"}, - {file = "coverage-7.10.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c2079d8cdd6f7373d628e14b3357f24d1db02c9dc22e6a007418ca7a2be0435a"}, - {file = "coverage-7.10.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:bd8df1f83c0703fa3ca781b02d36f9ec67ad9cb725b18d486405924f5e4270bd"}, - {file = "coverage-7.10.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6b4e25e0fa335c8aa26e42a52053f3786a61cc7622b4d54ae2dad994aa754fec"}, - {file = "coverage-7.10.3-cp314-cp314-win32.whl", hash = "sha256:d7c3d02c2866deb217dce664c71787f4b25420ea3eaf87056f44fb364a3528f5"}, - {file = "coverage-7.10.3-cp314-cp314-win_amd64.whl", hash = "sha256:9c8916d44d9e0fe6cdb2227dc6b0edd8bc6c8ef13438bbbf69af7482d9bb9833"}, - {file = "coverage-7.10.3-cp314-cp314-win_arm64.whl", hash = "sha256:1007d6a2b3cf197c57105cc1ba390d9ff7f0bee215ced4dea530181e49c65ab4"}, - {file = "coverage-7.10.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:ebc8791d346410d096818788877d675ca55c91db87d60e8f477bd41c6970ffc6"}, - {file = "coverage-7.10.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f4e4d8e75f6fd3c6940ebeed29e3d9d632e1f18f6fb65d33086d99d4d073241"}, - {file = "coverage-7.10.3-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:24581ed69f132b6225a31b0228ae4885731cddc966f8a33fe5987288bdbbbd5e"}, - {file = "coverage-7.10.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ec151569ddfccbf71bac8c422dce15e176167385a00cd86e887f9a80035ce8a5"}, - {file = "coverage-7.10.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2ae8e7c56290b908ee817200c0b65929b8050bc28530b131fe7c6dfee3e7d86b"}, - {file = "coverage-7.10.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fb742309766d7e48e9eb4dc34bc95a424707bc6140c0e7d9726e794f11b92a0"}, - {file = "coverage-7.10.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:c65e2a5b32fbe1e499f1036efa6eb9cb4ea2bf6f7168d0e7a5852f3024f471b1"}, - {file = "coverage-7.10.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d48d2cb07d50f12f4f18d2bb75d9d19e3506c26d96fffabf56d22936e5ed8f7c"}, - {file = "coverage-7.10.3-cp314-cp314t-win32.whl", hash = "sha256:dec0d9bc15ee305e09fe2cd1911d3f0371262d3cfdae05d79515d8cb712b4869"}, - {file = "coverage-7.10.3-cp314-cp314t-win_amd64.whl", hash = "sha256:424ea93a323aa0f7f01174308ea78bde885c3089ec1bef7143a6d93c3e24ef64"}, - {file = "coverage-7.10.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f5983c132a62d93d71c9ef896a0b9bf6e6828d8d2ea32611f58684fba60bba35"}, - {file = "coverage-7.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:da749daa7e141985487e1ff90a68315b0845930ed53dc397f4ae8f8bab25b551"}, - {file = "coverage-7.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3126fb6a47d287f461d9b1aa5d1a8c97034d1dffb4f452f2cf211289dae74ef"}, - {file = "coverage-7.10.3-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3da794db13cc27ca40e1ec8127945b97fab78ba548040047d54e7bfa6d442dca"}, - {file = 
"coverage-7.10.3-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:4e27bebbd184ef8d1c1e092b74a2b7109dcbe2618dce6e96b1776d53b14b3fe8"}, - {file = "coverage-7.10.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8fd4ee2580b9fefbd301b4f8f85b62ac90d1e848bea54f89a5748cf132782118"}, - {file = "coverage-7.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6999920bdd73259ce11cabfc1307484f071ecc6abdb2ca58d98facbcefc70f16"}, - {file = "coverage-7.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c3623f929db885fab100cb88220a5b193321ed37e03af719efdbaf5d10b6e227"}, - {file = "coverage-7.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:25b902c5e15dea056485d782e420bb84621cc08ee75d5131ecb3dbef8bd1365f"}, - {file = "coverage-7.10.3-cp39-cp39-win32.whl", hash = "sha256:f930a4d92b004b643183451fe9c8fe398ccf866ed37d172ebaccfd443a097f61"}, - {file = "coverage-7.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:08e638a93c8acba13c7842953f92a33d52d73e410329acd472280d2a21a6c0e1"}, - {file = "coverage-7.10.3-py3-none-any.whl", hash = "sha256:416a8d74dc0adfd33944ba2f405897bab87b7e9e84a391e09d241956bd953ce1"}, - {file = "coverage-7.10.3.tar.gz", hash = "sha256:812ba9250532e4a823b070b0420a36499859542335af3dca8f47fc6aa1a05619"}, + {file = "coverage-7.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d92d6edb0ccafd20c6fbf9891ca720b39c2a6a4b4a6f9cf323ca2c986f33e475"}, + {file = "coverage-7.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7202da14dc0236884fcc45665ffb2d79d4991a53fbdf152ab22f69f70923cc22"}, + {file = "coverage-7.10.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ada418633ae24ec8d0fcad5efe6fc7aa3c62497c6ed86589e57844ad04365674"}, + {file = "coverage-7.10.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b828e33eca6c3322adda3b5884456f98c435182a44917ded05005adfa1415500"}, + {file = "coverage-7.10.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:802793ba397afcfdbe9f91f89d65ae88b958d95edc8caf948e1f47d8b6b2b606"}, + {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d0b23512338c54101d3bf7a1ab107d9d75abda1d5f69bc0887fd079253e4c27e"}, + {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f36b7dcf72d06a8c5e2dd3aca02be2b1b5db5f86404627dff834396efce958f2"}, + {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fce316c367a1dc2c411821365592eeb335ff1781956d87a0410eae248188ba51"}, + {file = "coverage-7.10.4-cp310-cp310-win32.whl", hash = "sha256:8c5dab29fc8070b3766b5fc85f8d89b19634584429a2da6d42da5edfadaf32ae"}, + {file = "coverage-7.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:4b0d114616f0fccb529a1817457d5fb52a10e106f86c5fb3b0bd0d45d0d69b93"}, + {file = "coverage-7.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:05d5f98ec893d4a2abc8bc5f046f2f4367404e7e5d5d18b83de8fde1093ebc4f"}, + {file = "coverage-7.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9267efd28f8994b750d171e58e481e3bbd69e44baed540e4c789f8e368b24b88"}, + {file = "coverage-7.10.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4456a039fdc1a89ea60823d0330f1ac6f97b0dbe9e2b6fb4873e889584b085fb"}, + {file = "coverage-7.10.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = 
"sha256:c2bfbd2a9f7e68a21c5bd191be94bfdb2691ac40d325bac9ef3ae45ff5c753d9"}, + {file = "coverage-7.10.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ab7765f10ae1df7e7fe37de9e64b5a269b812ee22e2da3f84f97b1c7732a0d8"}, + {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a09b13695166236e171ec1627ff8434b9a9bae47528d0ba9d944c912d33b3d2"}, + {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5c9e75dfdc0167d5675e9804f04a56b2cf47fb83a524654297000b578b8adcb7"}, + {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c751261bfe6481caba15ec005a194cb60aad06f29235a74c24f18546d8377df0"}, + {file = "coverage-7.10.4-cp311-cp311-win32.whl", hash = "sha256:051c7c9e765f003c2ff6e8c81ccea28a70fb5b0142671e4e3ede7cebd45c80af"}, + {file = "coverage-7.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:1a647b152f10be08fb771ae4a1421dbff66141e3d8ab27d543b5eb9ea5af8e52"}, + {file = "coverage-7.10.4-cp311-cp311-win_arm64.whl", hash = "sha256:b09b9e4e1de0d406ca9f19a371c2beefe3193b542f64a6dd40cfcf435b7d6aa0"}, + {file = "coverage-7.10.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a1f0264abcabd4853d4cb9b3d164adbf1565da7dab1da1669e93f3ea60162d79"}, + {file = "coverage-7.10.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:536cbe6b118a4df231b11af3e0f974a72a095182ff8ec5f4868c931e8043ef3e"}, + {file = "coverage-7.10.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9a4c0d84134797b7bf3f080599d0cd501471f6c98b715405166860d79cfaa97e"}, + {file = "coverage-7.10.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7c155fc0f9cee8c9803ea0ad153ab6a3b956baa5d4cd993405dc0b45b2a0b9e0"}, + {file = "coverage-7.10.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5f2ab6e451d4b07855d8bcf063adf11e199bff421a4ba57f5bb95b7444ca62"}, + {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:685b67d99b945b0c221be0780c336b303a7753b3e0ec0d618c795aada25d5e7a"}, + {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0c079027e50c2ae44da51c2e294596cbc9dbb58f7ca45b30651c7e411060fc23"}, + {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3749aa72b93ce516f77cf5034d8e3c0dfd45c6e8a163a602ede2dc5f9a0bb927"}, + {file = "coverage-7.10.4-cp312-cp312-win32.whl", hash = "sha256:fecb97b3a52fa9bcd5a7375e72fae209088faf671d39fae67261f37772d5559a"}, + {file = "coverage-7.10.4-cp312-cp312-win_amd64.whl", hash = "sha256:26de58f355626628a21fe6a70e1e1fad95702dafebfb0685280962ae1449f17b"}, + {file = "coverage-7.10.4-cp312-cp312-win_arm64.whl", hash = "sha256:67e8885408f8325198862bc487038a4980c9277d753cb8812510927f2176437a"}, + {file = "coverage-7.10.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b8e1d2015d5dfdbf964ecef12944c0c8c55b885bb5c0467ae8ef55e0e151233"}, + {file = "coverage-7.10.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:25735c299439018d66eb2dccf54f625aceb78645687a05f9f848f6e6c751e169"}, + {file = "coverage-7.10.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:715c06cb5eceac4d9b7cdf783ce04aa495f6aff657543fea75c30215b28ddb74"}, + {file = "coverage-7.10.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e017ac69fac9aacd7df6dc464c05833e834dc5b00c914d7af9a5249fcccf07ef"}, + {file = 
"coverage-7.10.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bad180cc40b3fccb0f0e8c702d781492654ac2580d468e3ffc8065e38c6c2408"}, + {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:becbdcd14f685fada010a5f792bf0895675ecf7481304fe159f0cd3f289550bd"}, + {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0b485ca21e16a76f68060911f97ebbe3e0d891da1dbbce6af7ca1ab3f98b9097"}, + {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6c1d098ccfe8e1e0a1ed9a0249138899948afd2978cbf48eb1cc3fcd38469690"}, + {file = "coverage-7.10.4-cp313-cp313-win32.whl", hash = "sha256:8630f8af2ca84b5c367c3df907b1706621abe06d6929f5045fd628968d421e6e"}, + {file = "coverage-7.10.4-cp313-cp313-win_amd64.whl", hash = "sha256:f68835d31c421736be367d32f179e14ca932978293fe1b4c7a6a49b555dff5b2"}, + {file = "coverage-7.10.4-cp313-cp313-win_arm64.whl", hash = "sha256:6eaa61ff6724ca7ebc5326d1fae062d85e19b38dd922d50903702e6078370ae7"}, + {file = "coverage-7.10.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:702978108876bfb3d997604930b05fe769462cc3000150b0e607b7b444f2fd84"}, + {file = "coverage-7.10.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e8f978e8c5521d9c8f2086ac60d931d583fab0a16f382f6eb89453fe998e2484"}, + {file = "coverage-7.10.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:df0ac2ccfd19351411c45e43ab60932b74472e4648b0a9edf6a3b58846e246a9"}, + {file = "coverage-7.10.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73a0d1aaaa3796179f336448e1576a3de6fc95ff4f07c2d7251d4caf5d18cf8d"}, + {file = "coverage-7.10.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:873da6d0ed6b3ffc0bc01f2c7e3ad7e2023751c0d8d86c26fe7322c314b031dc"}, + {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c6446c75b0e7dda5daa876a1c87b480b2b52affb972fedd6c22edf1aaf2e00ec"}, + {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6e73933e296634e520390c44758d553d3b573b321608118363e52113790633b9"}, + {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52073d4b08d2cb571234c8a71eb32af3c6923149cf644a51d5957ac128cf6aa4"}, + {file = "coverage-7.10.4-cp313-cp313t-win32.whl", hash = "sha256:e24afb178f21f9ceb1aefbc73eb524769aa9b504a42b26857243f881af56880c"}, + {file = "coverage-7.10.4-cp313-cp313t-win_amd64.whl", hash = "sha256:be04507ff1ad206f4be3d156a674e3fb84bbb751ea1b23b142979ac9eebaa15f"}, + {file = "coverage-7.10.4-cp313-cp313t-win_arm64.whl", hash = "sha256:f3e3ff3f69d02b5dad67a6eac68cc9c71ae343b6328aae96e914f9f2f23a22e2"}, + {file = "coverage-7.10.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a59fe0af7dd7211ba595cf7e2867458381f7e5d7b4cffe46274e0b2f5b9f4eb4"}, + {file = "coverage-7.10.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3a6c35c5b70f569ee38dc3350cd14fdd0347a8b389a18bb37538cc43e6f730e6"}, + {file = "coverage-7.10.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:acb7baf49f513554c4af6ef8e2bd6e8ac74e6ea0c7386df8b3eb586d82ccccc4"}, + {file = "coverage-7.10.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a89afecec1ed12ac13ed203238b560cbfad3522bae37d91c102e690b8b1dc46c"}, + {file = "coverage-7.10.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash 
= "sha256:480442727f464407d8ade6e677b7f21f3b96a9838ab541b9a28ce9e44123c14e"}, + {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a89bf193707f4a17f1ed461504031074d87f035153239f16ce86dfb8f8c7ac76"}, + {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:3ddd912c2fc440f0fb3229e764feec85669d5d80a988ff1b336a27d73f63c818"}, + {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a538944ee3a42265e61c7298aeba9ea43f31c01271cf028f437a7b4075592cf"}, + {file = "coverage-7.10.4-cp314-cp314-win32.whl", hash = "sha256:fd2e6002be1c62476eb862b8514b1ba7e7684c50165f2a8d389e77da6c9a2ebd"}, + {file = "coverage-7.10.4-cp314-cp314-win_amd64.whl", hash = "sha256:ec113277f2b5cf188d95fb66a65c7431f2b9192ee7e6ec9b72b30bbfb53c244a"}, + {file = "coverage-7.10.4-cp314-cp314-win_arm64.whl", hash = "sha256:9744954bfd387796c6a091b50d55ca7cac3d08767795b5eec69ad0f7dbf12d38"}, + {file = "coverage-7.10.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5af4829904dda6aabb54a23879f0f4412094ba9ef153aaa464e3c1b1c9bc98e6"}, + {file = "coverage-7.10.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7bba5ed85e034831fac761ae506c0644d24fd5594727e174b5a73aff343a7508"}, + {file = "coverage-7.10.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d57d555b0719834b55ad35045de6cc80fc2b28e05adb6b03c98479f9553b387f"}, + {file = "coverage-7.10.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ba62c51a72048bb1ea72db265e6bd8beaabf9809cd2125bbb5306c6ce105f214"}, + {file = "coverage-7.10.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0acf0c62a6095f07e9db4ec365cc58c0ef5babb757e54745a1aa2ea2a2564af1"}, + {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e1033bf0f763f5cf49ffe6594314b11027dcc1073ac590b415ea93463466deec"}, + {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:92c29eff894832b6a40da1789b1f252305af921750b03ee4535919db9179453d"}, + {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:822c4c830989c2093527e92acd97be4638a44eb042b1bdc0e7a278d84a070bd3"}, + {file = "coverage-7.10.4-cp314-cp314t-win32.whl", hash = "sha256:e694d855dac2e7cf194ba33653e4ba7aad7267a802a7b3fc4347d0517d5d65cd"}, + {file = "coverage-7.10.4-cp314-cp314t-win_amd64.whl", hash = "sha256:efcc54b38ef7d5bfa98050f220b415bc5bb3d432bd6350a861cf6da0ede2cdcd"}, + {file = "coverage-7.10.4-cp314-cp314t-win_arm64.whl", hash = "sha256:6f3a3496c0fa26bfac4ebc458747b778cff201c8ae94fa05e1391bab0dbc473c"}, + {file = "coverage-7.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:48fd4d52600c2a9d5622e52dfae674a7845c5e1dceaf68b88c99feb511fbcfd6"}, + {file = "coverage-7.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:56217b470d09d69e6b7dcae38200f95e389a77db801cb129101697a4553b18b6"}, + {file = "coverage-7.10.4-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:44ac3f21a6e28c5ff7f7a47bca5f87885f6a1e623e637899125ba47acd87334d"}, + {file = "coverage-7.10.4-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3387739d72c84d17b4d2f7348749cac2e6700e7152026912b60998ee9a40066b"}, + {file = "coverage-7.10.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3f111ff20d9a6348e0125be892608e33408dd268f73b020940dfa8511ad05503"}, + {file = 
"coverage-7.10.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:01a852f0a9859734b018a3f483cc962d0b381d48d350b1a0c47d618c73a0c398"}, + {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:225111dd06759ba4e37cee4c0b4f3df2b15c879e9e3c37bf986389300b9917c3"}, + {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2178d4183bd1ba608f0bb12e71e55838ba1b7dbb730264f8b08de9f8ef0c27d0"}, + {file = "coverage-7.10.4-cp39-cp39-win32.whl", hash = "sha256:93d175fe81913aee7a6ea430abbdf2a79f1d9fd451610e12e334e4fe3264f563"}, + {file = "coverage-7.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:2221a823404bb941c7721cf0ef55ac6ee5c25d905beb60c0bba5e5e85415d353"}, + {file = "coverage-7.10.4-py3-none-any.whl", hash = "sha256:065d75447228d05121e5c938ca8f0e91eed60a1eb2d1258d42d5084fecfc3302"}, + {file = "coverage-7.10.4.tar.gz", hash = "sha256:25f5130af6c8e7297fd14634955ba9e1697f47143f289e2a23284177c0061d27"}, ] [package.extras] @@ -265,53 +265,69 @@ idna = ">=2.0.0" [[package]] name = "fonttools" -version = "4.59.0" +version = "4.59.1" description = "Tools to manipulate font files" optional = false python-versions = ">=3.9" files = [ - {file = "fonttools-4.59.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:524133c1be38445c5c0575eacea42dbd44374b310b1ffc4b60ff01d881fabb96"}, - {file = "fonttools-4.59.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21e606b2d38fed938dde871c5736822dd6bda7a4631b92e509a1f5cd1b90c5df"}, - {file = "fonttools-4.59.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e93df708c69a193fc7987192f94df250f83f3851fda49413f02ba5dded639482"}, - {file = "fonttools-4.59.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:62224a9bb85b4b66d1b46d45cbe43d71cbf8f527d332b177e3b96191ffbc1e64"}, - {file = "fonttools-4.59.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8974b2a266b54c96709bd5e239979cddfd2dbceed331aa567ea1d7c4a2202db"}, - {file = "fonttools-4.59.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:209b75943d158f610b78320eacb5539aa9e920bee2c775445b2846c65d20e19d"}, - {file = "fonttools-4.59.0-cp310-cp310-win32.whl", hash = "sha256:4c908a7036f0f3677f8afa577bcd973e3e20ddd2f7c42a33208d18bee95cdb6f"}, - {file = "fonttools-4.59.0-cp310-cp310-win_amd64.whl", hash = "sha256:8b4309a2775e4feee7356e63b163969a215d663399cce1b3d3b65e7ec2d9680e"}, - {file = "fonttools-4.59.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:841b2186adce48903c0fef235421ae21549020eca942c1da773ac380b056ab3c"}, - {file = "fonttools-4.59.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9bcc1e77fbd1609198966ded6b2a9897bd6c6bcbd2287a2fc7d75f1a254179c5"}, - {file = "fonttools-4.59.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:37c377f7cb2ab2eca8a0b319c68146d34a339792f9420fca6cd49cf28d370705"}, - {file = "fonttools-4.59.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa39475eaccb98f9199eccfda4298abaf35ae0caec676ffc25b3a5e224044464"}, - {file = "fonttools-4.59.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d3972b13148c1d1fbc092b27678a33b3080d1ac0ca305742b0119b75f9e87e38"}, - {file = "fonttools-4.59.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a408c3c51358c89b29cfa5317cf11518b7ce5de1717abb55c5ae2d2921027de6"}, - {file = "fonttools-4.59.0-cp311-cp311-win32.whl", hash = "sha256:6770d7da00f358183d8fd5c4615436189e4f683bdb6affb02cad3d221d7bb757"}, - {file = 
"fonttools-4.59.0-cp311-cp311-win_amd64.whl", hash = "sha256:84fc186980231a287b28560d3123bd255d3c6b6659828c642b4cf961e2b923d0"}, - {file = "fonttools-4.59.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f9b3a78f69dcbd803cf2fb3f972779875b244c1115481dfbdd567b2c22b31f6b"}, - {file = "fonttools-4.59.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:57bb7e26928573ee7c6504f54c05860d867fd35e675769f3ce01b52af38d48e2"}, - {file = "fonttools-4.59.0-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:4536f2695fe5c1ffb528d84a35a7d3967e5558d2af58b4775e7ab1449d65767b"}, - {file = "fonttools-4.59.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:885bde7d26e5b40e15c47bd5def48b38cbd50830a65f98122a8fb90962af7cd1"}, - {file = "fonttools-4.59.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6801aeddb6acb2c42eafa45bc1cb98ba236871ae6f33f31e984670b749a8e58e"}, - {file = "fonttools-4.59.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:31003b6a10f70742a63126b80863ab48175fb8272a18ca0846c0482968f0588e"}, - {file = "fonttools-4.59.0-cp312-cp312-win32.whl", hash = "sha256:fbce6dae41b692a5973d0f2158f782b9ad05babc2c2019a970a1094a23909b1b"}, - {file = "fonttools-4.59.0-cp312-cp312-win_amd64.whl", hash = "sha256:332bfe685d1ac58ca8d62b8d6c71c2e52a6c64bc218dc8f7825c9ea51385aa01"}, - {file = "fonttools-4.59.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:78813b49d749e1bb4db1c57f2d4d7e6db22c253cb0a86ad819f5dc197710d4b2"}, - {file = "fonttools-4.59.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:401b1941ce37e78b8fd119b419b617277c65ae9417742a63282257434fd68ea2"}, - {file = "fonttools-4.59.0-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:efd7e6660674e234e29937bc1481dceb7e0336bfae75b856b4fb272b5093c5d4"}, - {file = "fonttools-4.59.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51ab1ff33c19e336c02dee1e9fd1abd974a4ca3d8f7eef2a104d0816a241ce97"}, - {file = "fonttools-4.59.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a9bf8adc9e1f3012edc8f09b08336272aec0c55bc677422273e21280db748f7c"}, - {file = "fonttools-4.59.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37e01c6ec0c98599778c2e688350d624fa4770fbd6144551bd5e032f1199171c"}, - {file = "fonttools-4.59.0-cp313-cp313-win32.whl", hash = "sha256:70d6b3ceaa9cc5a6ac52884f3b3d9544e8e231e95b23f138bdb78e6d4dc0eae3"}, - {file = "fonttools-4.59.0-cp313-cp313-win_amd64.whl", hash = "sha256:26731739daa23b872643f0e4072d5939960237d540c35c14e6a06d47d71ca8fe"}, - {file = "fonttools-4.59.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8d77f92438daeaddc05682f0f3dac90c5b9829bcac75b57e8ce09cb67786073c"}, - {file = "fonttools-4.59.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:60f6665579e909b618282f3c14fa0b80570fbf1ee0e67678b9a9d43aa5d67a37"}, - {file = "fonttools-4.59.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:169b99a2553a227f7b5fea8d9ecd673aa258617f466b2abc6091fe4512a0dcd0"}, - {file = "fonttools-4.59.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:052444a5d0151878e87e3e512a1aa1a0ab35ee4c28afde0a778e23b0ace4a7de"}, - {file = "fonttools-4.59.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d40dcf533ca481355aa7b682e9e079f766f35715defa4929aeb5597f9604272e"}, - {file = 
"fonttools-4.59.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b818db35879d2edf7f46c7e729c700a0bce03b61b9412f5a7118406687cb151d"}, - {file = "fonttools-4.59.0-cp39-cp39-win32.whl", hash = "sha256:2e7cf8044ce2598bb87e44ba1d2c6e45d7a8decf56055b92906dc53f67c76d64"}, - {file = "fonttools-4.59.0-cp39-cp39-win_amd64.whl", hash = "sha256:902425f5afe28572d65d2bf9c33edd5265c612ff82c69e6f83ea13eafc0dcbea"}, - {file = "fonttools-4.59.0-py3-none-any.whl", hash = "sha256:241313683afd3baacb32a6bd124d0bce7404bc5280e12e291bae1b9bba28711d"}, - {file = "fonttools-4.59.0.tar.gz", hash = "sha256:be392ec3529e2f57faa28709d60723a763904f71a2b63aabe14fee6648fe3b14"}, + {file = "fonttools-4.59.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e90a89e52deb56b928e761bb5b5f65f13f669bfd96ed5962975debea09776a23"}, + {file = "fonttools-4.59.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d29ab70658d2ec19422b25e6ace00a0b0ae4181ee31e03335eaef53907d2d83"}, + {file = "fonttools-4.59.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f9721a564978a10d5c12927f99170d18e9a32e5a727c61eae56f956a4d118b"}, + {file = "fonttools-4.59.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8c8758a7d97848fc8b514b3d9b4cb95243714b2f838dde5e1e3c007375de6214"}, + {file = "fonttools-4.59.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2aeb829ad9d41a2ef17cab8bb5d186049ba38a840f10352e654aa9062ec32dc1"}, + {file = "fonttools-4.59.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac216a2980a2d2b3b88c68a24f8a9bfb203e2490e991b3238502ad8f1e7bfed0"}, + {file = "fonttools-4.59.1-cp310-cp310-win32.whl", hash = "sha256:d31dc137ed8ec71dbc446949eba9035926e6e967b90378805dcf667ff57cabb1"}, + {file = "fonttools-4.59.1-cp310-cp310-win_amd64.whl", hash = "sha256:5265bc52ed447187d39891b5f21d7217722735d0de9fe81326566570d12851a9"}, + {file = "fonttools-4.59.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4909cce2e35706f3d18c54d3dcce0414ba5e0fb436a454dffec459c61653b513"}, + {file = "fonttools-4.59.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:efbec204fa9f877641747f2d9612b2b656071390d7a7ef07a9dbf0ecf9c7195c"}, + {file = "fonttools-4.59.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39dfd42cc2dc647b2c5469bc7a5b234d9a49e72565b96dd14ae6f11c2c59ef15"}, + {file = "fonttools-4.59.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b11bc177a0d428b37890825d7d025040d591aa833f85f8d8878ed183354f47df"}, + {file = "fonttools-4.59.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b9b4c35b3be45e5bc774d3fc9608bbf4f9a8d371103b858c80edbeed31dd5aa"}, + {file = "fonttools-4.59.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:01158376b8a418a0bae9625c476cebfcfcb5e6761e9d243b219cd58341e7afbb"}, + {file = "fonttools-4.59.1-cp311-cp311-win32.whl", hash = "sha256:cf7c5089d37787387123f1cb8f1793a47c5e1e3d1e4e7bfbc1cc96e0f925eabe"}, + {file = "fonttools-4.59.1-cp311-cp311-win_amd64.whl", hash = "sha256:c866eef7a0ba320486ade6c32bfc12813d1a5db8567e6904fb56d3d40acc5116"}, + {file = "fonttools-4.59.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:43ab814bbba5f02a93a152ee61a04182bb5809bd2bc3609f7822e12c53ae2c91"}, + {file = "fonttools-4.59.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4f04c3ffbfa0baafcbc550657cf83657034eb63304d27b05cff1653b448ccff6"}, + {file = 
"fonttools-4.59.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d601b153e51a5a6221f0d4ec077b6bfc6ac35bfe6c19aeaa233d8990b2b71726"}, + {file = "fonttools-4.59.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c735e385e30278c54f43a0d056736942023c9043f84ee1021eff9fd616d17693"}, + {file = "fonttools-4.59.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1017413cdc8555dce7ee23720da490282ab7ec1cf022af90a241f33f9a49afc4"}, + {file = "fonttools-4.59.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5c6d8d773470a5107052874341ed3c487c16ecd179976d81afed89dea5cd7406"}, + {file = "fonttools-4.59.1-cp312-cp312-win32.whl", hash = "sha256:2a2d0d33307f6ad3a2086a95dd607c202ea8852fa9fb52af9b48811154d1428a"}, + {file = "fonttools-4.59.1-cp312-cp312-win_amd64.whl", hash = "sha256:0b9e4fa7eaf046ed6ac470f6033d52c052481ff7a6e0a92373d14f556f298dc0"}, + {file = "fonttools-4.59.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:89d9957b54246c6251345297dddf77a84d2c19df96af30d2de24093bbdf0528b"}, + {file = "fonttools-4.59.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8156b11c0d5405810d216f53907bd0f8b982aa5f1e7e3127ab3be1a4062154ff"}, + {file = "fonttools-4.59.1-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8387876a8011caec52d327d5e5bca705d9399ec4b17afb8b431ec50d47c17d23"}, + {file = "fonttools-4.59.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb13823a74b3a9204a8ed76d3d6d5ec12e64cc5bc44914eb9ff1cdac04facd43"}, + {file = "fonttools-4.59.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e1ca10da138c300f768bb68e40e5b20b6ecfbd95f91aac4cc15010b6b9d65455"}, + {file = "fonttools-4.59.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2beb5bfc4887a3130f8625349605a3a45fe345655ce6031d1bac11017454b943"}, + {file = "fonttools-4.59.1-cp313-cp313-win32.whl", hash = "sha256:419f16d750d78e6d704bfe97b48bba2f73b15c9418f817d0cb8a9ca87a5b94bf"}, + {file = "fonttools-4.59.1-cp313-cp313-win_amd64.whl", hash = "sha256:c536f8a852e8d3fa71dde1ec03892aee50be59f7154b533f0bf3c1174cfd5126"}, + {file = "fonttools-4.59.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d5c3bfdc9663f3d4b565f9cb3b8c1efb3e178186435b45105bde7328cfddd7fe"}, + {file = "fonttools-4.59.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ea03f1da0d722fe3c2278a05957e6550175571a4894fbf9d178ceef4a3783d2b"}, + {file = "fonttools-4.59.1-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:57a3708ca6bfccb790f585fa6d8f29432ec329618a09ff94c16bcb3c55994643"}, + {file = "fonttools-4.59.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:729367c91eb1ee84e61a733acc485065a00590618ca31c438e7dd4d600c01486"}, + {file = "fonttools-4.59.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f8ef66ac6db450193ed150e10b3b45dde7aded10c5d279968bc63368027f62b"}, + {file = "fonttools-4.59.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:075f745d539a998cd92cb84c339a82e53e49114ec62aaea8307c80d3ad3aef3a"}, + {file = "fonttools-4.59.1-cp314-cp314-win32.whl", hash = "sha256:c2b0597522d4c5bb18aa5cf258746a2d4a90f25878cbe865e4d35526abd1b9fc"}, + {file = "fonttools-4.59.1-cp314-cp314-win_amd64.whl", hash = "sha256:e9ad4ce044e3236f0814c906ccce8647046cc557539661e35211faadf76f283b"}, + {file = 
"fonttools-4.59.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:652159e8214eb4856e8387ebcd6b6bd336ee258cbeb639c8be52005b122b9609"}, + {file = "fonttools-4.59.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:43d177cd0e847ea026fedd9f099dc917da136ed8792d142298a252836390c478"}, + {file = "fonttools-4.59.1-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e54437651e1440ee53a95e6ceb6ee440b67a3d348c76f45f4f48de1a5ecab019"}, + {file = "fonttools-4.59.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6065fdec8ff44c32a483fd44abe5bcdb40dd5e2571a5034b555348f2b3a52cea"}, + {file = "fonttools-4.59.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42052b56d176f8b315fbc09259439c013c0cb2109df72447148aeda677599612"}, + {file = "fonttools-4.59.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:bcd52eaa5c4c593ae9f447c1d13e7e4a00ca21d755645efa660b6999425b3c88"}, + {file = "fonttools-4.59.1-cp314-cp314t-win32.whl", hash = "sha256:02e4fdf27c550dded10fe038a5981c29f81cb9bc649ff2eaa48e80dab8998f97"}, + {file = "fonttools-4.59.1-cp314-cp314t-win_amd64.whl", hash = "sha256:412a5fd6345872a7c249dac5bcce380393f40c1c316ac07f447bc17d51900922"}, + {file = "fonttools-4.59.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ab4c1fb45f2984b8b4a3face7cff0f67f9766e9414cbb6fd061e9d77819de98"}, + {file = "fonttools-4.59.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ee39da0227950f88626c91e219659e6cd725ede826b1c13edd85fc4cec9bbe6"}, + {file = "fonttools-4.59.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:58a8844f96cff35860647a65345bfca87f47a2494bfb4bef754e58c082511443"}, + {file = "fonttools-4.59.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f021cea6e36410874763f4a517a5e2d6ac36ca8f95521f3a9fdaad0fe73dc"}, + {file = "fonttools-4.59.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bf5fb864f80061a40c1747e0dbc4f6e738de58dd6675b07eb80bd06a93b063c4"}, + {file = "fonttools-4.59.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c29ea087843e27a7cffc78406d32a5abf166d92afde7890394e9e079c9b4dbe9"}, + {file = "fonttools-4.59.1-cp39-cp39-win32.whl", hash = "sha256:a960b09ff50c2e87864e83f352e5a90bcf1ad5233df579b1124660e1643de272"}, + {file = "fonttools-4.59.1-cp39-cp39-win_amd64.whl", hash = "sha256:e3680884189e2b7c3549f6d304376e64711fd15118e4b1ae81940cb6b1eaa267"}, + {file = "fonttools-4.59.1-py3-none-any.whl", hash = "sha256:647db657073672a8330608970a984d51573557f328030566521bc03415535042"}, + {file = "fonttools-4.59.1.tar.gz", hash = "sha256:74995b402ad09822a4c8002438e54940d9f1ecda898d2bb057729d7da983e4cb"}, ] [package.extras] @@ -1188,29 +1204,30 @@ files = [ [[package]] name = "ruff" -version = "0.12.8" +version = "0.12.9" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.12.8-py3-none-linux_armv6l.whl", hash = "sha256:63cb5a5e933fc913e5823a0dfdc3c99add73f52d139d6cd5cc8639d0e0465513"}, - {file = "ruff-0.12.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9a9bbe28f9f551accf84a24c366c1aa8774d6748438b47174f8e8565ab9dedbc"}, - {file = "ruff-0.12.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2fae54e752a3150f7ee0e09bce2e133caf10ce9d971510a9b925392dc98d2fec"}, - {file = "ruff-0.12.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0acbcf01206df963d9331b5838fb31f3b44fa979ee7fa368b9b9057d89f4a53"}, - {file = "ruff-0.12.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae3e7504666ad4c62f9ac8eedb52a93f9ebdeb34742b8b71cd3cccd24912719f"}, - {file = "ruff-0.12.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb82efb5d35d07497813a1c5647867390a7d83304562607f3579602fa3d7d46f"}, - {file = "ruff-0.12.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dbea798fc0065ad0b84a2947b0aff4233f0cb30f226f00a2c5850ca4393de609"}, - {file = "ruff-0.12.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49ebcaccc2bdad86fd51b7864e3d808aad404aab8df33d469b6e65584656263a"}, - {file = "ruff-0.12.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ac9c570634b98c71c88cb17badd90f13fc076a472ba6ef1d113d8ed3df109fb"}, - {file = "ruff-0.12.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:560e0cd641e45591a3e42cb50ef61ce07162b9c233786663fdce2d8557d99818"}, - {file = "ruff-0.12.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:71c83121512e7743fba5a8848c261dcc454cafb3ef2934a43f1b7a4eb5a447ea"}, - {file = "ruff-0.12.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:de4429ef2ba091ecddedd300f4c3f24bca875d3d8b23340728c3cb0da81072c3"}, - {file = "ruff-0.12.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a2cab5f60d5b65b50fba39a8950c8746df1627d54ba1197f970763917184b161"}, - {file = "ruff-0.12.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:45c32487e14f60b88aad6be9fd5da5093dbefb0e3e1224131cb1d441d7cb7d46"}, - {file = "ruff-0.12.8-py3-none-win32.whl", hash = "sha256:daf3475060a617fd5bc80638aeaf2f5937f10af3ec44464e280a9d2218e720d3"}, - {file = "ruff-0.12.8-py3-none-win_amd64.whl", hash = "sha256:7209531f1a1fcfbe8e46bcd7ab30e2f43604d8ba1c49029bb420b103d0b5f76e"}, - {file = "ruff-0.12.8-py3-none-win_arm64.whl", hash = "sha256:c90e1a334683ce41b0e7a04f41790c429bf5073b62c1ae701c9dc5b3d14f0749"}, - {file = "ruff-0.12.8.tar.gz", hash = "sha256:4cb3a45525176e1009b2b64126acf5f9444ea59066262791febf55e40493a033"}, + {file = "ruff-0.12.9-py3-none-linux_armv6l.whl", hash = "sha256:fcebc6c79fcae3f220d05585229463621f5dbf24d79fdc4936d9302e177cfa3e"}, + {file = "ruff-0.12.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aed9d15f8c5755c0e74467731a007fcad41f19bcce41cd75f768bbd687f8535f"}, + {file = "ruff-0.12.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5b15ea354c6ff0d7423814ba6d44be2807644d0c05e9ed60caca87e963e93f70"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d596c2d0393c2502eaabfef723bd74ca35348a8dac4267d18a94910087807c53"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b15599931a1a7a03c388b9c5df1bfa62be7ede6eb7ef753b272381f39c3d0ff"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3d02faa2977fb6f3f32ddb7828e212b7dd499c59eb896ae6c03ea5c303575756"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:17d5b6b0b3a25259b69ebcba87908496e6830e03acfb929ef9fd4c58675fa2ea"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72db7521860e246adbb43f6ef464dd2a532ef2ef1f5dd0d470455b8d9f1773e0"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a03242c1522b4e0885af63320ad754d53983c9599157ee33e77d748363c561ce"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fc83e4e9751e6c13b5046d7162f205d0a7bac5840183c5beebf824b08a27340"}, + {file = "ruff-0.12.9-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:881465ed56ba4dd26a691954650de6ad389a2d1fdb130fe51ff18a25639fe4bb"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:43f07a3ccfc62cdb4d3a3348bf0588358a66da756aa113e071b8ca8c3b9826af"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:07adb221c54b6bba24387911e5734357f042e5669fa5718920ee728aba3cbadc"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f5cd34fabfdea3933ab85d72359f118035882a01bff15bd1d2b15261d85d5f66"}, + {file = "ruff-0.12.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f6be1d2ca0686c54564da8e7ee9e25f93bdd6868263805f8c0b8fc6a449db6d7"}, + {file = "ruff-0.12.9-py3-none-win32.whl", hash = "sha256:cc7a37bd2509974379d0115cc5608a1a4a6c4bff1b452ea69db83c8855d53f93"}, + {file = "ruff-0.12.9-py3-none-win_amd64.whl", hash = "sha256:6fb15b1977309741d7d098c8a3cb7a30bc112760a00fb6efb7abc85f00ba5908"}, + {file = "ruff-0.12.9-py3-none-win_arm64.whl", hash = "sha256:63c8c819739d86b96d500cce885956a1a48ab056bbcbc61b747ad494b2485089"}, + {file = "ruff-0.12.9.tar.gz", hash = "sha256:fbd94b2e3c623f659962934e52c2bea6fc6da11f667a427a368adaf3af2c866a"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index 6566c84..09c14e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] -name = "asyncflow-simulator" -version = "0.1.0" +name = "AsyncFlow-Sim" +version = "0.1.0a1" description = "Digital-twin simulator for distributed async systems. Build what-if scenarios and quantify capacity, latency and throughput offline—before you deploy." authors = ["Gioele Botta"] readme = "README.md" @@ -18,9 +18,9 @@ keywords = [ "fastapi", "uvicorn", "distributed-systems", "queuing-theory" ] -homepage = "https://github.com//AsyncFlow-Simulator" # TO COMPLETE -repository = "https://github.com//AsyncFlow-Simulator" # TO COMPLETE -documentation = "https://github.com//AsyncFlow-Simulator/tree/main/docs" # TO COMPLETE +homepage = "https://github.com/AsyncFlow-Sim" +repository = "https://github.com/AsyncFlow-Sim/AsyncFlow" +documentation = "https://github.com/AsyncFlow-Sim/AsyncFlow-Sim/tree/v0.1.0/docs" classifiers = [ "Development Status :: 3 - Alpha", diff --git a/pytest.ini b/pytest.ini index 6d4afe2..c61714b 100644 --- a/pytest.ini +++ b/pytest.ini @@ -2,5 +2,6 @@ addopts = -ra -q testpaths = tests markers = - integration: tests that require external services (database, HTTP calls, etc.) 
-asyncio_mode = auto
\ No newline at end of file
+    integration: tests that require component interactions
+    system: end-to-end/system scenarios
+asyncio_mode = auto
diff --git a/readme_img/topology.png b/readme_img/topology.png
new file mode 100644
index 0000000..bbb89e7
Binary files /dev/null and b/readme_img/topology.png differ
diff --git a/scripts/dev_setup.ps1 b/scripts/dev_setup.ps1
new file mode 100644
index 0000000..88bee3d
--- /dev/null
+++ b/scripts/dev_setup.ps1
@@ -0,0 +1,180 @@
+# Post-clone developer setup for AsyncFlow (Windows / PowerShell).
+#
+# What it does:
+#   1) Ensures Poetry is available (official installer if missing).
+#   2) Configures Poetry to create an in-project virtualenv (.venv).
+#   3) Removes poetry.lock (fresh dependency resolution by policy).
+#   4) Installs the project with dev extras.
+#   5) Runs ruff, mypy, and pytest (with coverage if available).
+#
+# Usage:
+#   .\scripts\dev_setup.ps1
+#
+# Notes:
+# - Run this from anywhere; it will cd to repo root.
+# - Requires Python >= 3.12 to be available (via 'py' launcher or python.exe).
+# - We do NOT delete an existing .venv; it will be reused if compatible.
+
+# Strict error handling
+$ErrorActionPreference = 'Stop'
+Set-StrictMode -Version Latest
+
+# --- helpers ------------------------------------------------------------------
+
+function Write-Info { param([string]$Msg) Write-Host "==> $Msg" }
+function Write-Ok   { param([string]$Msg) Write-Host "✅ $Msg" -ForegroundColor Green }
+function Fail       { param([string]$Msg) Write-Error $Msg; exit 1 }
+
+# Resolve repo root (this script lives in scripts/)
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$RepoRoot  = Resolve-Path (Join-Path $ScriptDir '..')
+
+function Require-Pyproject {
+    if (-not (Test-Path (Join-Path $RepoRoot 'pyproject.toml'))) {
+        Fail "pyproject.toml not found at repo root ($RepoRoot)"
+    }
+}
+
+function Get-PythonPath-3_12Plus {
+    <#
+      Try common Windows launchers/executables and return the *actual* Python
+      interpreter path (sys.executable) for a version >= 3.12.
+    #>
+    $candidates = @(
+        @('py', '-3.13'),
+        @('py', '-3.12'),
+        @('py', '-3'),
+        @('python3.13'),
+        @('python3.12'),
+        @('python')
+    )
+
+    foreach ($cand in $candidates) {
+        $exe = $cand[0]
+        # Avoid shadowing the automatic $args variable
+        $pyArgs = @()
+        if ($cand.Count -gt 1) { $pyArgs = $cand[1..($cand.Count-1)] }
+
+        if (-not (Get-Command $exe -ErrorAction SilentlyContinue)) { continue }
+
+        # Check version
+        & $exe @pyArgs -c "import sys; raise SystemExit(0 if sys.version_info[:2] >= (3,12) else 1)" 2>$null
+        if ($LASTEXITCODE -ne 0) { continue }
+
+        # Obtain the real interpreter path
+        $pyPath = & $exe @pyArgs -c "import sys; print(sys.executable)" 2>$null
+        if ($LASTEXITCODE -eq 0 -and $pyPath) {
+            return $pyPath.Trim()
+        }
+    }
+
+    return $null
+}
+
+function Ensure-Poetry {
+    if (Get-Command poetry -ErrorAction SilentlyContinue) {
+        poetry --version | Out-Null
+        return
+    }
+
+    Write-Info "Poetry not found; attempting installation…"
+
+    # Official installer (recommended by Poetry)
+    $installer = (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content
+    # Pipe installer to Python (stdin); if/else instead of the PS7-only '?:' so this also parses on Windows PowerShell 5.1
+    if (Get-Command py -ErrorAction SilentlyContinue) {
+        $pythonToUse = 'py'
+    } else {
+        $pythonToUse = 'python'
+    }
+    $installer | & $pythonToUse -
+
+    # Common locations (make available for current session)
+    $poetryCandidates = @(
+        (Join-Path $env:APPDATA 'pypoetry\venv\Scripts'),
+        (Join-Path $env:USERPROFILE '.local\bin')
+    )
+    foreach ($p in $poetryCandidates) {
+        if (Test-Path $p) { $env:Path = "$p;$env:Path" }
+    }
+
+    if (-not (Get-Command poetry -ErrorAction SilentlyContinue)) {
+        Fail "Poetry installation failed (not on PATH). Close & reopen PowerShell or add the Poetry path to PATH."
+    }
+
+    poetry --version | Out-Null
+}
+
+function Run-Tests-WithOptionalCoverage {
+    <#
+      Try pytest with coverage first; if the plugin is missing,
+      fall back to plain pytest. Propagate failure if tests fail.
+    #>
+    $cmd = { poetry run pytest --cov=src --cov-report=term-missing:skip-covered --cov-report=xml --disable-warnings -q }
+    try {
+        & $cmd
+        if ($LASTEXITCODE -eq 0) {
+            Write-Ok "Tests (with coverage) PASSED"
+            return
+        }
+    } catch {
+        # ignore; retry without coverage below
+    }
+
+    Write-Info "Coverage run failed (likely pytest-cov not installed). Falling back to plain pytest…"
+    poetry run pytest --disable-warnings -q
+    if ($LASTEXITCODE -ne 0) {
+        Fail "Tests FAILED"
+    }
+    Write-Ok "Tests PASSED"
+}
+
+# --- main ---------------------------------------------------------------------
+
+Set-Location $RepoRoot
+Require-Pyproject
+
+$PythonExe = Get-PythonPath-3_12Plus
+if (-not $PythonExe) {
+    Fail "Python >= 3.12 not found. Install Python 3.12+ and re-run."
+}
+Write-Info ("Using Python: " + (& $PythonExe -V))
+
+Ensure-Poetry
+
+# Make sure Poetry venv lives inside the repo
+Write-Info "Configuring Poetry to use in-project virtualenv (.venv)…"
+poetry config virtualenvs.in-project true
+Write-Ok "Poetry configured to use .venv"
+
+# Bind Poetry to the chosen interpreter (creates .venv if needed)
+poetry env use "$PythonExe" | Out-Null
+Write-Ok "Virtualenv ready (.venv)"
+
+# Policy: always remove lock to avoid conflicts across environments
+$lockPath = Join-Path $RepoRoot 'poetry.lock'
+if (Test-Path $lockPath) {
+    Write-Info "Removing poetry.lock for a clean resolution…"
+    Remove-Item $lockPath -Force
+    Write-Ok "poetry.lock removed"
+}
+
+# Faster installs and stable headless plotting
+$env:PIP_DISABLE_PIP_VERSION_CHECK = '1'
+$env:MPLBACKEND = 'Agg'
+
+Write-Info "Installing project with dev extras…"
+poetry install --with dev --no-interaction --no-ansi
+Write-Ok "Dependencies installed (dev)"
+
+Write-Info "Running Ruff (lint)…"
+poetry run ruff check src tests
+Write-Ok "Ruff PASSED"
+
+Write-Info "Running MyPy (type-check)…"
+poetry run mypy src tests
+Write-Ok "MyPy PASSED"
+
+Write-Info "Running tests (with coverage if available)…"
+Run-Tests-WithOptionalCoverage
+
+Write-Ok "All checks completed SUCCESSFULLY 🎉"
diff --git a/scripts/dev_setup.sh b/scripts/dev_setup.sh
new file mode 100644
index 0000000..fe97c0c
--- /dev/null
+++ b/scripts/dev_setup.sh
@@ -0,0 +1,137 @@
+# Post-clone developer setup for AsyncFlow (Linux/macOS/WSL).
+#
+# What it does:
+#   1) Ensures Poetry is available (prefers pipx if present; otherwise uses
+#      the official installer).
+#   2) Configures Poetry to create an in-project virtualenv (.venv).
+#   3) Removes poetry.lock (fresh dependency resolution by policy).
+#   4) Installs the project with dev extras.
+#   5) Runs ruff, mypy, and pytest (with coverage if available).
+#
+# Usage:
+#   bash scripts/dev_setup.sh
+#
+# Notes:
+# - Run this from anywhere; it will cd to repo root.
+# - Requires Python >= 3.12 to be available (python3.12 or python3).
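+# - Once created, the in-project venv can also be used without Poetry; an
+#   illustrative one-off invocation: .venv/bin/python -m pytest -q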
+# - We do NOT delete an existing .venv; it will be reused if compatible. + +set -Eeuo pipefail + +# --- helpers ----------------------------------------------------------------- + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +err() { echo "ERROR: $*" >&2; exit 1; } +info() { echo "==> $*"; } +ok() { echo "✅ $*"; } + +require_pyproject() { + [[ -f "$repo_root/pyproject.toml" ]] || err "pyproject.toml not found at repo root ($repo_root)" +} + +pick_python() { + # Return a python executable >= 3.12 + for cand in python3.13 python3.12 python3; do + if command -v "$cand" >/dev/null 2>&1; then + if "$cand" -c 'import sys; sys.exit(0 if sys.version_info[:2] >= (3,12) else 1)'; then + echo "$cand" + return 0 + fi + fi + done + err "Python >= 3.12 not found. Install python3.12+ and re-run." +} + +ensure_poetry() { + if command -v poetry >/dev/null 2>&1; then + poetry --version || true + return 0 + fi + + info "Poetry not found; attempting installation…" + + if command -v pipx >/dev/null 2>&1; then + pipx install poetry || pipx upgrade poetry || true + else + # Official installer (recommended by Poetry) — installs to ~/.local/bin + curl -sSL https://install.python-poetry.org | python3 - + export PATH="$HOME/.local/bin:$PATH" + fi + + # Ensure poetry is now available on PATH + export PATH="$HOME/.local/bin:$PATH" + command -v poetry >/dev/null 2>&1 || err "Poetry installation failed (not on PATH)." + poetry --version || true +} + +run_tests_with_optional_coverage() { + # Try pytest with coverage first; if plugin missing, fallback to plain pytest. + set +e + poetry run pytest \ + --cov=src \ + --cov-report=term-missing:skip-covered \ + --cov-report=xml \ + --disable-warnings -q + local status=$? + set -e + + if [[ $status -eq 0 ]]; then + ok "Tests (with coverage) PASSED" + return 0 + fi + + info "Coverage run failed (likely pytest-cov not installed). Falling back to plain pytest…" + + poetry run pytest --disable-warnings -q + ok "Tests PASSED" +} + +# --- main -------------------------------------------------------------------- + +cd "$repo_root" +require_pyproject + +PY_BIN="$(pick_python)" +info "Using Python: $("$PY_BIN" -V)" + +ensure_poetry + +# Make sure Poetry venv lives inside the repo +info "Configuring Poetry to use in-project virtualenv (.venv)…" +poetry config virtualenvs.in-project true +ok "Poetry configured to use .venv" + +# Bind Poetry to the chosen interpreter (creates .venv if needed) +poetry env use "$PY_BIN" >/dev/null 2>&1 || true +ok "Virtualenv ready (.venv)" + +# Policy: always remove lock to avoid conflicts across environments +if [[ -f poetry.lock ]]; then + info "Removing poetry.lock for a clean resolution…" + rm -f poetry.lock + ok "poetry.lock removed" +fi + +# Faster installs and stable headless plotting +export PIP_DISABLE_PIP_VERSION_CHECK=1 +export MPLBACKEND=Agg + +info "Installing project with dev extras…" +poetry install --with dev --no-interaction --no-ansi +ok "Dependencies installed (dev)" + +info "Running Ruff (lint)…" +poetry run ruff check src tests +ok "Ruff PASSED" + +info "Running MyPy (type-check)…" +poetry run mypy src tests +ok "MyPy PASSED" + +info "Running tests (with coverage if available)…" +run_tests_with_optional_coverage + +ok "All checks completed SUCCESSFULLY 🎉" diff --git a/scripts/quality_check.ps1 b/scripts/quality_check.ps1 new file mode 100644 index 0000000..74bbd4c --- /dev/null +++ b/scripts/quality_check.ps1 @@ -0,0 +1,14 @@ +# Lint & format with Ruff (applies --fix) and type-check with MyPy. 
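+# Roughly equivalent manual commands (a sketch, assuming the Poetry venv is active):
+#   ruff check src tests --fix
+#   mypy src tests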
+# Usage:
+#   .\scripts\quality_check.ps1
+
+$ErrorActionPreference = 'Stop'
+Set-StrictMode -Version Latest
+
+# Ruff (lint + auto-fix)
+poetry run ruff check src tests --fix
+
+# MyPy (type-check)
+poetry run mypy src tests
+
+Write-Host "✅ Linting and type-checking completed SUCCESSFULLY"
diff --git a/scripts/quality-check.sh b/scripts/quality_check.sh
similarity index 92%
rename from scripts/quality-check.sh
rename to scripts/quality_check.sh
index eb2a66f..85c269e 100644
--- a/scripts/quality-check.sh
+++ b/scripts/quality_check.sh
@@ -1,4 +1,3 @@
-#!/usr/bin/env bash
 set -euo pipefail
 
 # Lint & format with ruff, automatic corrections applied (--fix)
diff --git a/scripts/run_sys_tests.ps1 b/scripts/run_sys_tests.ps1
new file mode 100644
index 0000000..aa3630f
--- /dev/null
+++ b/scripts/run_sys_tests.ps1
@@ -0,0 +1,54 @@
+# Run only system tests (marked @pytest.mark.system) with the required env var.
+# Keeps output concise (no XML, no slowest list), shows the usual pytest summary.
+#
+# Usage:
+#   .\scripts\run_sys_tests.ps1
+#
+# Notes:
+# - Uses `poetry run` when Poetry + pyproject.toml are present; otherwise falls back to `pytest`.
+# - Forces a headless backend for any plots generated during tests.
+
+Set-StrictMode -Version Latest
+$ErrorActionPreference = 'Stop'
+
+# Resolve repo root
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$RepoRoot  = Resolve-Path (Join-Path $ScriptDir '..')
+
+# Collect test paths (default: tests/system)
+if ($args.Count -ge 1) {
+    $TestPaths = $args
+} else {
+    $TestPaths = @('tests/system')
+}
+
+# Decide whether to run through Poetry
+$UsePoetry = (Get-Command poetry -ErrorAction SilentlyContinue) -ne $null -and
+             (Test-Path (Join-Path $RepoRoot 'pyproject.toml'))
+
+# Set env vars for this process
+$env:MPLBACKEND = if ($env:MPLBACKEND) { $env:MPLBACKEND } else { 'Agg' }
+$env:ASYNCFLOW_RUN_SYSTEM_TESTS = '1'
+
+Push-Location $RepoRoot
+try {
+    Write-Host "==> Running system tests…"
+    # Clear any configured addopts and run only system-marked tests
+    $pytestArgs = @(
+        '-o', 'addopts=',
+        '-m', 'system',
+        '--disable-warnings',
+        '-q'
+    ) + $TestPaths
+
+    if ($UsePoetry) {
+        poetry run pytest @pytestArgs
+    } else {
+        pytest @pytestArgs
+    }
+
+    Write-Host "✅ System tests PASSED"
+}
+finally {
+    Pop-Location
+}
diff --git a/scripts/run_sys_tests.sh b/scripts/run_sys_tests.sh
new file mode 100644
index 0000000..78ee74b
--- /dev/null
+++ b/scripts/run_sys_tests.sh
@@ -0,0 +1,42 @@
+# Run only system tests (marked @pytest.mark.system) with the required env var.
+# Keeps output concise (no XML, no slowest list), shows the usual pytest summary.
+#
+# Usage:
+#   bash scripts/run_sys_tests.sh
+#
+# Notes:
+# - Uses `poetry run` when Poetry + pyproject.toml are present; otherwise falls back to `pytest`.
+# - Forces a headless backend for any plots generated during tests.
+
+set -Eeuo pipefail
+
+# Pick test paths (default to tests/system)
+if [[ $# -ge 1 ]]; then
+  TEST_PATHS=("$@")
+else
+  TEST_PATHS=(tests/system)
+fi
+
+REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+RUN_PREFIX=""
+if command -v poetry >/dev/null 2>&1 && [[ -f "$REPO_ROOT/pyproject.toml" ]]; then
+  RUN_PREFIX="poetry run"
+fi
+
+# Headless plotting; enable system tests
+export MPLBACKEND="${MPLBACKEND:-Agg}"
+export ASYNCFLOW_RUN_SYSTEM_TESTS=1
+
+cd "$REPO_ROOT"
+
+echo "==> Running system tests…"
+# Clear any configured addopts and run only system-marked tests
+# Keep output short but with the final summary line.
+$RUN_PREFIX pytest \
+  -o addopts= \
+  -m system \
+  --disable-warnings \
+  -q \
+  "${TEST_PATHS[@]}"
+
+echo "✅ System tests PASSED"
diff --git a/scripts/run_tests.ps1 b/scripts/run_tests.ps1
new file mode 100644
index 0000000..86bacfd
--- /dev/null
+++ b/scripts/run_tests.ps1
@@ -0,0 +1,42 @@
+# Run tests with coverage ONLY (no XML, no durations), showing pytest’s usual summary.
+# It also overrides any configured addopts (e.g. durations/xml) via `-o addopts=`.
+#
+# Usage:
+#   .\scripts\run_tests.ps1
+
+$ErrorActionPreference = 'Stop'
+Set-StrictMode -Version Latest
+
+# Pick test paths
+[string[]]$TestPaths = if ($args.Count -ge 1) { $args } else { @('tests') }
+
+# Resolve repo root (this script lives in scripts/)
+$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
+$RepoRoot  = Resolve-Path (Join-Path $ScriptDir '..')
+
+# Use Poetry if available and pyproject exists
+$RunWithPoetry = (Get-Command poetry -ErrorAction SilentlyContinue) -and (Test-Path (Join-Path $RepoRoot 'pyproject.toml'))
+
+# Headless backend if plots are generated during tests
+if (-not $env:MPLBACKEND) { $env:MPLBACKEND = 'Agg' }
+
+Set-Location $RepoRoot
+
+# Build command
+$cmd = @()
+if ($RunWithPoetry) { $cmd += @('poetry', 'run') }
+$cmd += 'pytest'
+$cmd += @(
+    '-o', 'addopts=',
+    '--cov=src',
+    '--cov-report=term',
+    '--disable-warnings',
+    '-q'
+)
+$cmd += $TestPaths
+
+# Execute: splat the argument array (`& $cmd` on a whole array is not a
+# valid invocation; the call operator needs a single command name).
+$exe = $cmd[0]
+$cmdArgs = @($cmd[1..($cmd.Count - 1)])
+& $exe @cmdArgs
diff --git a/scripts/run_tests.sh b/scripts/run_tests.sh
new file mode 100644
index 0000000..089d8e4
--- /dev/null
+++ b/scripts/run_tests.sh
@@ -0,0 +1,36 @@
+# Run tests with coverage ONLY (no XML, no durations), showing pytest’s usual summary.
+# It also overrides any configured addopts (e.g. durations/xml) via `-o addopts=`.
+#
+# Usage:
+#   bash scripts/run_tests.sh
+set -Eeuo pipefail
+
+# Pick test paths
+if [[ $# -ge 1 ]]; then
+  TEST_PATHS=("$@")
+else
+  TEST_PATHS=(tests)
+fi
+
+REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+RUN_PREFIX=""
+if command -v poetry >/dev/null 2>&1 && [[ -f "$REPO_ROOT/pyproject.toml" ]]; then
+  RUN_PREFIX="poetry run"
+fi
+
+# Headless backend if plots are generated during tests
+export MPLBACKEND="${MPLBACKEND:-Agg}"
+
+cd "$REPO_ROOT"
+
+# Run pytest with coverage summary in terminal, no xml, no durations,
+# and wipe any addopts coming from config files.
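+# (Illustrative) with Poetry present and no arguments, the command below expands to:
+#   poetry run pytest -o addopts= --cov=src --cov-report=term --disable-warnings -q tests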
+$RUN_PREFIX pytest \ + -o addopts= \ + --cov=src \ + --cov-report=term \ + --disable-warnings \ + -q \ + "${TEST_PATHS[@]}" diff --git a/simulation_base.png b/simulation_base.png new file mode 100644 index 0000000..d8c8e16 Binary files /dev/null and b/simulation_base.png differ diff --git a/src/asyncflow/components/__init__.py b/src/asyncflow/components/__init__.py index 774a77f..52d66c7 100644 --- a/src/asyncflow/components/__init__.py +++ b/src/asyncflow/components/__init__.py @@ -1,4 +1,4 @@ -"""Public components: re-exports Pydantic leaf schemas (topology).""" +"""Public components: re-exports Pydantic schemas (topology).""" from __future__ import annotations from asyncflow.schemas.topology.edges import Edge diff --git a/src/asyncflow/metrics/analyzer.py b/src/asyncflow/metrics/analyzer.py index 16448bc..b9a6ea2 100644 --- a/src/asyncflow/metrics/analyzer.py +++ b/src/asyncflow/metrics/analyzer.py @@ -17,7 +17,7 @@ ) if TYPE_CHECKING: - + # Standard library typing imports in type-checking block (TC003). from collections.abc import Iterable from matplotlib.axes import Axes @@ -29,6 +29,10 @@ from asyncflow.schemas.settings.simulation import SimulationSettings +# Short alias to keep signatures within 88 chars (E501). +Series = tuple[list[float], list[float]] + + class ResultsAnalyzer: """Analyze and visualize the results of a completed simulation. @@ -36,9 +40,12 @@ class ResultsAnalyzer: - latency statistics - throughput time series (RPS) - sampled metrics from servers and edges + + It also exposes compact plotting/rendering helpers so that CLI scripts + can be short and consistent. """ - # Class attribute to define the period to calculate the throughput in s + # Default bucket size (seconds) used for cached throughput. _WINDOW_SIZE_S: float = 1.0 def __init__( @@ -49,14 +56,7 @@ def __init__( edges: list[EdgeRuntime], settings: SimulationSettings, ) -> None: - """ - Args: - client: Client runtime object, containing RqsClock entries. - servers: List of server runtime objects. - edges: List of edge runtime objects. - settings: Original simulation settings. - - """ + """Initialize with the runtime objects and original settings.""" self._client = client self._servers = servers self._edges = edges @@ -65,25 +65,13 @@ def __init__( # Lazily computed caches self.latencies: list[float] | None = None self.latency_stats: dict[LatencyKey, float] | None = None - self.throughput_series: tuple[list[float], list[float]] | None = None + self.throughput_series: Series | None = None + # Sampled metrics are stored with string metric keys for simplicity. 
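+        # Illustrative shape (metric names and entity ids depend on the topology):
+        #   {"ready_queue_len": {"server-1": [0.0, 2.0, 1.0, ...]}}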
self.sampled_metrics: dict[str, dict[str, list[float]]] | None = None - @staticmethod - def _apply_plot_cfg( - ax: Axes, - cfg: PlotCfg, - *, - legend_handles: Iterable[Line2D] | None = None, - ) -> None: - """Apply title / axis labels / grid and (optionally) legend to ax.""" - ax.set_title(cfg.title) - ax.set_xlabel(cfg.x_label) - ax.set_ylabel(cfg.y_label) - ax.grid(visible=True) - - if legend_handles: - ax.legend(handles=legend_handles) - + # ───────────────────────────────────────────── + # Core computation + # ───────────────────────────────────────────── def process_all_metrics(self) -> None: """Compute all aggregated and sampled metrics if not already done.""" if self.latency_stats is None and self._client.rqs_clock: @@ -93,15 +81,16 @@ def process_all_metrics(self) -> None: self._extract_sampled_metrics() def _process_event_metrics(self) -> None: - """Calculate latency stats and throughput time series (RPS).""" + """Calculate latency stats and throughput time series (1s RPS).""" # 1) Latencies self.latencies = [ - clock.finish - clock.start for clock in self._client.rqs_clock + clock.finish - clock.start + for clock in self._client.rqs_clock ] # 2) Summary stats if self.latencies: - arr = np.array(self.latencies) + arr = np.array(self.latencies, dtype=float) self.latency_stats = { LatencyKey.TOTAL_REQUESTS: float(arr.size), LatencyKey.MEAN: float(np.mean(arr)), @@ -115,24 +104,23 @@ def _process_event_metrics(self) -> None: else: self.latency_stats = {} - # 3) Throughput per 1s window + # 3) Throughput per 1s window (cached) completion_times = sorted(clock.finish for clock in self._client.rqs_clock) end_time = self._settings.total_simulation_time timestamps: list[float] = [] rps_values: list[float] = [] - count = 0 idx = 0 current_end = ResultsAnalyzer._WINDOW_SIZE_S while current_end <= end_time: + count = 0 while idx < len(completion_times) and completion_times[idx] <= current_end: count += 1 idx += 1 timestamps.append(current_end) rps_values.append(count / ResultsAnalyzer._WINDOW_SIZE_S) current_end += ResultsAnalyzer._WINDOW_SIZE_S - count = 0 self.throughput_series = (timestamps, rps_values) @@ -143,6 +131,7 @@ def _extract_sampled_metrics(self) -> None: for server in self._servers: sid = server.server_config.id for name, values in server.enabled_metrics.items(): + # Store with string key for a consistent external API. metrics[name.value][sid] = values for edge in self._edges: @@ -152,18 +141,79 @@ def _extract_sampled_metrics(self) -> None: self.sampled_metrics = metrics + # ───────────────────────────────────────────── + # Public accessors & formatting + # ───────────────────────────────────────────── + def list_server_ids(self) -> list[str]: + """Return server IDs in a stable order as given in the topology.""" + return [s.server_config.id for s in self._servers] + def get_latency_stats(self) -> dict[LatencyKey, float]: """Return latency statistics, computing them if necessary.""" self.process_all_metrics() return self.latency_stats or {} - def get_throughput_series(self) -> tuple[list[float], list[float]]: - """Return (timestamps, RPS). 
Empty lists when no traffic.""" + def format_latency_stats(self) -> str: + """Return a human-readable block with latency stats.""" + stats = self.get_latency_stats() + if not stats: + return "Latency stats: (empty)" + + by_name: dict[str, float] = { + getattr(k, "name", str(k)): v + for k, v in stats.items() + } + order = [ + "TOTAL_REQUESTS", + "MEAN", + "MEDIAN", + "STD_DEV", + "P95", + "P99", + "MIN", + "MAX", + ] + + lines = ["════════ LATENCY STATS ════════"] + # PERF401: build then extend instead of append in a loop. + formatted = [ + f"{k:<20} = {by_name[k]:.6f}" + for k in order + if k in by_name + ] + lines.extend(formatted) + return "\n".join(lines) + + def get_throughput_series( + self, + window_s: float | None = None, + ) -> Series: + """Return (timestamps, RPS). If `window_s` is provided, recompute on the fly.""" self.process_all_metrics() - if self.throughput_series is None: - return [], [] - return self.throughput_series + # Use cached (1s) series when suitable. + if window_s is None or window_s == ResultsAnalyzer._WINDOW_SIZE_S: + return self.throughput_series or ([], []) + + # Recompute with a custom window size. + completion_times = sorted(clock.finish for clock in self._client.rqs_clock) + end_time = self._settings.total_simulation_time + + timestamps: list[float] = [] + rps_values: list[float] = [] + idx = 0 + current_end = float(window_s) + + while current_end <= end_time: + count = 0 + while idx < len(completion_times) and completion_times[idx] <= current_end: + count += 1 + idx += 1 + timestamps.append(current_end) + rps_values.append(count / float(window_s)) + current_end += float(window_s) + + return (timestamps, rps_values) def get_sampled_metrics(self) -> dict[str, dict[str, list[float]]]: """Return sampled metrics from servers and edges.""" @@ -171,64 +221,369 @@ def get_sampled_metrics(self) -> dict[str, dict[str, list[float]]]: assert self.sampled_metrics is not None return self.sampled_metrics + def get_metric_map(self, key: SampledMetricName | str) -> dict[str, list[float]]: + """Return a series map for a metric, tolerant to enum/string keys.""" + self.process_all_metrics() + assert self.sampled_metrics is not None + + if isinstance(key, SampledMetricName): + # Prefer the canonical .value key; fall back to .name. 
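+            # Illustrative: a member's .value might be "ready_queue_len" while
+            # its .name is "READY_QUEUE_LEN"; both spellings are tried here.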
+            found = (
+                self.sampled_metrics.get(key.value)
+                or self.sampled_metrics.get(key.name, {})
+            )
+            return found or {}
+        # If caller used a raw string:
+        return self.sampled_metrics.get(key, {})
+
+    def get_series(self, key: SampledMetricName | str, entity_id: str) -> Series:
+        """Return (times, values) for a given sampled metric and entity id."""
+        series_map = self.get_metric_map(key)
+        vals = series_map.get(entity_id, [])
+        times = (np.arange(len(vals)) * self._settings.sample_period_s).tolist()
+        return times, vals
+
+    # ─────────────────────────────────────────────
+    # Plotting helpers
+    # ─────────────────────────────────────────────
+    @staticmethod
+    def _apply_plot_cfg(
+        ax: Axes,
+        cfg: PlotCfg,
+        *,
+        legend_handles: Iterable[Line2D] | None = None,
+    ) -> None:
+        """Apply title / axis labels / grid and (optionally) legend to ax."""
+        ax.set_title(cfg.title)
+        ax.set_xlabel(cfg.x_label)
+        ax.set_ylabel(cfg.y_label)
+        ax.grid(visible=True)
+        if legend_handles:
+            ax.legend(handles=legend_handles)
+
+    def plot_base_dashboard(self, ax_latency: Axes, ax_throughput: Axes) -> None:
+        """Plot the two-panel dashboard header: latency histogram + throughput line."""
+        self.plot_latency_distribution(ax_latency)
+        self.plot_throughput(ax_throughput)
+
     def plot_latency_distribution(self, ax: Axes) -> None:
-        """Plot the distribution of the latency"""
+        """Plot latency histogram with mean/P50/P95/P99 lines and a single
+        legend box with values.
+        """
+        self.process_all_metrics()
         if not self.latencies:
             ax.text(0.5, 0.5, LATENCY_PLOT.no_data, ha="center", va="center")
             return
 
-        ax.hist(self.latencies, bins=50)
+        # Colors that pop on blue/white
+        col_mean = "#d62728"  # red
+        col_p50 = "#ff7f0e"  # orange
+        col_p95 = "#2ca02c"  # green
+        col_p99 = "#9467bd"  # purple
+        hist_color = "#1f77b4"  # soft blue
+
+        arr = np.asarray(self.latencies, dtype=float)
+        v_mean = float(np.mean(arr))
+        v_p50 = float(np.percentile(arr, 50))
+        v_p95 = float(np.percentile(arr, 95))
+        v_p99 = float(np.percentile(arr, 99))
+
+        # Histogram (subtle to let overlays stand out)
+        ax.hist(
+            arr, bins=50, color=hist_color, alpha=0.40,
+            edgecolor="none", zorder=1,
+        )
+
+        # Vertical overlays
+        ax.axvline(
+            v_mean, color=col_mean, linestyle=":", linewidth=1.8,
+            alpha=0.95, zorder=3,
+        )
+        ax.axvline(
+            v_p50, color=col_p50, linestyle="-.", linewidth=1.6,
+            alpha=0.90, zorder=3,
+        )
+        ax.axvline(
+            v_p95, color=col_p95, linestyle="--", linewidth=1.6,
+            alpha=0.90, zorder=3,
+        )
+        ax.axvline(
+            v_p99, color=col_p99, linestyle="--", linewidth=1.6,
+            alpha=0.90, zorder=3,
+        )
+
+        # Build legend handles (dummy lines, no data)
+        h_mean = ax.plot(
+            [], [], color=col_mean, linestyle=":", linewidth=2.4,
+            label=f"mean = {v_mean:.3f}",
+        )[0]
+        h_p50 = ax.plot(
+            [], [], color=col_p50, linestyle="-.", linewidth=2.4,
+            label=f"P50 = {v_p50:.3f}",
+        )[0]
+        h_p95 = ax.plot(
+            [], [], color=col_p95, linestyle="--", linewidth=2.4,
+            label=f"P95 = {v_p95:.3f}",
+        )[0]
+        h_p99 = ax.plot(
+            [], [], color=col_p99, linestyle="--", linewidth=2.4,
+            label=f"P99 = {v_p99:.3f}",
+        )[0]
+
+        # Titles / labels / grid
         self._apply_plot_cfg(ax, LATENCY_PLOT)
 
-    def plot_throughput(self, ax: Axes) -> None:
-        """Plot the distribution of the throughput"""
-        timestamps, values = self.get_throughput_series()
+        # Legend (top-right) with readable background
+        leg = ax.legend(
+            handles=[h_mean, h_p50, h_p95, h_p99],
+            loc="upper right",
+            bbox_to_anchor=(0.98, 0.98),
+            borderaxespad=0.0,
+            framealpha=0.90,
+            fancybox=True,
+            handlelength=2.6,
+            fontsize=9.5,
+        )
+        leg.get_frame().set_facecolor("white")
+
+    def plot_throughput(self, ax: Axes, *, window_s: float | None = None) -> None:
+        """Plot throughput with mean/P95/max overlay lines and a single
+        legend box with values.
+        """
+        timestamps, values = self.get_throughput_series(window_s=window_s)
         if not timestamps:
             ax.text(0.5, 0.5, THROUGHPUT_PLOT.no_data, ha="center", va="center")
             return
 
-        ax.plot(timestamps, values, marker="o", linestyle="-")
+        # Colors (high contrast on blue/white)
+        col_series = "#1f77b4"  # blue main series
+        col_mean = "#d62728"  # red
+        col_p95 = "#2ca02c"  # green
+        col_max = "#9467bd"  # purple
+
+        vals = np.asarray(values, dtype=float)
+        v_mean = float(np.mean(vals))
+        v_p95 = float(np.percentile(vals, 95))
+        v_max = float(np.max(vals))
+
+        # Main series
+        ax.plot(
+            timestamps, vals, marker="o", linewidth=1.6, alpha=0.95,
+            color=col_series, zorder=2,
+        )
+
+        # Horizontal overlays (match legend colors)
+        ax.axhline(
+            v_mean, color=col_mean, linestyle=":", linewidth=1.8,
+            alpha=0.95, zorder=4,
+        )
+        ax.axhline(
+            v_p95, color=col_p95, linestyle="--", linewidth=1.6,
+            alpha=0.90, zorder=4,
+        )
+        ax.axhline(
+            v_max, color=col_max, linestyle="--", linewidth=1.6,
+            alpha=0.90, zorder=4,
+        )
+
+        # Legend handles (dummy, no data)
+        h_mean = ax.plot(
+            [], [], color=col_mean, linestyle=":", linewidth=2.4,
+            label=f"mean = {v_mean:.3f}",
+        )[0]
+        h_p95 = ax.plot(
+            [], [], color=col_p95, linestyle="--", linewidth=2.4,
+            label=f"P95 = {v_p95:.3f}",
+        )[0]
+        h_max = ax.plot(
+            [], [], color=col_max, linestyle="--", linewidth=2.4,
+            label=f"max = {v_max:.3f}",
+        )[0]
+
+        # Apply base cfg (titles/labels/grid)
         self._apply_plot_cfg(ax, THROUGHPUT_PLOT)
 
-    def plot_server_queues(self, ax: Axes) -> None:
-        """Plot the server queues"""
-        metrics = self.get_sampled_metrics()
-        ready = metrics.get(SampledMetricName.READY_QUEUE_LEN.value, {})
-        io_q = metrics.get(SampledMetricName.EVENT_LOOP_IO_SLEEP.value, {})
+        # Legend: upper-right; single box with values
+        leg = ax.legend(
+            handles=[h_mean, h_p95, h_max],
+            loc="upper right",
+            bbox_to_anchor=(0.98, 0.98),
+            borderaxespad=0.0,
+            framealpha=0.90,
+            fancybox=True,
+            handlelength=2.6,
+            fontsize=9.5,
+        )
+        leg.get_frame().set_facecolor("white")
+
-        if not (ready or io_q):
+    def plot_single_server_ready_queue(self, ax: Axes, server_id: str) -> None:
+        """Plot Ready queue with mean/min/max lines and a single legend box with
+        values. No trend/ewma, no legend entry for the main series.
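+
+        Usage sketch (hypothetical wiring; ``res`` is the ``ResultsAnalyzer``
+        returned by ``SimulationRunner.run()``)::
+
+            fig = Figure()
+            ax = fig.subplots()
+            res.plot_single_server_ready_queue(ax, "srv-1")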
+        """
+        times, vals = self.get_series(SampledMetricName.READY_QUEUE_LEN, server_id)
+        if not vals:
             ax.text(0.5, 0.5, SERVER_QUEUES_PLOT.no_data, ha="center", va="center")
             return
 
-        samples = len(next(iter(ready.values()), []))
-        times = np.arange(samples) * self._settings.sample_period_s
-
-        for sid, vals in ready.items():
-            ax.plot(times, vals, label=f"{sid} {SERVER_QUEUES_PLOT.ready_label}")
-        for sid, vals in io_q.items():
-            ax.plot(
-                times,
-                vals,
-                label=f"{sid} {SERVER_QUEUES_PLOT.io_label}",
-                linestyle="--",
-            )
+        # Colors consistent with other charts
+        col_mean = "#d62728"  # red
+        col_min = "#2ca02c"  # green
+        col_max = "#9467bd"  # purple
+
+        y = np.asarray(vals, dtype=float)
+        v_mean = float(np.mean(y))
+        v_min = float(np.min(y))
+        v_max = float(np.max(y))
+
+        # Main series (intentionally unlabeled; the stats go in the legend box)
+        ax.plot(times, y, linewidth=1.6, alpha=0.95)
+
+        # Overlays
+        ax.axhline(v_mean, color=col_mean, linestyle=":", linewidth=1.8, alpha=0.95)
+        ax.axhline(v_min, color=col_min, linestyle="--", linewidth=1.6, alpha=0.90)
+        ax.axhline(v_max, color=col_max, linestyle="--", linewidth=1.6, alpha=0.90)
+
+        # Legend handles (dummy lines with values)
+        h_mean = ax.plot(
+            [], [], color=col_mean, linestyle=":", linewidth=2.4,
+            label=f"mean = {v_mean:.3f}",
+        )[0]
+        h_min = ax.plot(
+            [], [], color=col_min, linestyle="--", linewidth=2.4,
+            label=f"min = {v_min:.3f}",
+        )[0]
+        h_max = ax.plot(
+            [], [], color=col_max, linestyle="--", linewidth=2.4,
+            label=f"max = {v_max:.3f}",
+        )[0]
+
+        ax.set_title(f"Ready Queue — {server_id}")
+        ax.set_xlabel(SERVER_QUEUES_PLOT.x_label)
+        ax.set_ylabel(SERVER_QUEUES_PLOT.y_label)
+        ax.grid(visible=True)
+
+        leg = ax.legend(
+            handles=[h_mean, h_min, h_max],
+            loc="upper right",
+            bbox_to_anchor=(0.98, 0.98),
+            borderaxespad=0.0,
+            framealpha=0.90,
+            fancybox=True,
+            handlelength=2.6,
+            fontsize=9.5,
+        )
+        leg.get_frame().set_facecolor("white")
+
+    def plot_single_server_io_queue(self, ax: Axes, server_id: str) -> None:
+        """Plot I/O queue with mean/min/max lines and a single legend box with
+        values. No trend/ewma, no legend entry for the main series.
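+
+        Pairs naturally with ``plot_single_server_ready_queue`` on a 1x2
+        grid: the ready queue shows CPU pressure, while this plot shows how
+        many tasks sit parked on I/O at each sample.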
+ """ + times, vals = self.get_series(SampledMetricName.EVENT_LOOP_IO_SLEEP, server_id) + if not vals: + ax.text(0.5, 0.5, SERVER_QUEUES_PLOT.no_data, ha="center", va="center") + return - self._apply_plot_cfg(ax, SERVER_QUEUES_PLOT, legend_handles=ax.lines) + col_mean = "#d62728" # red + col_min = "#2ca02c" # green + col_max = "#9467bd" # purple + + y = np.asarray(vals, dtype=float) + v_mean = float(np.mean(y)) + v_min = float(np.min(y)) + v_max = float(np.max(y)) + + ax.plot(times, y, linewidth=1.6, alpha=0.95) + + ax.axhline(v_mean, color=col_mean, linestyle=":", linewidth=1.8, alpha=0.95) + ax.axhline(v_min, color=col_min, linestyle="--", linewidth=1.6, alpha=0.90) + ax.axhline(v_max, color=col_max, linestyle="--", linewidth=1.6, alpha=0.90) + + h_mean = ax.plot( + [], [], color=col_mean, linestyle=":", linewidth=2.4, + label=f"mean = {v_mean:.3f}", + )[0] + h_min = ax.plot( + [], [], color=col_min, linestyle="--", linewidth=2.4, + label=f"min = {v_min:.3f}", + )[0] + h_max = ax.plot( + [], [], color=col_max, linestyle="--", linewidth=2.4, + label=f"max = {v_max:.3f}", + )[0] + + ax.set_title(f"I/O Queue — {server_id}") + ax.set_xlabel(SERVER_QUEUES_PLOT.x_label) + ax.set_ylabel(SERVER_QUEUES_PLOT.y_label) + ax.grid(visible=True) + + leg = ax.legend( + handles=[h_mean, h_min, h_max], + loc="upper right", + bbox_to_anchor=(0.98, 0.98), + borderaxespad=0.0, + framealpha=0.90, + fancybox=True, + handlelength=2.6, + fontsize=9.5, + ) + leg.get_frame().set_facecolor("white") - def plot_ram_usage(self, ax: Axes) -> None: - """Plot the ram usage""" - metrics = self.get_sampled_metrics() - ram = metrics.get(SampledMetricName.RAM_IN_USE.value, {}) - if not ram: + def plot_single_server_ram(self, ax: Axes, server_id: str) -> None: + """Plot RAM usage with mean/min/max lines and a single legend box with + values. No trend/ewma, no legend entry for the main series. 
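+
+        A sketch for plotting every server (``list_server_ids`` is the
+        analyzer's own accessor; the single-column layout is illustrative)::
+
+            ids = res.list_server_ids()
+            fig = Figure()
+            axes = fig.subplots(len(ids), 1, squeeze=False)
+            for row, sid in zip(axes, ids):
+                res.plot_single_server_ram(row[0], sid)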
+ """ + times, vals = self.get_series(SampledMetricName.RAM_IN_USE, server_id) + if not vals: ax.text(0.5, 0.5, RAM_PLOT.no_data, ha="center", va="center") return - samples = len(next(iter(ram.values()))) - times = np.arange(samples) * self._settings.sample_period_s - - for sid, vals in ram.items(): - ax.plot(times, vals, label=f"{sid} {RAM_PLOT.legend_label}") + col_mean = "#d62728" # red + col_min = "#2ca02c" # green + col_max = "#9467bd" # purple + + y = np.asarray(vals, dtype=float) + v_mean = float(np.mean(y)) + v_min = float(np.min(y)) + v_max = float(np.max(y)) + + ax.plot(times, y, linewidth=1.6, alpha=0.95) + + ax.axhline(v_mean, color=col_mean, linestyle=":", linewidth=1.8, alpha=0.95) + ax.axhline(v_min, color=col_min, linestyle="--", linewidth=1.6, alpha=0.90) + ax.axhline(v_max, color=col_max, linestyle="--", linewidth=1.6, alpha=0.90) + + h_mean = ax.plot( + [], [], color=col_mean, linestyle=":", linewidth=2.4, + label=f"mean = {v_mean:.3f}", + )[0] + h_min = ax.plot( + [], [], color=col_min, linestyle="--", linewidth=2.4, + label=f"min = {v_min:.3f}", + )[0] + h_max = ax.plot( + [], [], color=col_max, linestyle="--", linewidth=2.4, + label=f"max = {v_max:.3f}", + )[0] + + ax.set_title(f"{RAM_PLOT.title} — {server_id}") + ax.set_xlabel(RAM_PLOT.x_label) + ax.set_ylabel(RAM_PLOT.y_label) + ax.grid(visible=True) - self._apply_plot_cfg(ax, RAM_PLOT, legend_handles=ax.lines) + leg = ax.legend( + handles=[h_mean, h_min, h_max], + loc="upper right", + bbox_to_anchor=(0.98, 0.98), + borderaxespad=0.0, + framealpha=0.90, + fancybox=True, + handlelength=2.6, + fontsize=9.5, + ) + leg.get_frame().set_facecolor("white") diff --git a/tests/integration/single_server/test_single_server.py b/tests/integration/single_server/test_int_single_server.py similarity index 100% rename from tests/integration/single_server/test_single_server.py rename to tests/integration/single_server/test_int_single_server.py diff --git a/tests/system/test_sys_lb_two_servers.py b/tests/system/test_sys_lb_two_servers.py new file mode 100644 index 0000000..0f7f6eb --- /dev/null +++ b/tests/system/test_sys_lb_two_servers.py @@ -0,0 +1,180 @@ +"""System test: load balancer + two identical servers (seeded, reproducible). + +Topology: + + generator → client → LB(round_robin) → srv-1 + └→ srv-2 + srv-1 → client + srv-2 → client + +Each server endpoint: CPU(2 ms) → RAM(128 MB) → IO(12 ms) +Edges: exponential latency ~2–3 ms. +We check: +- latency stats / throughput sanity vs nominal λ (~40 rps); +- balanced traffic across srv-1 / srv-2 via edge concurrency and RAM means. 
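+
+Run locally (the module self-skips unless the flag is exported):
+
+    ASYNCFLOW_RUN_SYSTEM_TESTS=1 pytest tests/system/test_sys_lb_two_servers.py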
+""" + +from __future__ import annotations + +import os +import random +from typing import Dict, List + +import numpy as np +import pytest +import simpy + +from asyncflow import AsyncFlow +from asyncflow.components import Client, Edge, Endpoint, LoadBalancer, Server +from asyncflow.metrics.analyzer import ResultsAnalyzer +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator +from asyncflow.config.constants import LatencyKey + +pytestmark = [ + pytest.mark.system, + pytest.mark.skipif( + os.getenv("ASYNCFLOW_RUN_SYSTEM_TESTS") != "1", + reason="System tests disabled (set ASYNCFLOW_RUN_SYSTEM_TESTS=1 to run).", + ), +] + +SEED = 4242 +REL_TOL = 0.30 # 30% for λ/latency +BAL_TOL = 0.25 # 25% imbalance tolerated between the two backends + + +def _seed_all(seed: int = SEED) -> None: + random.seed(seed) + np.random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + + +def _build_payload(): + gen = RqsGenerator( + id="rqs-1", + avg_active_users={"mean": 120}, + avg_request_per_minute_per_user={"mean": 20}, + user_sampling_window=60, + ) + client = Client(id="client-1") + + endpoint = Endpoint( + endpoint_name="/api", + steps=[ + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.002}}, + {"kind": "ram", "step_operation": {"necessary_ram": 128}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.012}}, + ], + ) + srv1 = Server(id="srv-1", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[endpoint]) + srv2 = Server(id="srv-2", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[endpoint]) + + lb = LoadBalancer(id="lb-1", algorithms="round_robin", server_covered={"srv-1", "srv-2"}) + + edges = [ + Edge( + id="gen-client", + source="rqs-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="client-lb", + source="client-1", + target="lb-1", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="lb-srv1", + source="lb-1", + target="srv-1", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="lb-srv2", + source="lb-1", + target="srv-2", + latency={"mean": 0.002, "distribution": "exponential"}, + ), + Edge( + id="srv1-client", + source="srv-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="srv2-client", + source="srv-2", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + ] + + settings = SimulationSettings( + total_simulation_time=600, + sample_period_s=0.05, + enabled_sample_metrics=[ + "ready_queue_len", + "event_loop_io_sleep", + "ram_in_use", + "edge_concurrent_connection", + ], + enabled_event_metrics=["rqs_clock"], + ) + + flow = ( + AsyncFlow() + .add_generator(gen) + .add_client(client) + .add_load_balancer(lb) + .add_servers(srv1, srv2) + .add_edges(*edges) + .add_simulation_settings(settings) + ) + return flow.build_payload() + + +def _rel_diff(a: float, b: float) -> float: + denom = max(1e-9, (abs(a) + abs(b)) / 2.0) + return abs(a - b) / denom + + +def test_system_lb_two_servers_balanced_and_sane() -> None: + """End-to-end LB scenario: sanity + balance checks with seeded RNGs.""" + _seed_all() + + env = simpy.Environment() + runner = SimulationRunner(env=env, simulation_input=_build_payload()) + res: ResultsAnalyzer = runner.run() + + # Latency sanity + stats = res.get_latency_stats() + assert stats and LatencyKey.TOTAL_REQUESTS in stats + mean_lat = 
float(stats.get(LatencyKey.MEAN, 0.0)) + assert 0.020 <= mean_lat <= 0.060 + + # Throughput sanity vs nominal λ ≈ 40 rps + _, rps = res.get_throughput_series() + assert rps, "No throughput series produced." + rps_mean = float(np.mean(rps)) + lam = 120 * 20 / 60.0 + assert abs(rps_mean - lam) / lam <= REL_TOL + + # Load balance check: edge concurrency lb→srv1 vs lb→srv2 close + sampled = res.get_sampled_metrics() + edge_cc: Dict[str, List[float]] = sampled.get("edge_concurrent_connection", {}) + assert "lb-srv1" in edge_cc and "lb-srv2" in edge_cc + m1, m2 = float(np.mean(edge_cc["lb-srv1"])), float(np.mean(edge_cc["lb-srv2"])) + assert _rel_diff(m1, m2) <= BAL_TOL + + # Server metrics present and broadly similar (RAM means close-ish) + ram_map: Dict[str, List[float]] = sampled.get("ram_in_use", {}) + assert "srv-1" in ram_map and "srv-2" in ram_map + ram1, ram2 = float(np.mean(ram_map["srv-1"])), float(np.mean(ram_map["srv-2"])) + assert _rel_diff(ram1, ram2) <= BAL_TOL + + # IDs reported by analyzer + sids = res.list_server_ids() + assert set(sids) == {"srv-1", "srv-2"} diff --git a/tests/system/test_sys_single_server.py b/tests/system/test_sys_single_server.py new file mode 100644 index 0000000..160899c --- /dev/null +++ b/tests/system/test_sys_single_server.py @@ -0,0 +1,136 @@ +"""System test: single-server scenario (deterministic-seeded, reproducible). + +Runs a compact but realistic topology: + + generator → client → srv-1 → client + +Endpoint on srv-1: CPU(1.5 ms) → RAM(96 MB) → IO(10 ms) +Edges: exponential latency ~3 ms each way. + +Assertions (with sensible tolerances): +- non-empty latency stats; mean latency in a plausible band; +- mean throughput close to the nominal λ (±30%); +- sampled metrics exist for srv-1 and are non-empty. +""" + +from __future__ import annotations + +import os +import random +from typing import Dict, List + +import numpy as np +import pytest +import simpy + +from asyncflow import AsyncFlow +from asyncflow.components import Client, Edge, Endpoint, Server +from asyncflow.metrics.analyzer import ResultsAnalyzer +from asyncflow.runtime.simulation_runner import SimulationRunner +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator +from asyncflow.config.constants import LatencyKey + +# Mark as system and skip unless explicitly enabled in CI (or locally) +pytestmark = [ + pytest.mark.system, + pytest.mark.skipif( + os.getenv("ASYNCFLOW_RUN_SYSTEM_TESTS") != "1", + reason="System tests disabled (set ASYNCFLOW_RUN_SYSTEM_TESTS=1 to run).", + ), +] + +SEED = 1337 +REL_TOL = 0.30 # 30% tolerance for stochastic expectations + + +def _seed_all(seed: int = SEED) -> None: + random.seed(seed) + np.random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + + +def _build_payload(): + # Workload: ~26.7 rps (80 users * 20 rpm / 60) + gen = RqsGenerator( + id="rqs-1", + avg_active_users={"mean": 80}, + avg_request_per_minute_per_user={"mean": 20}, + user_sampling_window=60, + ) + client = Client(id="client-1") + + ep = Endpoint( + endpoint_name="/api", + steps=[ + {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.0015}}, + {"kind": "ram", "step_operation": {"necessary_ram": 96}}, + {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}}, + ], + ) + srv = Server(id="srv-1", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[ep]) + + edges = [ + Edge( + id="gen-client", + source="rqs-1", + target="client-1", + latency={"mean": 0.003, "distribution": "exponential"}, + ), + Edge( + id="client-srv", 
+            source="client-1",
+            target="srv-1",
+            latency={"mean": 0.003, "distribution": "exponential"},
+        ),
+        Edge(
+            id="srv-client",
+            source="srv-1",
+            target="client-1",
+            latency={"mean": 0.003, "distribution": "exponential"},
+        ),
+    ]
+
+    settings = SimulationSettings(
+        total_simulation_time=180,  # virtual time; keeps wall time fast
+        sample_period_s=0.05,
+        enabled_sample_metrics=[
+            "ready_queue_len",
+            "event_loop_io_sleep",
+            "ram_in_use",
+            "edge_concurrent_connection",
+        ],
+        enabled_event_metrics=["rqs_clock"],
+    )
+
+    flow = AsyncFlow().add_generator(gen).add_client(client).add_servers(srv).add_edges(*edges)
+    flow = flow.add_simulation_settings(settings)
+    return flow.build_payload()
+
+
+def test_system_single_server_end_to_end() -> None:
+    """End-to-end single-server check with tolerances and seeded RNGs."""
+    _seed_all()
+
+    env = simpy.Environment()
+    runner = SimulationRunner(env=env, simulation_input=_build_payload())
+    res: ResultsAnalyzer = runner.run()
+
+    # Latency stats present and plausible
+    stats = res.get_latency_stats()
+    assert stats and LatencyKey.TOTAL_REQUESTS in stats
+    mean_lat = float(stats.get(LatencyKey.MEAN, 0.0))
+    assert 0.015 <= mean_lat <= 0.060
+
+    # Throughput close to nominal lambda
+    timestamps, rps = res.get_throughput_series()
+    assert timestamps, "No throughput series produced."
+    rps_mean = float(np.mean(rps))
+    lam = 80 * 20 / 60.0
+    assert abs(rps_mean - lam) / lam <= REL_TOL
+
+    # Sampled metrics exist for srv-1
+    sampled: Dict[str, Dict[str, List[float]]] = res.get_sampled_metrics()
+    for key in ("ready_queue_len", "event_loop_io_sleep", "ram_in_use"):
+        assert key in sampled and "srv-1" in sampled[key]
+        assert len(sampled[key]["srv-1"]) > 0
diff --git a/tests/unit/metrics/test_analyzer.py b/tests/unit/metrics/test_analyzer.py
index 365cb1c..901b646 100644
--- a/tests/unit/metrics/test_analyzer.py
+++ b/tests/unit/metrics/test_analyzer.py
@@ -1,4 +1,15 @@
-"""Unit-tests for ``ResultsAnalyzer`` (latency, throughput, plots)."""
+"""Extra unit tests for ``ResultsAnalyzer`` helpers and plots.
+
+This suite complements the basic analyzer tests by exercising:
+- formatting helpers (latency stats pretty-printer),
+- server-id ordering,
+- throughput recomputation with a custom window,
+- metric accessors tolerant to enum/string keys,
+- per-metric series time bases,
+- the compact "base dashboard" plotting helper,
+- single-server plots (ready queue, I/O queue, RAM),
+- legend contents (mean/min/max) on each single-server plot.
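+
+All plot tests build ``matplotlib.figure.Figure`` objects directly instead
+of going through pyplot, so the suite runs headless with no display server.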
+""" from __future__ import annotations @@ -7,8 +18,8 @@ import pytest from matplotlib.figure import Figure -from asyncflow.config.constants import LatencyKey -from asyncflow.metrics.analyzer import ResultsAnalyzer +from asyncflow.analysis import ResultsAnalyzer +from asyncflow.enums import SampledMetricName if TYPE_CHECKING: from asyncflow.runtime.actors.client import ClientRuntime @@ -18,13 +29,13 @@ # ---------------------------------------------------------------------- # -# Dummy objects (test doubles) # +# Test doubles (minimal) # # ---------------------------------------------------------------------- # class DummyClock: - """Clock with *start* / *finish* timestamps to emulate a request.""" + """Clock with *start* and *finish* timestamps to emulate one request.""" def __init__(self, start: float, finish: float) -> None: - """Save *start* and *finish* times.""" + """Initialize a synthetic request completion interval.""" self.start = start self.finish = finish @@ -41,23 +52,23 @@ class DummyName: """Mimic an Enum member that carries a ``.value`` attribute.""" def __init__(self, value: str) -> None: - """Store the dummy string *value*.""" + """Store the underlying string *value* used as a metric key.""" self.value = value class DummyServerConfig: - """Lightweight replacement for the real ``ServerConfig``.""" + """Minimal server config with only the ``id`` attribute.""" def __init__(self, identifier: str) -> None: - """Expose only the *id* field required by the analyzer.""" + """Set the server identifier used by the analyzer.""" self.id = identifier class DummyServer: - """Stub for ``ServerRuntime`` exposing ``enabled_metrics``.""" + """Stub for ``ServerRuntime`` exposing ``enabled_metrics`` and config.""" def __init__(self, identifier: str, metrics: dict[str, list[float]]) -> None: - """Create a fake server with the given *metrics*.""" + """Create a fake server with the given per-metric time series.""" self.server_config = DummyServerConfig(identifier) self.enabled_metrics = { DummyName(name): values for name, values in metrics.items() @@ -65,18 +76,18 @@ def __init__(self, identifier: str, metrics: dict[str, list[float]]) -> None: class DummyEdgeConfig: - """Minified replacement for the real ``EdgeConfig``.""" + """Minimal edge config with only the ``id`` attribute.""" def __init__(self, identifier: str) -> None: - """Expose only the *id* property.""" + """Set the edge identifier used by the analyzer.""" self.id = identifier class DummyEdge: - """Stub for ``EdgeRuntime`` exposing ``enabled_metrics``.""" + """Stub for ``EdgeRuntime`` exposing ``enabled_metrics`` and config.""" def __init__(self, identifier: str, metrics: dict[str, list[float]]) -> None: - """Create a fake edge with the given *metrics*.""" + """Create a fake edge with the given per-metric time series.""" self.edge_config = DummyEdgeConfig(identifier) self.enabled_metrics = { DummyName(name): values for name, values in metrics.items() @@ -84,107 +95,196 @@ def __init__(self, identifier: str, metrics: dict[str, list[float]]) -> None: # ---------------------------------------------------------------------- # -# Fixtures # +# Fixtures # # ---------------------------------------------------------------------- # @pytest.fixture -def simple_analyzer(sim_settings: SimulationSettings) -> ResultsAnalyzer: - """ - Analyzer with two synthetic requests (durations 1 s and 2 s) on a - 3-second horizon and **no** sampled metrics. 
+def analyzer_with_metrics(sim_settings: SimulationSettings) -> ResultsAnalyzer: + """Provide an analyzer with one server and ready/io/ram signals. + + The fixture sets: + - total_simulation_time = 3 s, + - sample_period_s = 1 s, + - two completed requests at t=1s and t=2s. """ sim_settings.total_simulation_time = 3 - clocks = [DummyClock(0.0, 1.0), DummyClock(0.0, 2.0)] - client = DummyClient(clocks) + sim_settings.sample_period_s = 1.0 + client = DummyClient([DummyClock(0.0, 1.0), DummyClock(0.0, 2.0)]) + server = DummyServer( + "srvX", + { + "ready_queue_len": [0, 1, 2], + "event_loop_io_sleep": [0, 0, 1], + "ram_in_use": [10.0, 20.0, 30.0], + }, + ) + edge = DummyEdge("edgeX", {}) return ResultsAnalyzer( client=cast("ClientRuntime", client), - servers=[], - edges=[], + servers=[cast("ServerRuntime", server)], + edges=[cast("EdgeRuntime", edge)], settings=sim_settings, ) # ---------------------------------------------------------------------- # -# Tests for computed metrics # +# Accessors / formatting # # ---------------------------------------------------------------------- # -def test_latency_stats(simple_analyzer: ResultsAnalyzer) -> None: - stats = simple_analyzer.get_latency_stats() - assert stats[LatencyKey.TOTAL_REQUESTS] == 2.0 - assert stats[LatencyKey.MEAN] == pytest.approx(1.5) - assert stats[LatencyKey.MEDIAN] == pytest.approx(1.5) - assert stats[LatencyKey.MIN] == pytest.approx(1.0) - assert stats[LatencyKey.MAX] == pytest.approx(2.0) - assert stats[LatencyKey.P95] == pytest.approx(1.95, rel=1e-3) +def test_format_latency_stats_contains_header_and_lines( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Ensure the formatted stats contain a header and canonical keys.""" + text = analyzer_with_metrics.format_latency_stats() + assert "LATENCY STATS" in text + assert "MEAN" in text + assert "MEDIAN" in text + + +def test_list_server_ids_preserves_topology_order( + sim_settings: SimulationSettings, +) -> None: + """Verify that server IDs are returned in topology order.""" + sim_settings.total_simulation_time = 1 + client = DummyClient([]) + s1 = DummyServer("s1", {}) + s2 = DummyServer("s2", {}) + s3 = DummyServer("s3", {}) + an = ResultsAnalyzer( + client=cast("ClientRuntime", client), + servers=[ + cast("ServerRuntime", s1), + cast("ServerRuntime", s2), + cast("ServerRuntime", s3), + ], + edges=[], + settings=sim_settings, + ) + assert an.list_server_ids() == ["s1", "s2", "s3"] -def test_throughput_series(simple_analyzer: ResultsAnalyzer) -> None: - timestamps, rps = simple_analyzer.get_throughput_series() - assert timestamps == [1.0, 2.0, 3.0] - assert rps == [1.0, 1.0, 0.0] +# ---------------------------------------------------------------------- # +# Throughput with custom window # +# ---------------------------------------------------------------------- # +def test_get_throughput_series_custom_window_half_second( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Check recomputation of throughput with a 0.5 s window.""" + # Completions at 1s and 2s; with 0.5s buckets counts are [0,1,0,1,0,0]. + # Rates are counts / 0.5 => [0, 2, 0, 2, 0, 0]. 
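+    # (Each bucket is the half-open interval ((k-1)*0.5, k*0.5]; the
+    # analyzer counts completions with `finish <= current_end`, so the
+    # request finishing at exactly t=1.0 lands in the bucket ending at 1.0.)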
+ ts, rps = analyzer_with_metrics.get_throughput_series(window_s=0.5) + assert ts[:4] == [0.5, 1.0, 1.5, 2.0] + assert rps[:4] == [0.0, 2.0, 0.0, 2.0] -def test_sampled_metrics_empty(simple_analyzer: ResultsAnalyzer) -> None: - assert simple_analyzer.get_sampled_metrics() == {} +# ---------------------------------------------------------------------- # +# Metric map / series helpers # +# ---------------------------------------------------------------------- # +def test_get_metric_map_accepts_enum_and_string( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Ensure metric retrieval works for enum and raw-string keys.""" + m_enum = analyzer_with_metrics.get_metric_map( + SampledMetricName.READY_QUEUE_LEN, + ) + m_str = analyzer_with_metrics.get_metric_map("ready_queue_len") + + # PT018: split assertions into multiple parts. + assert "srvX" in m_enum + assert "srvX" in m_str + assert m_enum["srvX"] == [0, 1, 2] + assert m_str["srvX"] == [0, 1, 2] + +def test_get_series_respects_sample_period( + sim_settings: SimulationSettings, +) -> None: + """Confirm that series time base honors ``sample_period_s``.""" + sim_settings.total_simulation_time = 5 + sim_settings.sample_period_s = 1.5 + client = DummyClient([]) + server = DummyServer("srv1", {"ready_queue_len": [3, 4, 5]}) + an = ResultsAnalyzer( + client=cast("ClientRuntime", client), + servers=[cast("ServerRuntime", server)], + edges=[], + settings=sim_settings, + ) + times, vals = an.get_series(SampledMetricName.READY_QUEUE_LEN, "srv1") + assert vals == [3, 4, 5] + assert times == [0.0, 1.5, 3.0] # ---------------------------------------------------------------------- # -# Tests for plotting methods # +# Plotting: base dashboard # # ---------------------------------------------------------------------- # -def test_plot_latency_distribution(simple_analyzer: ResultsAnalyzer) -> None: +def test_plot_base_dashboard_sets_titles( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Validate that the base dashboard sets expected axis titles.""" fig = Figure() - ax = fig.subplots() - simple_analyzer.process_all_metrics() - simple_analyzer.plot_latency_distribution(ax) - assert ax.get_title() == "Request Latency Distribution" + ax_lat, ax_thr = fig.subplots(1, 2) + analyzer_with_metrics.plot_base_dashboard(ax_lat, ax_thr) + assert ax_lat.get_title() == "Request Latency Distribution" + assert ax_thr.get_title() == "Throughput (RPS)" -def test_plot_throughput(simple_analyzer: ResultsAnalyzer) -> None: +# ---------------------------------------------------------------------- # +# Plotting: single-server dedicated plots # +# ---------------------------------------------------------------------- # +def test_plot_single_server_ready_queue( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """Ready-queue plot should have a title and a legend with mean/min/max.""" fig = Figure() ax = fig.subplots() - simple_analyzer.process_all_metrics() - simple_analyzer.plot_throughput(ax) - assert ax.get_title() == "Throughput (RPS)" + analyzer_with_metrics.plot_single_server_ready_queue(ax, "srvX") + assert "Ready Queue" in ax.get_title() + + legend = ax.get_legend() + assert legend is not None + + labels = [t.get_text() for t in legend.get_texts()] + assert any(lbl.lower().startswith("mean") for lbl in labels) + assert any(lbl.lower().startswith("min") for lbl in labels) + assert any(lbl.lower().startswith("max") for lbl in labels) + assert len(labels) == 3 -def test_plot_server_queues_with_data(sim_settings: SimulationSettings) -> None: - 
sim_settings.total_simulation_time = 3 - client = DummyClient([]) - server = DummyServer("srv1", {"ready_queue_len": [1, 2, 3]}) - edge = DummyEdge("edge1", {}) - analyzer = ResultsAnalyzer( - client=cast("ClientRuntime", client), - servers=[cast("ServerRuntime", server)], - edges=[cast("EdgeRuntime", edge)], - settings=sim_settings, - ) +def test_plot_single_server_io_queue( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """I/O-queue plot should have a title and a legend with mean/min/max.""" fig = Figure() ax = fig.subplots() - analyzer.process_all_metrics() - analyzer.plot_server_queues(ax) - assert ax.get_title() == "Server Queues" + analyzer_with_metrics.plot_single_server_io_queue(ax, "srvX") + + assert "I/O Queue" in ax.get_title() + legend = ax.get_legend() assert legend is not None - texts = [t.get_text() for t in legend.get_texts()] - assert "srv1 Ready queue" in texts + labels = [t.get_text() for t in legend.get_texts()] + assert any(lbl.lower().startswith("mean") for lbl in labels) + assert any(lbl.lower().startswith("min") for lbl in labels) + assert any(lbl.lower().startswith("max") for lbl in labels) + assert len(labels) == 3 -def test_plot_ram_usage_with_data(sim_settings: SimulationSettings) -> None: - sim_settings.total_simulation_time = 3 - client = DummyClient([]) - edge = DummyEdge("edgeA", {"ram_in_use": [10.0, 20.0]}) - analyzer = ResultsAnalyzer( - client=cast("ClientRuntime", client), - servers=[], - edges=[cast("EdgeRuntime", edge)], - settings=sim_settings, - ) +def test_plot_single_server_ram( + analyzer_with_metrics: ResultsAnalyzer, +) -> None: + """RAM plot should have a title and a legend with mean/min/max.""" fig = Figure() ax = fig.subplots() - analyzer.process_all_metrics() - analyzer.plot_ram_usage(ax) - assert ax.get_title() == "RAM Usage" + analyzer_with_metrics.plot_single_server_ram(ax, "srvX") + + assert "RAM" in ax.get_title() + legend = ax.get_legend() assert legend is not None - texts = [t.get_text() for t in legend.get_texts()] - assert "edgeA RAM" in texts + + labels = [t.get_text() for t in legend.get_texts()] + assert any(lbl.lower().startswith("mean") for lbl in labels) + assert any(lbl.lower().startswith("min") for lbl in labels) + assert any(lbl.lower().startswith("max") for lbl in labels) + assert len(labels) == 3 + diff --git a/tests/unit/public_api/test_import.py b/tests/unit/public_api/test_import.py new file mode 100644 index 0000000..cd708bc --- /dev/null +++ b/tests/unit/public_api/test_import.py @@ -0,0 +1,86 @@ +"""Unit tests for the public components import surface. + +Verifies that: +- `asyncflow.components` exposes the expected `__all__`. +- All symbols in `__all__` are importable and are classes. 
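+
+These checks are cheap tripwires: a renamed class or a dropped re-export
+fails here before any behavioral test runs.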
+""" + +from __future__ import annotations + +import importlib +from typing import TYPE_CHECKING + +from asyncflow.components import ( + Client, + Edge, + Endpoint, + LoadBalancer, + Server, + ServerResources, +) +from asyncflow.settings import SimulationSettings +from asyncflow.workload import RqsGenerator, RVConfig + +if TYPE_CHECKING: + from collections.abc import Iterable + + + +def _assert_all_equals(module_name: str, expected: Iterable[str]) -> None: + """Assert that a module's __all__ exactly matches `expected`.""" + mod = importlib.import_module(module_name) + assert hasattr(mod, "__all__"), f"{module_name} is missing __all__" + assert set(mod.__all__) == set(expected), ( + f"{module_name}.__all__ mismatch:\n" + f" expected: {set(expected)}\n" + f" actual: {set(mod.__all__)}" + ) + + +def test_components_public_symbols() -> None: + """`asyncflow.components` exposes the expected names.""" + expected = [ + "Client", + "Edge", + "Endpoint", + "LoadBalancer", + "Server", + "ServerResources", + ] + _assert_all_equals("asyncflow.components", expected) + + +def test_components_symbols_are_importable_classes() -> None: + """All public symbols are importable and are classes.""" + # Basic type sanity (avoid heavy imports/instantiation) + for cls, name in [ + (Client, "Client"), + (Edge, "Edge"), + (Endpoint, "Endpoint"), + (LoadBalancer, "LoadBalancer"), + (Server, "Server"), + (ServerResources, "ServerResources"), + ]: + assert isinstance(cls, type), f"{name} should be a class type" + assert cls.__name__ == name + +def test_workload_public_symbols() -> None: + """`asyncflow.workload` exposes RVConfig and RqsGenerator.""" + _assert_all_equals("asyncflow.workload", ["RVConfig", "RqsGenerator"]) + + +def test_workload_symbols_are_importable_classes() -> None: + """Public symbols are importable and are classes.""" + for cls, name in [(RVConfig, "RVConfig"), (RqsGenerator, "RqsGenerator")]: + assert isinstance(cls, type), f"{name} should be a class" + assert cls.__name__ == name + +def test_settings_public_symbols() -> None: + """`asyncflow.settings` exposes SimulationSettings.""" + _assert_all_equals("asyncflow.settings", ["SimulationSettings"]) + + +def test_settings_symbol_is_importable_class() -> None: + """Public symbol is importable and is a class.""" + assert isinstance(SimulationSettings, type), "SimulationSettings should be a class" + assert SimulationSettings.__name__ == "SimulationSettings"