1- """System test: single-server scenario (deterministic-seeded, reproducible).
2-
3- Runs a compact but realistic topology:
1+ """System test: single server (seeded, reproducible).
42
3+ Topology:
54 generator → client → srv-1 → client
65
7- Endpoint on srv-1: CPU(1.5 ms) → RAM(96 MB) → IO(10 ms)
8- Edges: exponential latency ~3 ms each way.
6+ Endpoint:
7+ CPU(1 ms) → RAM(64 MB) → IO(10 ms)
8+ Edges: exponential latency ~2-3 ms.
99
10- Assertions (with sensible tolerances) :
11- - non-empty latency stats; mean latency in a plausible band ;
12- - mean throughput close to the nominal λ (±30%) ;
13- - sampled metrics exist for srv-1 and are non-empty .
10+ Checks :
11+ - latency stats present and plausible (broad bounds) ;
12+ - throughput roughly consistent with nominal λ;
13+ - basic sampled metrics present for srv-1.
1414"""
 
 from __future__ import annotations
 
 import os
 import random
-from typing import Dict, List
+from typing import TYPE_CHECKING
 
 import numpy as np
 import pytest
 import simpy
 
 from asyncflow import AsyncFlow
 from asyncflow.components import Client, Edge, Endpoint, Server
-from asyncflow.metrics.analyzer import ResultsAnalyzer
+from asyncflow.config.constants import LatencyKey
 from asyncflow.runtime.simulation_runner import SimulationRunner
 from asyncflow.settings import SimulationSettings
 from asyncflow.workload import RqsGenerator
-from asyncflow.config.constants import LatencyKey
 
-# Mark as system and skip unless explicitly enabled in CI (or locally)
+if TYPE_CHECKING:
+    # Imported only for type checking (ruff: TC001)
+    from asyncflow.metrics.analyzer import ResultsAnalyzer
+    from asyncflow.schemas.payload import SimulationPayload
+
 pytestmark = [
     pytest.mark.system,
     pytest.mark.skipif(
 ]
 
 SEED = 1337
-REL_TOL = 0.30  # 30% tolerance for stochastic expectations
+REL_TOL = 0.35  # generous bound for a simple sanity check
 
 
 def _seed_all(seed: int = SEED) -> None:
     random.seed(seed)
-    np.random.seed(seed)
+    np.random.seed(seed)  # noqa: NPY002
     os.environ["PYTHONHASHSEED"] = str(seed)
 
 
-def _build_payload():
+def _build_payload() -> SimulationPayload:
     # Workload: ~26.7 rps (80 users * 20 rpm / 60)
     gen = RqsGenerator(
         id="rqs-1",
@@ -63,12 +66,16 @@ def _build_payload():
     ep = Endpoint(
         endpoint_name="/api",
         steps=[
-            {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.0015}},
-            {"kind": "ram", "step_operation": {"necessary_ram": 96}},
+            {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.001}},
+            {"kind": "ram", "step_operation": {"necessary_ram": 64}},
             {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}},
         ],
     )
-    srv = Server(id="srv-1", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[ep])
+    srv = Server(
+        id="srv-1",
+        server_resources={"cpu_cores": 1, "ram_mb": 2048},
+        endpoints=[ep],
+    )
 
     edges = [
         Edge(
@@ -81,7 +88,7 @@ def _build_payload():
             id="client-srv",
             source="client-1",
             target="srv-1",
-            latency={"mean": 0.003, "distribution": "exponential"},
+            latency={"mean": 0.002, "distribution": "exponential"},
         ),
         Edge(
             id="srv-client",
@@ -92,7 +99,7 @@ def _build_payload():
     ]
 
     settings = SimulationSettings(
-        total_simulation_time=180,  # virtual time; keeps wall time fast
+        total_simulation_time=400,
         sample_period_s=0.05,
         enabled_sample_metrics=[
             "ready_queue_len",
@@ -103,13 +110,19 @@ def _build_payload():
         enabled_event_metrics=["rqs_clock"],
     )
 
-    flow = AsyncFlow().add_generator(gen).add_client(client).add_servers(srv).add_edges(*edges)
+    flow = (
+        AsyncFlow()
+        .add_generator(gen)
+        .add_client(client)
+        .add_servers(srv)
+        .add_edges(*edges)
+    )
     flow = flow.add_simulation_settings(settings)
     return flow.build_payload()
 
 
-def test_system_single_server_end_to_end() -> None:
-    """End-to-end single-server check with tolerances and seeded RNGs."""
+def test_system_single_server_sane() -> None:
+    """End-to-end single-server scenario: sanity checks with seeded RNGs."""
     _seed_all()
 
     env = simpy.Environment()
@@ -118,19 +131,21 @@ def test_system_single_server_end_to_end() -> None:
 
     # Latency stats present and plausible
     stats = res.get_latency_stats()
-    assert stats and LatencyKey.TOTAL_REQUESTS in stats
+    assert stats, "Expected non-empty stats."
+    assert LatencyKey.TOTAL_REQUESTS in stats
     mean_lat = float(stats.get(LatencyKey.MEAN, 0.0))
     assert 0.015 <= mean_lat <= 0.060
 
-    # Throughput close to nominal lambda
-    timestamps, rps = res.get_throughput_series()
-    assert timestamps, "No throughput series produced."
+    # Throughput sanity vs nominal λ
+    _, rps = res.get_throughput_series()
+    assert rps, "No throughput series produced."
     rps_mean = float(np.mean(rps))
     lam = 80 * 20 / 60.0
     assert abs(rps_mean - lam) / lam <= REL_TOL
 
-    # Sampled metrics exist for srv-1
-    sampled: Dict[str, Dict[str, List[float]]] = res.get_sampled_metrics()
+    # Sampled metrics present for srv-1
+    sampled: dict[str, dict[str, list[float]]] = res.get_sampled_metrics()
     for key in ("ready_queue_len", "event_loop_io_sleep", "ram_in_use"):
-        assert key in sampled and "srv-1" in sampled[key]
+        assert key in sampled
+        assert "srv-1" in sampled[key]
         assert len(sampled[key]["srv-1"]) > 0
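
For reference, the throughput assertion accepts any sampled mean rate within REL_TOL of the nominal λ implied by the workload (80 users at 20 requests per minute). A quick back-of-the-envelope check, using only values that appear in this diff:

    lam = 80 * 20 / 60.0                      # nominal arrival rate ≈ 26.67 rps
    low, high = lam * (1 - 0.35), lam * (1 + 0.35)
    # accepted band for the mean throughput: ≈ 17.3 - 36.0 rps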