Commit 09d99dc

Commit message: bug fixed
1 parent fb08027 · commit 09d99dc

4 files changed: +100 additions, -49 deletions

.gitattributes

Lines changed: 12 additions & 0 deletions

@@ -0,0 +1,12 @@
+# Unix-style line endings (LF)
+*.sh text eol=lf
+*.py text eol=lf
+*.yml text eol=lf
+*.yaml text eol=lf
+*.toml text eol=lf
+*.cfg text eol=lf
+*.ini text eol=lf
+Makefile text eol=lf
+
+# PowerShell
+*.ps1 text eol=crlf
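The new .gitattributes only normalizes line endings from this point on; files already in the working tree keep their current endings until they are renormalized (typically with git add --renormalize .). The snippet below is not part of the commit: it is a minimal sketch, assumed to be run from the repository root, that lists files covered by the LF rules which still contain CRLF endings.

```python
# Hypothetical helper (not part of this commit): scan the working tree for
# CRLF line endings in the file types that .gitattributes now maps to LF.
from pathlib import Path

LF_PATTERNS = ("*.sh", "*.py", "*.yml", "*.yaml", "*.toml", "*.cfg", "*.ini", "Makefile")


def files_with_crlf(root: str = ".") -> list[Path]:
    offenders: list[Path] = []
    for pattern in LF_PATTERNS:
        for path in Path(root).rglob(pattern):
            # Skip Git internals and anything that is not a regular file.
            if ".git" in path.parts or not path.is_file():
                continue
            if b"\r\n" in path.read_bytes():
                offenders.append(path)
    return offenders


if __name__ == "__main__":
    for path in files_with_crlf():
        print(path)
```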

simulation_base.png

-121 KB · Binary file not shown.

tests/system/test_sys_lb_two_servers.py

Lines changed: 42 additions & 18 deletions

@@ -8,7 +8,7 @@
     srv-2 → client
 
 Each server endpoint: CPU(2 ms) → RAM(128 MB) → IO(12 ms)
-Edges: exponential latency ~23 ms.
+Edges: exponential latency ~2-3 ms.
 We check:
 - latency stats / throughput sanity vs nominal λ (~40 rps);
 - balanced traffic across srv-1 / srv-2 via edge concurrency and RAM means.
@@ -18,19 +18,23 @@
 
 import os
 import random
-from typing import Dict, List
+from typing import TYPE_CHECKING
 
 import numpy as np
 import pytest
 import simpy
 
 from asyncflow import AsyncFlow
 from asyncflow.components import Client, Edge, Endpoint, LoadBalancer, Server
-from asyncflow.metrics.analyzer import ResultsAnalyzer
+from asyncflow.config.constants import LatencyKey
 from asyncflow.runtime.simulation_runner import SimulationRunner
 from asyncflow.settings import SimulationSettings
 from asyncflow.workload import RqsGenerator
-from asyncflow.config.constants import LatencyKey
+
+if TYPE_CHECKING:
+    # Imported only for type checking (ruff: TC001)
+    from asyncflow.metrics.analyzer import ResultsAnalyzer
+    from asyncflow.schemas.payload import SimulationPayload
 
 pytestmark = [
     pytest.mark.system,
@@ -41,17 +45,17 @@
 ]
 
 SEED = 4242
-REL_TOL = 0.30  # 30% for λ/latency
-BAL_TOL = 0.25  # 25% imbalance tolerated between the two backends
+REL_TOL = 0.30  # 30% for λ/latency
+BAL_TOL = 0.25  # 25% imbalance tolerated between the two backends
 
 
 def _seed_all(seed: int = SEED) -> None:
     random.seed(seed)
-    np.random.seed(seed)
+    np.random.seed(seed)  # noqa: NPY002
     os.environ["PYTHONHASHSEED"] = str(seed)
 
 
-def _build_payload():
+def _build_payload() -> SimulationPayload:
     gen = RqsGenerator(
         id="rqs-1",
         avg_active_users={"mean": 120},
@@ -68,10 +72,22 @@ def _build_payload():
             {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.012}},
         ],
     )
-    srv1 = Server(id="srv-1", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[endpoint])
-    srv2 = Server(id="srv-2", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[endpoint])
+    srv1 = Server(
+        id="srv-1",
+        server_resources={"cpu_cores": 1, "ram_mb": 2048},
+        endpoints=[endpoint],
+    )
+    srv2 = Server(
+        id="srv-2",
+        server_resources={"cpu_cores": 1, "ram_mb": 2048},
+        endpoints=[endpoint],
+    )
 
-    lb = LoadBalancer(id="lb-1", algorithms="round_robin", server_covered={"srv-1", "srv-2"})
+    lb = LoadBalancer(
+        id="lb-1",
+        algorithms="round_robin",
+        server_covered={"srv-1", "srv-2"},
+    )
 
     edges = [
         Edge(
@@ -151,7 +167,8 @@ def test_system_lb_two_servers_balanced_and_sane() -> None:
 
     # Latency sanity
     stats = res.get_latency_stats()
-    assert stats and LatencyKey.TOTAL_REQUESTS in stats
+    assert stats, "Expected non-empty stats."
+    assert LatencyKey.TOTAL_REQUESTS in stats
     mean_lat = float(stats.get(LatencyKey.MEAN, 0.0))
     assert 0.020 <= mean_lat <= 0.060
 
@@ -164,15 +181,22 @@ def test_system_lb_two_servers_balanced_and_sane() -> None:
 
     # Load balance check: edge concurrency lb→srv1 vs lb→srv2 close
     sampled = res.get_sampled_metrics()
-    edge_cc: Dict[str, List[float]] = sampled.get("edge_concurrent_connection", {})
-    assert "lb-srv1" in edge_cc and "lb-srv2" in edge_cc
-    m1, m2 = float(np.mean(edge_cc["lb-srv1"])), float(np.mean(edge_cc["lb-srv2"]))
+    edge_cc: dict[str, list[float]] = sampled.get(
+        "edge_concurrent_connection",
+        {},
+    )
+    assert "lb-srv1" in edge_cc
+    assert "lb-srv2" in edge_cc
+    m1 = float(np.mean(edge_cc["lb-srv1"]))
+    m2 = float(np.mean(edge_cc["lb-srv2"]))
     assert _rel_diff(m1, m2) <= BAL_TOL
 
     # Server metrics present and broadly similar (RAM means close-ish)
-    ram_map: Dict[str, List[float]] = sampled.get("ram_in_use", {})
-    assert "srv-1" in ram_map and "srv-2" in ram_map
-    ram1, ram2 = float(np.mean(ram_map["srv-1"])), float(np.mean(ram_map["srv-2"]))
+    ram_map: dict[str, list[float]] = sampled.get("ram_in_use", {})
+    assert "srv-1" in ram_map
+    assert "srv-2" in ram_map
+    ram1 = float(np.mean(ram_map["srv-1"]))
+    ram2 = float(np.mean(ram_map["srv-2"]))
     assert _rel_diff(ram1, ram2) <= BAL_TOL
 
     # IDs reported by analyzer
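The balance assertions above rely on a _rel_diff helper defined elsewhere in the test module, outside the hunks shown. A minimal sketch of a symmetric relative-difference check of that kind follows; the repository's actual definition may differ:

```python
def _rel_diff(a: float, b: float) -> float:
    """Symmetric relative difference: 0.0 for equal values, near 1.0 for very unequal ones."""
    denom = max(abs(a), abs(b), 1e-12)  # guard against division by zero
    return abs(a - b) / denom


# Example: mean concurrencies of 38 and 45 differ by ~15.6%, within BAL_TOL = 0.25.
assert _rel_diff(38.0, 45.0) <= 0.25
```

The # noqa: NPY002 added to np.random.seed(seed) silences ruff's legacy-NumPy-random rule while keeping the global seeding the tests depend on. If the simulator accepted an explicit generator, the non-legacy alternative would be a seeded np.random.Generator; this is a sketch only, not what the commit does:

```python
import numpy as np

# Modern NumPy RNG (what NPY002 nudges toward): local and explicitly seeded.
rng = np.random.default_rng(4242)
edge_latencies = rng.exponential(scale=0.002, size=5)  # e.g. draws with ~2 ms mean
```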
Lines changed: 46 additions & 31 deletions

@@ -1,37 +1,40 @@
-"""System test: single-server scenario (deterministic-seeded, reproducible).
-
-Runs a compact but realistic topology:
+"""System test: single server (seeded, reproducible).
 
+Topology:
     generator → client → srv-1 → client
 
-Endpoint on srv-1: CPU(1.5 ms) → RAM(96 MB) → IO(10 ms)
-Edges: exponential latency ~3 ms each way.
+Endpoint:
+    CPU(1 ms) → RAM(64 MB) → IO(10 ms)
+Edges: exponential latency ~2-3 ms.
 
-Assertions (with sensible tolerances):
-- non-empty latency stats; mean latency in a plausible band;
-- mean throughput close to the nominal λ (±30%);
-- sampled metrics exist for srv-1 and are non-empty.
+Checks:
+- latency stats present and plausible (broad bounds);
+- throughput roughly consistent with nominal λ;
+- basic sampled metrics present for srv-1.
 """
 
 from __future__ import annotations
 
 import os
 import random
-from typing import Dict, List
+from typing import TYPE_CHECKING
 
 import numpy as np
 import pytest
 import simpy
 
 from asyncflow import AsyncFlow
 from asyncflow.components import Client, Edge, Endpoint, Server
-from asyncflow.metrics.analyzer import ResultsAnalyzer
+from asyncflow.config.constants import LatencyKey
 from asyncflow.runtime.simulation_runner import SimulationRunner
 from asyncflow.settings import SimulationSettings
 from asyncflow.workload import RqsGenerator
-from asyncflow.config.constants import LatencyKey
 
-# Mark as system and skip unless explicitly enabled in CI (or locally)
+if TYPE_CHECKING:
+    # Imported only for type checking (ruff: TC001)
+    from asyncflow.metrics.analyzer import ResultsAnalyzer
+    from asyncflow.schemas.payload import SimulationPayload
+
 pytestmark = [
     pytest.mark.system,
     pytest.mark.skipif(
@@ -41,16 +44,16 @@
 ]
 
 SEED = 1337
-REL_TOL = 0.30  # 30% tolerance for stochastic expectations
+REL_TOL = 0.35  # generous bound for simple sanity
 
 
 def _seed_all(seed: int = SEED) -> None:
     random.seed(seed)
-    np.random.seed(seed)
+    np.random.seed(seed)  # noqa: NPY002
     os.environ["PYTHONHASHSEED"] = str(seed)
 
 
-def _build_payload():
+def _build_payload() -> SimulationPayload:
     # Workload: ~26.7 rps (80 users * 20 rpm / 60)
     gen = RqsGenerator(
         id="rqs-1",
@@ -63,12 +66,16 @@ def _build_payload():
     ep = Endpoint(
         endpoint_name="/api",
         steps=[
-            {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.0015}},
-            {"kind": "ram", "step_operation": {"necessary_ram": 96}},
+            {"kind": "initial_parsing", "step_operation": {"cpu_time": 0.001}},
+            {"kind": "ram", "step_operation": {"necessary_ram": 64}},
             {"kind": "io_wait", "step_operation": {"io_waiting_time": 0.010}},
         ],
     )
-    srv = Server(id="srv-1", server_resources={"cpu_cores": 1, "ram_mb": 2048}, endpoints=[ep])
+    srv = Server(
+        id="srv-1",
+        server_resources={"cpu_cores": 1, "ram_mb": 2048},
+        endpoints=[ep],
+    )
 
     edges = [
         Edge(
@@ -81,7 +88,7 @@ def _build_payload():
             id="client-srv",
             source="client-1",
             target="srv-1",
-            latency={"mean": 0.003, "distribution": "exponential"},
+            latency={"mean": 0.002, "distribution": "exponential"},
         ),
         Edge(
             id="srv-client",
@@ -92,7 +99,7 @@ def _build_payload():
     ]
 
     settings = SimulationSettings(
-        total_simulation_time=180,  # virtual time; keeps wall time fast
+        total_simulation_time=400,
        sample_period_s=0.05,
         enabled_sample_metrics=[
             "ready_queue_len",
@@ -103,13 +110,19 @@ def _build_payload():
         enabled_event_metrics=["rqs_clock"],
     )
 
-    flow = AsyncFlow().add_generator(gen).add_client(client).add_servers(srv).add_edges(*edges)
+    flow = (
+        AsyncFlow()
+        .add_generator(gen)
+        .add_client(client)
+        .add_servers(srv)
+        .add_edges(*edges)
+    )
     flow = flow.add_simulation_settings(settings)
     return flow.build_payload()
 
 
-def test_system_single_server_end_to_end() -> None:
-    """End-to-end single-server check with tolerances and seeded RNGs."""
+def test_system_single_server_sane() -> None:
+    """End-to-end single-server scenario: sanity checks with seeded RNGs."""
     _seed_all()
 
     env = simpy.Environment()
@@ -118,19 +131,21 @@ def test_system_single_server_end_to_end() -> None:
 
     # Latency stats present and plausible
     stats = res.get_latency_stats()
-    assert stats and LatencyKey.TOTAL_REQUESTS in stats
+    assert stats, "Expected non-empty stats."
+    assert LatencyKey.TOTAL_REQUESTS in stats
     mean_lat = float(stats.get(LatencyKey.MEAN, 0.0))
     assert 0.015 <= mean_lat <= 0.060
 
-    # Throughput close to nominal lambda
-    timestamps, rps = res.get_throughput_series()
-    assert timestamps, "No throughput series produced."
+    # Throughput sanity vs nominal λ
+    _, rps = res.get_throughput_series()
+    assert rps, "No throughput series produced."
     rps_mean = float(np.mean(rps))
     lam = 80 * 20 / 60.0
     assert abs(rps_mean - lam) / lam <= REL_TOL
 
-    # Sampled metrics exist for srv-1
-    sampled: Dict[str, Dict[str, List[float]]] = res.get_sampled_metrics()
+    # Sampled metrics present for srv-1
+    sampled: dict[str, dict[str, list[float]]] = res.get_sampled_metrics()
     for key in ("ready_queue_len", "event_loop_io_sleep", "ram_in_use"):
-        assert key in sampled and "srv-1" in sampled[key]
+        assert key in sampled
+        assert "srv-1" in sampled[key]
         assert len(sampled[key]["srv-1"]) > 0
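The throughput assertion compares the measured mean rate against the nominal λ implied by the workload (80 users at 20 requests per minute). Below is a small worked example of the acceptance band that the relaxed REL_TOL = 0.35 gives; the numbers are just the arithmetic from the test, not simulation output:

```python
# Nominal request rate: 80 users * 20 req/min / 60 s ≈ 26.67 rps.
lam = 80 * 20 / 60.0
REL_TOL = 0.35

low, high = lam * (1 - REL_TOL), lam * (1 + REL_TOL)
print(f"accepted mean throughput: {low:.1f} to {high:.1f} rps")  # ≈ 17.3 to 36.0 rps

# Equivalent form of the assertion in the test:
rps_mean = 25.0  # illustrative value only
assert abs(rps_mean - lam) / lam <= REL_TOL
```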
