Skip to content

Commit bd2e28c

Browse files
committed
Add local benchmark script for scaling
1 parent 379357e commit bd2e28c

File tree

1 file changed

+100
-0
lines changed

1 file changed

+100
-0
lines changed
Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Ad-hoc benchmark to compare solve times with and without pre-solve scaling.
4+
5+
This is intentionally lightweight and meant for local experimentation.
6+
It relies on HiGHS (highspy) being installed. Adjust sizes or iterations
7+
via CLI flags if you want to stress test further.
8+
"""
9+
10+
from __future__ import annotations
11+
12+
import argparse
13+
import time
14+
from collections.abc import Iterable
15+
16+
import numpy as np
17+
18+
from linopy import Model
19+
from linopy.scaling import ScaleOptions
20+
from linopy.solvers import available_solvers
21+
22+
23+
def build_model(n_vars: int, n_cons: int, density: float) -> Model:
    """Construct a random LP with roughly ``n_vars * n_cons * density`` nonzeros.

    The generator is seeded, so repeated calls with the same arguments
    produce an identical model. Rows that receive no sampled entries are
    skipped, so the model may end up with fewer than ``n_cons`` constraints.
    """
    rng = np.random.default_rng(123)
    model = Model()
    x = model.add_variables(lower=0, name="x", coords=[range(n_vars)])

    # Sample coefficient values and random (row, col) positions for them.
    nnz = int(n_vars * n_cons * density)
    data = rng.normal(loc=0.0, scale=1.0, size=nnz)
    rows = rng.integers(0, n_cons, size=data.size)
    cols = rng.integers(0, n_vars, size=data.size)

    # One equality constraint per non-empty row; rhs is scaled so the
    # constraint is feasible relative to the magnitude of its coefficients.
    for row in range(n_cons):
        in_row = rows == row
        if not in_row.any():
            continue
        row_coeffs = data[in_row]
        row_vars = cols[in_row]
        lhs = sum(c * x.isel(dim_0=j) for c, j in zip(row_coeffs, row_vars))
        rhs = abs(row_coeffs).sum() * 0.1
        model.add_constraints(lhs == rhs, name=f"c{row}")

    # Strictly positive objective coefficients keep the LP bounded below.
    obj_coeffs = rng.uniform(0.1, 1.0, size=n_vars)
    model.objective = np.dot(obj_coeffs, x)
    return model
def time_solve(m: Model, scale: bool | ScaleOptions, repeats: int) -> Iterable[float]:
    """Yield the wall-clock duration (seconds) of ``repeats`` solves of *m*.

    Each solve goes through the direct HiGHS interface with the given
    ``scale`` setting. A non-"ok" termination status aborts the run with a
    RuntimeError rather than yielding a misleading timing.
    """
    for _attempt in range(repeats):
        t0 = time.perf_counter()
        status, _ = m.solve("highs", io_api="direct", scale=scale)
        elapsed = time.perf_counter() - t0
        if status != "ok":
            raise RuntimeError(f"Solve failed with status {status}")
        yield elapsed
def run_benchmark(
    n_vars: int, n_cons: int, density: float, repeats: int
) -> tuple[np.ndarray, np.ndarray]:
    """Time ``repeats`` solves of the same random model, unscaled then scaled.

    Two models are built from identical seeded draws — presumably to keep
    the scaled run independent of any state left behind by the unscaled
    solves (TODO confirm against linopy's solve semantics). Returns
    ``(unscaled_times, scaled_times)`` as float arrays of length ``repeats``.
    """
    plain = build_model(n_vars, n_cons, density)
    to_scale = build_model(n_vars, n_cons, density)

    plain_times = np.fromiter(time_solve(plain, False, repeats), dtype=float)
    scaled_times = np.fromiter(time_solve(to_scale, True, repeats), dtype=float)
    return plain_times, scaled_times
def main() -> None:
    """Command-line entry point: parse flags, run the benchmark, report."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--vars", type=int, default=400, help="Number of variables.")
    parser.add_argument("--cons", type=int, default=300, help="Number of constraints.")
    parser.add_argument(
        "--density",
        type=float,
        default=0.01,
        help="Constraint density (0-1) for random coefficients.",
    )
    parser.add_argument(
        "--repeats", type=int, default=3, help="Number of solve repetitions."
    )
    args = parser.parse_args()

    # Fail fast when the direct HiGHS interface is not importable.
    if "highs" not in available_solvers:
        raise RuntimeError("HiGHS (highspy) is required for this benchmark.")

    base_times, scaled_times = run_benchmark(
        n_vars=args.vars, n_cons=args.cons, density=args.density, repeats=args.repeats
    )

    # Median-of-medians ratio: > 1 means scaling made solves faster.
    speedup = np.median(base_times) / np.median(scaled_times)
    print(f"Solve times without scaling: {base_times}")
    print(f"Solve times with scaling : {scaled_times}")
    print(
        f"Median speedup: {speedup:.2f}x "
        f"(lower is better for scaled)"
    )
# Standard script guard: run the benchmark only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)