 
 from invoke import task
 
-from build import (
-    DEFAULT_MEASUREMENTS,
-    GCVS,
-    BenchmarkSuite,
-    CustomExperiment,
-    Elision,
-    Experiments,
-    Metric,
-    PremOpt,
-)
+from build import BenchmarkSuite, CustomExperiment, Experiments, Metric
 from util import timer
 
-EXPERIMENTS = "gcvs premopt elision"
-PROFILES = {"gcvs": GCVS, "premopt": PremOpt, "elision": Elision}
+
+def _parse_args(pexecs, exps=None, suites=None, measurements=None):
+    def _to_list(val):
+        return val.split() if isinstance(val, str) else val
+
+    return Experiments.all(pexecs).filter(
+        experiments=_to_list(exps),
+        suites=_to_list(suites),
+        measurements=_to_list(measurements),
+    )
 
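The new _parse_args helper replaces the module-level EXPERIMENTS string and PROFILES table: it builds the full experiment set for a given number of process executions (pexecs) and narrows it by experiments, suites, and measurements, accepting either whitespace-separated strings (as they arrive from the invoke command line) or already-split lists. A rough usage sketch, assuming the Experiments.all/filter API introduced above; "gcvs" and "premopt" are experiment names taken from the deleted EXPERIMENTS constant:

    # Both calls select the same experiments: a string is split on
    # whitespace, a list passes through _to_list unchanged.
    exps = _parse_args(5, exps="gcvs premopt")
    exps = _parse_args(5, exps=["gcvs", "premopt"])
    # Leaving a keyword as None presumably leaves that dimension unfiltered.
    everything = _parse_args(5)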
 
 def _build_alloy(experiments: "Experiments", warn_if_empty=False):
-    cfgs = experiments.configurations(only_missing=True)
-    alloy_cfgs_needed = set(cfg.alloy for cfg in cfgs if not cfg.alloy.installed)
-    alloy_build_steps = sum(a.steps for a in alloy_cfgs_needed)
-    if not alloy_cfgs_needed and warn_if_empty:
-        print("Nothing to do")
+    cfgs = experiments.alloy_variants(only_missing=True)
+    if not cfgs:
+        if warn_if_empty:
+            print("Nothing to do")
+        return
 
-    print(f"Found {len(alloy_cfgs_needed)} Alloy configuration(s):")
-    [print(f"  {os.path.relpath(a.path)}") for a in alloy_cfgs_needed]
-    with timer("Building missing alloy configurations", alloy_build_steps):
-        for alloy in alloy_cfgs_needed:
-            alloy.build()
+    print(
+        f"{len(cfgs)} Alloy variant(s) require installing for {len(experiments.experiments)} experiment(s):"
+    )
+    [print(f"  {a.name}") for a in cfgs]
+    with timer("Building", sum(a.steps for a in cfgs)):
+        for a in cfgs:
+            a.build()
 
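A small style note on the listing code carried over into the new version: [print(f"  {a.name}") for a in cfgs] builds and discards a list purely for its printing side effect. An equivalent plain loop says the same thing more idiomatically (behaviour is identical; this is a suggestion, not part of the commit):

    for a in cfgs:
        print(f"  {a.name}")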
 
 @task
-def build_alloy(c, experiments=EXPERIMENTS):
+def build_alloy(c, experiments=None, measurements=None):
     """Build all alloy configurations"""
-    exps = Experiments(
-        [exp for e in experiments.split() for exp in PROFILES[e].experiments()]
-    )
+    exps = _parse_args(pexecs=0, exps=experiments, measurements=measurements)
     _build_alloy(exps, warn_if_empty=True)
 
 
 @task
-def build_benchmarks(c, experiments=None, suites=None):
+def build_benchmarks(c, experiments=None, suites=None, measurements=None):
     """Build all benchmarks for all configurations"""
-
-    exps = Experiments(
-        [exp for e in experiments.split() for exp in PROFILES[e].experiments()]
+    exps = _parse_args(
+        pexecs=0, exps=experiments, suites=suites, measurements=measurements
     )
-    if experiments:
-        exps = exps.filter_experiments(suites)
-    if suites:
-        exps = exps.filter_suites(suites)
-
     _build_alloy(exps)
 
     cfgs = exps.configurations(only_missing=True)
+    if not cfgs:
+        print("Nothing to do")
+        return
+    print(f"Found {len(cfgs)} benchmark configuration(s):")
+    [print(f"  {cfg.cfg_name}") for cfg in cfgs]
     with timer("Building benchmark configurations", exps.build_steps):
         for cfg in cfgs:
             cfg.build()
 
 
 @task
-def run_benchmarks(c, pexecs, experiments=None, suites=None, metric=None):
-    pexecs = int(pexecs)
-    exps = Experiments.new(pexecs)
-    if suites:
-        exps = exps.filter_suites(suites)
-
-    if experiments:
-        exps = exps.filter_experiments(experiments)
-
-    if metric:
-        exps = exps.filter_metric(metric)
+def run_benchmarks(c, pexecs, experiments=None, suites=None, measurements=None):
+    exps = _parse_args(
+        int(pexecs), exps=experiments, suites=suites, measurements=measurements
+    )
 
     total_iters = exps.run_steps
 
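Assuming this file is the repository's invoke tasks module, invoke exposes the tasks with dashes in place of underscores, and pexecs (the only parameter without a default) can be given positionally. A hypothetical session; the experiment names come from the deleted EXPERIMENTS constant, while <suite> stands in for a suite name not shown in this diff:

    $ invoke build-alloy --experiments "gcvs elision"
    $ invoke build-benchmarks --experiments premopt --suites <suite>
    $ invoke run-benchmarks 30 --experiments gcvs

The values accepted by --measurements are likewise not visible in this diff, so they are omitted here.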